/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
*/

#ifndef _MD_MD_H
#define _MD_MD_H
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/badblocks.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "md-cluster.h"

#define MaxSector (~(sector_t)0)
/*
 * These flags should really be called "NO_RETRY" rather than
 * "FAILFAST" because they don't make any promise about time lapse,
 * only about the number of retries, which will be zero.
 * REQ_FAILFAST_DRIVER is not included; see commit 4a27446f3e39
 * ("[SCSI] modify scsi to handle new fail fast flags.") for the
 * semantics of the errors it avoids retrying.
 */
#define	MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
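
/*
 * Illustrative sketch, not part of this header's API: a personality might
 * apply MD_FAILFAST to a read bio when the target device carries the
 * FailFast flag, much as the raid1/raid10 read paths do. 'rdev' and 'bio'
 * are assumed to come from the caller.
 *
 *	if (test_bit(FailFast, &rdev->flags) && bio_op(bio) == REQ_OP_READ)
 *		bio->bi_opf |= MD_FAILFAST;
 */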
/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	struct mddev *mddev;		/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page *sb_page, *bb_page;
	sector_t data_offset;		/* start of data in array */
	sector_t new_data_offset;	/* only relevant while reshaping */
	sector_t sb_start;		/* offset of the super block (in 512-byte sectors) */
	int sb_size;			/* bytes in the superblock */
	int preferred_minor;		/* autorun support */

	struct kobject kobj;
	/* A device can be in one of three states based on two flags:
	 * Not working:		faulty==1 in_sync==0
	 * Fully working:	faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array:	faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long flags;	/* bit set of 'enum flag_bits' bits. */
	wait_queue_head_t blocked_wait;
	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	union {
		sector_t recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */
		sector_t journal_tail;	/* If this device is a journal device,
					 * this is the journal tail (journal
					 * recovery start point)
					 */
	};
	atomic_t nr_pending;		/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t read_errors;		/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	time64_t last_read_error;	/* monotonic time since our
					 * last read error
					 */
	atomic_t corrected_errors;	/* number of corrected read errors,
					 * for reporting to userspace and storing
					 * in superblock
					 */
	/*
	 * The members below are used to check for collisions between
	 * write-behind IOs.
	 */
	struct list_head wb_list;
	spinlock_t wb_list_lock;
	wait_queue_head_t wb_io_wait;

	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct kernfs_node *sysfs_state; /* handle for 'state'
					  * sysfs entry */

	struct badblocks badblocks;
	struct {
		short offset;	/* Offset from superblock to start of PPL.
				 * Not used by external metadata. */
		unsigned int size;	/* Size in sectors of the PPL space */
		sector_t sector;	/* First sector of the PPL space */
	} ppl;
};

enum flag_bits {
	Faulty,			/* device is known to have a fault */
	In_sync,		/* device is in_sync with rest of array */
	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
				 * bitmap-based recovery to get fully in sync.
				 * The bit is only meaningful before device
				 * has been passed to pers->hot_add_disk.
				 */
	WriteMostly,		/* Avoid reading if at all possible */
	AutoDetected,		/* added by auto-detect */
	Blocked,		/* An error occurred but has not yet
				 * been acknowledged by the metadata
				 * handler, so don't allow writes
				 * until it is cleared */
	WriteErrorSeen,		/* A write error has been seen on this
				 * device
				 */
	FaultRecorded,		/* Intermediate state for clearing
				 * Blocked.  The Fault is/will-be
				 * recorded in the metadata, but that
				 * metadata hasn't been stored safely
				 * on disk yet.
				 */
	BlockedBadBlocks,	/* A writer is blocked because they
				 * found an unacknowledged bad-block.
				 * This can safely be cleared at any
				 * time, and the writer will re-check.
				 * It may be set at any time, and at
				 * worst the writer will timeout and
				 * re-check.  So setting it as
				 * accurately as possible is good, but
				 * not absolutely critical.
				 */
	WantReplacement,	/* This device is a candidate to be
				 * hot-replaced, either because it has
				 * reported some faults, or because
				 * of explicit request.
				 */
	Replacement,		/* This device is a replacement for
				 * a want_replacement device with same
				 * raid_disk number.
				 */
	Candidate,		/* For clustered environments only:
				 * This device is seen locally but not
				 * by the whole cluster
				 */
	Journal,		/* This device is used as journal for
				 * raid-5/6 cache.
				 * Usually, this device should be faster
				 * than other devices in the array
				 */
	ClusterRemove,
	RemoveSynchronized,	/* synchronize_rcu() was called after
				 * this device was known to be faulty,
				 * so it is safe to remove without
				 * another synchronize_rcu() call.
				 */
	ExternalBbl,		/* External metadata provides bad
				 * block management for a disk
				 */
	FailFast,		/* Minimal retries should be attempted on
				 * this device, so use REQ_FAILFAST_DEV.
				 * Also don't try to repair failed reads.
				 * It is expected that no bad block log
				 * is present.
				 */
	LastDev,		/* Seems to be the last working dev as
				 * it didn't fail, so don't use FailFast
				 * any more for metadata
				 */
	WBCollisionCheck,	/*
				 * multiqueue device should check if there
				 * is collision between write behind bios.
				 */
};
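
/*
 * Illustrative sketch, not from this header: rdev flags are manipulated
 * with the standard atomic bitops, e.g. when a personality fails a device:
 *
 *	if (test_and_set_bit(Faulty, &rdev->flags))
 *		return;				// already marked faulty
 *	clear_bit(In_sync, &rdev->flags);
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 */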
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = badblocks_check(&rdev->badblocks, rdev->data_offset + s,
					 sectors,
					 first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
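
/*
 * Illustrative sketch, not from this header: a read path might consult
 * is_badblock() before issuing IO and shrink the request to stop short of
 * the first bad range. 'rdev', 'sector' and 'sectors' are assumed inputs.
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) &&
 *	    first_bad > sector)
 *		sectors = first_bad - sector;	// read only the good prefix
 */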
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int is_new);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
struct md_cluster_info;
/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
	MD_CLOSING,		/* If set, we are closing the array, do not open
				 * it then */
	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
				   * already took resync lock, need to
				   * release the lock */
	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
				 * supported as calls to md_error() will
				 * never cause the array to become failed.
				 */
	MD_HAS_PPL,		/* The raid array has PPL feature set */
	MD_HAS_MULTIPLE_PPLS,	/* The raid array has multiple PPLs feature set */
	MD_ALLOW_SB_UPDATE,	/* md_check_recovery is allowed to update
				 * the metadata without taking reconfig_mutex.
				 */
	MD_UPDATING_SB,		/* md_check_recovery is updating the metadata
				 * without explicitly holding reconfig_mutex.
				 */
};
enum mddev_sb_flags {
	MD_SB_CHANGE_DEVS,		/* Some device status has changed */
	MD_SB_CHANGE_CLEAN,		/* transition to or from 'clean' */
	MD_SB_CHANGE_PENDING,		/* switch from 'clean' to 'active' in progress */
	MD_SB_NEED_REWRITE,		/* metadata write needs to be repeated */
};
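
/*
 * Illustrative sketch, not from this header: after failing a device, md
 * code typically marks the superblock dirty and wakes the management
 * thread so the metadata gets written out.
 *
 *	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 *	set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
 *	md_wakeup_thread(mddev->thread);
 */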
#define NR_WB_INFOS	8
/* record current range of write behind IOs */
struct wb_info {
	sector_t lo;
	sector_t hi;
	struct list_head list;
};
struct mddev {
	struct md_personality		*pers;
	struct list_head		disks;
	unsigned long			flags;
	unsigned long			sb_flags;

	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2
	/* Superblock information */
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	time64_t			ctime, utime;

	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;
	int				reshape_backwards;
	struct md_thread		*thread;	/* management thread */
	struct md_thread		*sync_thread;	/* doing resync or reconstruct */

	/* 'last_sync_action' is initialized to "none".  It is set when a
	 * sync operation (i.e "data-check", "requested-resync", "resync",
	 * "recovery", or "reshape") is started.  It holds this value even
	 * when the sync thread is "frozen" (interrupted) or "idle" (stopped
	 * or finished).  It is overwritten when a new sync operation is begun.
	 */
	char				*last_sync_action;
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed.  So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	atomic64_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */
	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;

	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
	 * device) will fail due to a read error on the source device, it
	 * takes a copy of this number and does not attempt recovery again
	 * until this number changes.
	 */
	int				recovery_disabled;

	int				in_sync;	/* known to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				changed;	/* True if we might need to
							 * reread partition info */
	int				degraded;	/* whether md should consider
							 * adding a spare
							 */
	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct kernfs_node		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct kernfs_node		*sysfs_action;  /* handle for 'sync_action' */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	/* "lock" protects:
	 *   flush_bio transition from NULL to !NULL
	 *   rdev superblocks, events
	 *   clearing MD_CHANGE_*
	 *   in_sync - and related safemode and MD_CHANGE changes
	 *   pers (also protected by reconfig_mutex and pending IO).
	 *   clearing ->bitmap_info.file
	 *   changing ->resync_{min,max}
	 *   setting MD_RECOVERY_RUNNING (which interacts with resync_{min,max})
	 */
	spinlock_t			lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	struct percpu_ref		writes_pending;
	int				sync_checkers;	/* # of threads checking writes_pending */
	struct request_queue		*queue;	/* for plugging ... */
	struct bitmap			*bitmap; /* the bitmap for the device */
	struct {
		struct file		*file; /* the bitmap file */
		loff_t			offset; /* offset from superblock of
						 * start of bitmap. May be
						 * negative, but not '0'
						 * For external metadata, offset
						 * from start of device.
						 */
		unsigned long		space; /* space available at this offset */
		loff_t			default_offset; /* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		unsigned long		default_space; /* space available at
							* default offset */
		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
		int			nodes; /* Maximum number of nodes in the cluster */
		char			cluster_name[64]; /* Name of the cluster */
	} bitmap_info;
	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;

	struct bio_set			bio_set;
	struct bio_set			sync_set; /* for sync operations like
						   * metadata and bitmap writes
						   */
	/* Generic flush handling.
	 * The last to finish preflush schedules a worker to submit
	 * the rest of the request (without the REQ_PREFLUSH flag).
	 */
	struct bio *flush_bio;
	atomic_t flush_pending;
	ktime_t start_flush, last_flush; /* last_flush is when the last completed
					  * flush was started.
					  */
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	mempool_t *wb_info_pool;
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
	unsigned int			good_device_nr;	/* good device num within cluster raid */

	bool	has_superblocks:1;
};
enum recovery_flags {
	/*
	 * If neither SYNC nor RESHAPE is set, then it is a recovery.
	 */
	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
	MD_RESYNCING_REMOTE,	/* remote node is running resync thread */
};
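
/*
 * Illustrative sketch, not from this header: the canonical way to ask the
 * management thread to consider starting a resync/recovery.
 *
 *	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 *	md_wakeup_thread(mddev->thread);
 */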
static inline int __must_check mddev_lock(struct mddev *mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}
/* Sometimes we need to take the lock in a situation where
 * failure due to interrupts is not acceptable.
 */
static inline void mddev_lock_nointr(struct mddev *mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}
static inline int mddev_trylock(struct mddev *mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
extern void mddev_unlock(struct mddev *mddev);
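
/*
 * Illustrative sketch, not from this header: the usual pattern in
 * reconfiguration paths such as sysfs store methods.
 *
 *	int err = mddev_lock(mddev);
 *
 *	if (err)
 *		return err;	// interrupted by a signal
 *	...reconfigure the array...
 *	mddev_unlock(mddev);
 */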
static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bio->bi_disk->sync_io);
}
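
/*
 * Illustrative sketch, not from this header: resync paths account the
 * sectors they are about to write before submitting the bio, e.g.
 *
 *	md_sync_acct_bio(wbio, nr_sectors);
 *	generic_make_request(wbio);
 */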
struct md_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	bool (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * start up works that do NOT require md_thread. Tasks that
	 * require md_thread should go into start().
	 */
	int (*run)(struct mddev *mddev);
	/* start up works that require md threads */
	int (*start)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
	int (*spare_active) (struct mddev *mddev);
	sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
	int (*resize) (struct mddev *mddev, sector_t sectors);
	sector_t (*size) (struct mddev *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (struct mddev *mddev);
	int (*start_reshape) (struct mddev *mddev);
	void (*finish_reshape) (struct mddev *mddev);
	void (*update_reshape_pos) (struct mddev *mddev);
	/* quiesce suspends or resumes internal processing.
	 * 1 - stop new actions and wait for action io to complete
	 * 0 - return to normal behaviour
	 */
	void (*quiesce) (struct mddev *mddev, int quiesce);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (struct mddev *mddev);
	/* congested implements bdi.congested_fn().
	 * Will not be called while array is 'suspended' */
	int (*congested)(struct mddev *mddev, int bits);
	/* Changes the consistency policy of an active array. */
	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
};
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct mddev *, char *);
	ssize_t (*store)(struct mddev *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, name);
	return sd;
}

static inline void sysfs_notify_dirent_safe(struct kernfs_node *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}
static inline char * mdname (struct mddev * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}
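
/*
 * Illustrative sketch, not from this header: mdname() is convenient for
 * log messages, e.g.
 *
 *	pr_warn("md: %s: metadata update failed\n", mdname(mddev));
 */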
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else
		return 0;
}

static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	char nm[20];

	if (!test_bit(Replacement, &rdev->flags) &&
	    !test_bit(Journal, &rdev->flags) &&
	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}
}
/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, mddev)				\
	list_for_each_entry(rdev, &((mddev)->disks), same_set)

#define rdev_for_each_safe(rdev, tmp, mddev)				\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)				\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
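
/*
 * Illustrative sketch, not from this header: counting in-sync members
 * while holding reconfig_mutex.
 *
 *	struct md_rdev *rdev;
 *	int cnt = 0;
 *
 *	rdev_for_each(rdev, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			cnt++;
 */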
struct md_thread {
	void			(*run) (struct md_thread *thread);
	struct mddev		*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
	void			*private;
};

#define THREAD_WAKEUP  0

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}
extern int register_md_personality(struct md_personality *p);
extern int unregister_md_personality(struct md_personality *p);
extern int register_md_cluster_operations(struct md_cluster_operations *ops,
		struct module *module);
extern int unregister_md_cluster_operations(void);
extern int md_setup_cluster(struct mddev *mddev, int nodes);
extern void md_cluster_stop(struct mddev *mddev);
extern struct md_thread *md_register_thread(
	void (*run)(struct md_thread *thread),
	struct mddev *mddev,
	const char *name);
extern void md_unregister_thread(struct md_thread **threadp);
extern void md_wakeup_thread(struct md_thread *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern int mddev_init_writes_pending(struct mddev *mddev);
extern bool md_write_start(struct mddev *mddev, struct bio *bi);
extern void md_write_inc(struct mddev *mddev, struct bio *bi);
extern void md_write_end(struct mddev *mddev);
extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);

extern int mddev_congested(struct mddev *mddev, int bits);
extern void md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int op, int op_flags,
			bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
extern void md_new_event(struct mddev *mddev);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);

extern void md_handle_request(struct mddev *mddev, struct bio *bio);
extern void mddev_suspend(struct mddev *mddev);
extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   struct mddev *mddev);

extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev *rdev);
extern void mddev_create_wb_pool(struct mddev *mddev, struct md_rdev *rdev,
				 bool is_suspend);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);

	if (atomic_dec_and_test(&rdev->nr_pending) && faulty) {
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
}
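
/*
 * Illustrative sketch, not from this header: callers pair the decrement
 * above with an atomic_inc() taken before issuing IO to the device.
 *
 *	atomic_inc(&rdev->nr_pending);
 *	...submit IO to rdev->bdev and wait for it to complete...
 *	rdev_dec_pending(rdev, mddev);
 */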
extern struct md_cluster_operations *md_cluster_ops;
static inline int mddev_is_clustered(struct mddev *mddev)
{
	return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}
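
/*
 * Illustrative sketch, not from this header: cluster-only paths are
 * usually guarded with this helper, e.g.
 *
 *	if (mddev_is_clustered(mddev))
 *		md_cluster_ops->resync_info_update(mddev, lo, hi);
 */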
/* clear unsupported mddev_flags */
static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
	unsigned long unsupported_flags)
{
	mddev->flags &= ~unsupported_flags;
}
static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_SAME &&
	    !bio->bi_disk->queue->limits.max_write_same_sectors)
		mddev->queue->limits.max_write_same_sectors = 0;
}

static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
	    !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
		mddev->queue->limits.max_write_zeroes_sectors = 0;
}

#endif /* _MD_MD_H */