1 /*
2    md.c : Multiple Devices driver for Linux
3      Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33
34    Errors, Warnings, etc.
35    Please use:
36      pr_crit() for error conditions that risk data loss
37      pr_err() for error conditions that are unexpected, like an IO error
38          or internal inconsistency
39      pr_warn() for error conditions that could have been predicted, like
40          adding a device to an array when it has incompatible metadata
41    pr_info() for every interesting, very rare event, like an array starting
42          or stopping, or resync starting or stopping
43      pr_debug() for everything else.
44
45 */
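/*
 * Illustrative example (editorial sketch, not part of the original file):
 * applying the level guidance above, a predictable condition such as an
 * rdev carrying incompatible metadata would use pr_warn().  The message
 * text here is hypothetical; 'b' is a char[BDEVNAME_SIZE] as used
 * elsewhere in this file:
 *
 *	pr_warn("md: %s has incompatible metadata, not adding it\n",
 *		bdevname(rdev->bdev, b));
 */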
46
47 #include <linux/kthread.h>
48 #include <linux/blkdev.h>
49 #include <linux/badblocks.h>
50 #include <linux/sysctl.h>
51 #include <linux/seq_file.h>
52 #include <linux/fs.h>
53 #include <linux/poll.h>
54 #include <linux/ctype.h>
55 #include <linux/string.h>
56 #include <linux/hdreg.h>
57 #include <linux/proc_fs.h>
58 #include <linux/random.h>
59 #include <linux/module.h>
60 #include <linux/reboot.h>
61 #include <linux/file.h>
62 #include <linux/compat.h>
63 #include <linux/delay.h>
64 #include <linux/raid/md_p.h>
65 #include <linux/raid/md_u.h>
66 #include <linux/slab.h>
67 #include "md.h"
68 #include "bitmap.h"
69 #include "md-cluster.h"
70
71 #ifndef MODULE
72 static void autostart_arrays(int part);
73 #endif
74
75 /* pers_list is a list of registered personalities protected
76  * by pers_lock.
77  * pers_lock also protects accesses to mddev->thread
78  * when the mutex cannot be held.
79  */
80 static LIST_HEAD(pers_list);
81 static DEFINE_SPINLOCK(pers_lock);
82
83 struct md_cluster_operations *md_cluster_ops;
84 EXPORT_SYMBOL(md_cluster_ops);
85 struct module *md_cluster_mod;
86 EXPORT_SYMBOL(md_cluster_mod);
87
88 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
89 static struct workqueue_struct *md_wq;
90 static struct workqueue_struct *md_misc_wq;
91
92 static int remove_and_add_spares(struct mddev *mddev,
93                                  struct md_rdev *this);
94 static void mddev_detach(struct mddev *mddev);
95
96 /*
97  * Default number of read corrections we'll attempt on an rdev
98  * before ejecting it from the array. We divide the read error
99  * count by 2 for every hour elapsed between read errors.
100  */
101 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
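/*
 * Worked example (editorial, assuming only the decay rule stated above):
 * with the default limit of 20, an rdev that has accumulated 16 corrected
 * read errors and then goes two hours without another one is treated as
 * having 16 / 2 / 2 = 4, so old, isolated errors decay away rather than
 * slowly pushing a healthy device towards ejection.
 */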
102 /*
103  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
104  * is 1000 KB/sec, so the extra system load does not show up that much.
105  * Increase it if you want to have more _guaranteed_ speed. Note that
106  * the RAID driver will use the maximum available bandwidth if the IO
107  * subsystem is idle. There is also an 'absolute maximum' reconstruction
108  * speed limit - in case reconstruction slows down your system despite
109  * idle IO detection.
110  *
111  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
112  * or /sys/block/mdX/md/sync_speed_{min,max}
113  */
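/*
 * Usage sketch (editorial, not from this file): the limits can be tuned at
 * run time through the paths named above, with values in KB/sec (md0 is
 * just an example device name):
 *
 *	echo  50000 > /proc/sys/dev/raid/speed_limit_min
 *	echo 500000 > /sys/block/md0/md/sync_speed_max
 *
 * A per-array sync_speed_{min,max} of 0 means "use the system-wide sysctl
 * value", which is what speed_min()/speed_max() below implement.
 */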
114
115 static int sysctl_speed_limit_min = 1000;
116 static int sysctl_speed_limit_max = 200000;
117 static inline int speed_min(struct mddev *mddev)
118 {
119         return mddev->sync_speed_min ?
120                 mddev->sync_speed_min : sysctl_speed_limit_min;
121 }
122
123 static inline int speed_max(struct mddev *mddev)
124 {
125         return mddev->sync_speed_max ?
126                 mddev->sync_speed_max : sysctl_speed_limit_max;
127 }
128
129 static struct ctl_table_header *raid_table_header;
130
131 static struct ctl_table raid_table[] = {
132         {
133                 .procname       = "speed_limit_min",
134                 .data           = &sysctl_speed_limit_min,
135                 .maxlen         = sizeof(int),
136                 .mode           = S_IRUGO|S_IWUSR,
137                 .proc_handler   = proc_dointvec,
138         },
139         {
140                 .procname       = "speed_limit_max",
141                 .data           = &sysctl_speed_limit_max,
142                 .maxlen         = sizeof(int),
143                 .mode           = S_IRUGO|S_IWUSR,
144                 .proc_handler   = proc_dointvec,
145         },
146         { }
147 };
148
149 static struct ctl_table raid_dir_table[] = {
150         {
151                 .procname       = "raid",
152                 .maxlen         = 0,
153                 .mode           = S_IRUGO|S_IXUGO,
154                 .child          = raid_table,
155         },
156         { }
157 };
158
159 static struct ctl_table raid_root_table[] = {
160         {
161                 .procname       = "dev",
162                 .maxlen         = 0,
163                 .mode           = 0555,
164                 .child          = raid_dir_table,
165         },
166         {  }
167 };
168
169 static const struct block_device_operations md_fops;
170
171 static int start_readonly;
172
173 /* bio_clone_mddev
174  * like bio_clone, but with a local bio set
175  */
176
177 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
178                             struct mddev *mddev)
179 {
180         struct bio *b;
181
182         if (!mddev || !mddev->bio_set)
183                 return bio_alloc(gfp_mask, nr_iovecs);
184
185         b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
186         if (!b)
187                 return NULL;
188         return b;
189 }
190 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
191
192 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
193                             struct mddev *mddev)
194 {
195         if (!mddev || !mddev->bio_set)
196                 return bio_clone(bio, gfp_mask);
197
198         return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
199 }
200 EXPORT_SYMBOL_GPL(bio_clone_mddev);
201
202 /*
203  * We have a system wide 'event count' that is incremented
204  * on any 'interesting' event, and readers of /proc/mdstat
205  * can use 'poll' or 'select' to find out when the event
206  * count increases.
207  *
208  * Events are:
209  *  start array, stop array, error, add device, remove device,
210  *  start build, activate spare
211  */
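/*
 * Userspace sketch (editorial assumption, not part of this file): a
 * monitor can wait for the next event roughly as below; the md proc
 * handler flags POLLPRI|POLLERR once md_event_count has moved past the
 * value seen at the last read.  Needs <fcntl.h>, <poll.h>, <unistd.h>:
 *
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	while (read(fd, buf, sizeof(buf)) > 0)
 *		;			// consume the current contents
 *	poll(&pfd, 1, -1);		// blocks until the next md event
 *	lseek(fd, 0, SEEK_SET);		// then re-read for the new state
 */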
212 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
213 static atomic_t md_event_count;
214 void md_new_event(struct mddev *mddev)
215 {
216         atomic_inc(&md_event_count);
217         wake_up(&md_event_waiters);
218 }
219 EXPORT_SYMBOL_GPL(md_new_event);
220
221 /*
222  * Enables iteration over all existing md arrays.
223  * all_mddevs_lock protects this list.
224  */
225 static LIST_HEAD(all_mddevs);
226 static DEFINE_SPINLOCK(all_mddevs_lock);
227
228 /*
229  * iterates through all used mddevs in the system.
230  * We take care to grab the all_mddevs_lock whenever navigating
231  * the list, and to always hold a refcount when unlocked.
232  * Any code which breaks out of this loop while owning
233  * a reference to the current mddev must mddev_put it.
234  */
235 #define for_each_mddev(_mddev,_tmp)                                     \
236                                                                         \
237         for (({ spin_lock(&all_mddevs_lock);                            \
238                 _tmp = all_mddevs.next;                                 \
239                 _mddev = NULL;});                                       \
240              ({ if (_tmp != &all_mddevs)                                \
241                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
242                 spin_unlock(&all_mddevs_lock);                          \
243                 if (_mddev) mddev_put(_mddev);                          \
244                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
245                 _tmp != &all_mddevs;});                                 \
246              ({ spin_lock(&all_mddevs_lock);                            \
247                 _tmp = _tmp->next;})                                    \
248                 )
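/*
 * Illustrative usage (editorial sketch): both iteration variables are
 * supplied by the caller and the macro handles the refcounting itself,
 * e.g.:
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_info("md: considering %s\n", mdname(mddev));
 *
 * Code that breaks out of the loop early still holds a reference to the
 * current mddev and must drop it with mddev_put().
 */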
249
250 /* Rather than calling directly into the personality make_request function,
251  * IO requests come here first so that we can check if the device is
252  * being suspended pending a reconfiguration.
253  * We hold a refcount over the call to ->make_request.  By the time that
254  * call has finished, the bio has been linked into some internal structure
255  * and so is visible to ->quiesce(), so we don't need the refcount any more.
256  */
257 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
258 {
259         const int rw = bio_data_dir(bio);
260         struct mddev *mddev = q->queuedata;
261         unsigned int sectors;
262         int cpu;
263
264         blk_queue_split(q, &bio, q->bio_split);
265
266         if (mddev == NULL || mddev->pers == NULL) {
267                 bio_io_error(bio);
268                 return BLK_QC_T_NONE;
269         }
270         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
271                 if (bio_sectors(bio) != 0)
272                         bio->bi_error = -EROFS;
273                 bio_endio(bio);
274                 return BLK_QC_T_NONE;
275         }
276         smp_rmb(); /* Ensure implications of  'active' are visible */
277         rcu_read_lock();
278         if (mddev->suspended) {
279                 DEFINE_WAIT(__wait);
280                 for (;;) {
281                         prepare_to_wait(&mddev->sb_wait, &__wait,
282                                         TASK_UNINTERRUPTIBLE);
283                         if (!mddev->suspended)
284                                 break;
285                         rcu_read_unlock();
286                         schedule();
287                         rcu_read_lock();
288                 }
289                 finish_wait(&mddev->sb_wait, &__wait);
290         }
291         atomic_inc(&mddev->active_io);
292         rcu_read_unlock();
293
294         /*
295          * save the sectors now since our bio can
296          * go away inside make_request
297          */
298         sectors = bio_sectors(bio);
299         /* bio could be mergeable after being passed to the underlying layer */
300         bio->bi_opf &= ~REQ_NOMERGE;
301         mddev->pers->make_request(mddev, bio);
302
303         cpu = part_stat_lock();
304         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
305         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
306         part_stat_unlock();
307
308         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
309                 wake_up(&mddev->sb_wait);
310
311         return BLK_QC_T_NONE;
312 }
313
314 /* mddev_suspend makes sure no new requests are submitted
315  * to the device, and that any requests that have been submitted
316  * are completely handled.
317  * Once mddev_detach() is called and completes, the module will be
318  * completely unused.
319  */
320 void mddev_suspend(struct mddev *mddev)
321 {
322         WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
323         if (mddev->suspended++)
324                 return;
325         synchronize_rcu();
326         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
327         mddev->pers->quiesce(mddev, 1);
328
329         del_timer_sync(&mddev->safemode_timer);
330 }
331 EXPORT_SYMBOL_GPL(mddev_suspend);
332
333 void mddev_resume(struct mddev *mddev)
334 {
335         if (--mddev->suspended)
336                 return;
337         wake_up(&mddev->sb_wait);
338         mddev->pers->quiesce(mddev, 0);
339
340         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
341         md_wakeup_thread(mddev->thread);
342         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
343 }
344 EXPORT_SYMBOL_GPL(mddev_resume);
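/*
 * Usage sketch (editorial, not from this file): callers quiesce the array
 * around a configuration change by pairing the two helpers, roughly:
 *
 *	mddev_suspend(mddev);
 *	// ... swap in the new configuration while no IO is in flight ...
 *	mddev_resume(mddev);
 */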
345
346 int mddev_congested(struct mddev *mddev, int bits)
347 {
348         struct md_personality *pers = mddev->pers;
349         int ret = 0;
350
351         rcu_read_lock();
352         if (mddev->suspended)
353                 ret = 1;
354         else if (pers && pers->congested)
355                 ret = pers->congested(mddev, bits);
356         rcu_read_unlock();
357         return ret;
358 }
359 EXPORT_SYMBOL_GPL(mddev_congested);
360 static int md_congested(void *data, int bits)
361 {
362         struct mddev *mddev = data;
363         return mddev_congested(mddev, bits);
364 }
365
366 /*
367  * Generic flush handling for md
368  */
369
370 static void md_end_flush(struct bio *bio)
371 {
372         struct md_rdev *rdev = bio->bi_private;
373         struct mddev *mddev = rdev->mddev;
374
375         rdev_dec_pending(rdev, mddev);
376
377         if (atomic_dec_and_test(&mddev->flush_pending)) {
378                 /* The pre-request flush has finished */
379                 queue_work(md_wq, &mddev->flush_work);
380         }
381         bio_put(bio);
382 }
383
384 static void md_submit_flush_data(struct work_struct *ws);
385
386 static void submit_flushes(struct work_struct *ws)
387 {
388         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
389         struct md_rdev *rdev;
390
391         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
392         atomic_set(&mddev->flush_pending, 1);
393         rcu_read_lock();
394         rdev_for_each_rcu(rdev, mddev)
395                 if (rdev->raid_disk >= 0 &&
396                     !test_bit(Faulty, &rdev->flags)) {
397                         /* Take two references, one is dropped
398                          * when request finishes, one after
399                          * we reclaim rcu_read_lock
400                          */
401                         struct bio *bi;
402                         atomic_inc(&rdev->nr_pending);
403                         atomic_inc(&rdev->nr_pending);
404                         rcu_read_unlock();
405                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
406                         bi->bi_end_io = md_end_flush;
407                         bi->bi_private = rdev;
408                         bi->bi_bdev = rdev->bdev;
409                         bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
410                         atomic_inc(&mddev->flush_pending);
411                         submit_bio(bi);
412                         rcu_read_lock();
413                         rdev_dec_pending(rdev, mddev);
414                 }
415         rcu_read_unlock();
416         if (atomic_dec_and_test(&mddev->flush_pending))
417                 queue_work(md_wq, &mddev->flush_work);
418 }
419
420 static void md_submit_flush_data(struct work_struct *ws)
421 {
422         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
423         struct bio *bio = mddev->flush_bio;
424
425         if (bio->bi_iter.bi_size == 0)
426                 /* an empty barrier - all done */
427                 bio_endio(bio);
428         else {
429                 bio->bi_opf &= ~REQ_PREFLUSH;
430                 mddev->pers->make_request(mddev, bio);
431         }
432
433         mddev->flush_bio = NULL;
434         wake_up(&mddev->sb_wait);
435 }
436
437 void md_flush_request(struct mddev *mddev, struct bio *bio)
438 {
439         spin_lock_irq(&mddev->lock);
440         wait_event_lock_irq(mddev->sb_wait,
441                             !mddev->flush_bio,
442                             mddev->lock);
443         mddev->flush_bio = bio;
444         spin_unlock_irq(&mddev->lock);
445
446         INIT_WORK(&mddev->flush_work, submit_flushes);
447         queue_work(md_wq, &mddev->flush_work);
448 }
449 EXPORT_SYMBOL(md_flush_request);
450
451 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
452 {
453         struct mddev *mddev = cb->data;
454         md_wakeup_thread(mddev->thread);
455         kfree(cb);
456 }
457 EXPORT_SYMBOL(md_unplug);
458
459 static inline struct mddev *mddev_get(struct mddev *mddev)
460 {
461         atomic_inc(&mddev->active);
462         return mddev;
463 }
464
465 static void mddev_delayed_delete(struct work_struct *ws);
466
467 static void mddev_put(struct mddev *mddev)
468 {
469         struct bio_set *bs = NULL;
470
471         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
472                 return;
473         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
474             mddev->ctime == 0 && !mddev->hold_active) {
475                 /* Array is not configured at all, and not held active,
476                  * so destroy it */
477                 list_del_init(&mddev->all_mddevs);
478                 bs = mddev->bio_set;
479                 mddev->bio_set = NULL;
480                 if (mddev->gendisk) {
481                         /* We did a probe so need to clean up.  Call
482                          * queue_work inside the spinlock so that
483                          * flush_workqueue() after mddev_find will
484                          * succeed in waiting for the work to be done.
485                          */
486                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
487                         queue_work(md_misc_wq, &mddev->del_work);
488                 } else
489                         kfree(mddev);
490         }
491         spin_unlock(&all_mddevs_lock);
492         if (bs)
493                 bioset_free(bs);
494 }
495
496 static void md_safemode_timeout(unsigned long data);
497
498 void mddev_init(struct mddev *mddev)
499 {
500         mutex_init(&mddev->open_mutex);
501         mutex_init(&mddev->reconfig_mutex);
502         mutex_init(&mddev->bitmap_info.mutex);
503         INIT_LIST_HEAD(&mddev->disks);
504         INIT_LIST_HEAD(&mddev->all_mddevs);
505         setup_timer(&mddev->safemode_timer, md_safemode_timeout,
506                     (unsigned long) mddev);
507         atomic_set(&mddev->active, 1);
508         atomic_set(&mddev->openers, 0);
509         atomic_set(&mddev->active_io, 0);
510         spin_lock_init(&mddev->lock);
511         atomic_set(&mddev->flush_pending, 0);
512         init_waitqueue_head(&mddev->sb_wait);
513         init_waitqueue_head(&mddev->recovery_wait);
514         mddev->reshape_position = MaxSector;
515         mddev->reshape_backwards = 0;
516         mddev->last_sync_action = "none";
517         mddev->resync_min = 0;
518         mddev->resync_max = MaxSector;
519         mddev->level = LEVEL_NONE;
520 }
521 EXPORT_SYMBOL_GPL(mddev_init);
522
523 static struct mddev *mddev_find(dev_t unit)
524 {
525         struct mddev *mddev, *new = NULL;
526
527         if (unit && MAJOR(unit) != MD_MAJOR)
528                 unit &= ~((1<<MdpMinorShift)-1);
529
530  retry:
531         spin_lock(&all_mddevs_lock);
532
533         if (unit) {
534                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
535                         if (mddev->unit == unit) {
536                                 mddev_get(mddev);
537                                 spin_unlock(&all_mddevs_lock);
538                                 kfree(new);
539                                 return mddev;
540                         }
541
542                 if (new) {
543                         list_add(&new->all_mddevs, &all_mddevs);
544                         spin_unlock(&all_mddevs_lock);
545                         new->hold_active = UNTIL_IOCTL;
546                         return new;
547                 }
548         } else if (new) {
549                 /* find an unused unit number */
550                 static int next_minor = 512;
551                 int start = next_minor;
552                 int is_free = 0;
553                 int dev = 0;
554                 while (!is_free) {
555                         dev = MKDEV(MD_MAJOR, next_minor);
556                         next_minor++;
557                         if (next_minor > MINORMASK)
558                                 next_minor = 0;
559                         if (next_minor == start) {
560                                 /* Oh dear, all in use. */
561                                 spin_unlock(&all_mddevs_lock);
562                                 kfree(new);
563                                 return NULL;
564                         }
565
566                         is_free = 1;
567                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
568                                 if (mddev->unit == dev) {
569                                         is_free = 0;
570                                         break;
571                                 }
572                 }
573                 new->unit = dev;
574                 new->md_minor = MINOR(dev);
575                 new->hold_active = UNTIL_STOP;
576                 list_add(&new->all_mddevs, &all_mddevs);
577                 spin_unlock(&all_mddevs_lock);
578                 return new;
579         }
580         spin_unlock(&all_mddevs_lock);
581
582         new = kzalloc(sizeof(*new), GFP_KERNEL);
583         if (!new)
584                 return NULL;
585
586         new->unit = unit;
587         if (MAJOR(unit) == MD_MAJOR)
588                 new->md_minor = MINOR(unit);
589         else
590                 new->md_minor = MINOR(unit) >> MdpMinorShift;
591
592         mddev_init(new);
593
594         goto retry;
595 }
596
597 static struct attribute_group md_redundancy_group;
598
599 void mddev_unlock(struct mddev *mddev)
600 {
601         if (mddev->to_remove) {
602                 /* These cannot be removed under reconfig_mutex as
603                  * an access to the files will try to take reconfig_mutex
604                  * while holding the file unremovable, which leads to
605                  * a deadlock.
606          * So set sysfs_active while the removal is happening,
607          * and anything else which might set ->to_remove or might
608                  * otherwise change the sysfs namespace will fail with
609                  * -EBUSY if sysfs_active is still set.
610                  * We set sysfs_active under reconfig_mutex and elsewhere
611                  * test it under the same mutex to ensure its correct value
612                  * is seen.
613                  */
614                 struct attribute_group *to_remove = mddev->to_remove;
615                 mddev->to_remove = NULL;
616                 mddev->sysfs_active = 1;
617                 mutex_unlock(&mddev->reconfig_mutex);
618
619                 if (mddev->kobj.sd) {
620                         if (to_remove != &md_redundancy_group)
621                                 sysfs_remove_group(&mddev->kobj, to_remove);
622                         if (mddev->pers == NULL ||
623                             mddev->pers->sync_request == NULL) {
624                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
625                                 if (mddev->sysfs_action)
626                                         sysfs_put(mddev->sysfs_action);
627                                 mddev->sysfs_action = NULL;
628                         }
629                 }
630                 mddev->sysfs_active = 0;
631         } else
632                 mutex_unlock(&mddev->reconfig_mutex);
633
634         /* As we've dropped the mutex we need a spinlock to
635          * make sure the thread doesn't disappear
636          */
637         spin_lock(&pers_lock);
638         md_wakeup_thread(mddev->thread);
639         spin_unlock(&pers_lock);
640 }
641 EXPORT_SYMBOL_GPL(mddev_unlock);
642
643 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
644 {
645         struct md_rdev *rdev;
646
647         rdev_for_each_rcu(rdev, mddev)
648                 if (rdev->desc_nr == nr)
649                         return rdev;
650
651         return NULL;
652 }
653 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
654
655 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
656 {
657         struct md_rdev *rdev;
658
659         rdev_for_each(rdev, mddev)
660                 if (rdev->bdev->bd_dev == dev)
661                         return rdev;
662
663         return NULL;
664 }
665
666 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
667 {
668         struct md_rdev *rdev;
669
670         rdev_for_each_rcu(rdev, mddev)
671                 if (rdev->bdev->bd_dev == dev)
672                         return rdev;
673
674         return NULL;
675 }
676
677 static struct md_personality *find_pers(int level, char *clevel)
678 {
679         struct md_personality *pers;
680         list_for_each_entry(pers, &pers_list, list) {
681                 if (level != LEVEL_NONE && pers->level == level)
682                         return pers;
683                 if (strcmp(pers->name, clevel)==0)
684                         return pers;
685         }
686         return NULL;
687 }
688
689 /* return the offset of the superblock in 512-byte sectors */
690 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
691 {
692         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
693         return MD_NEW_SIZE_SECTORS(num_sectors);
694 }
695
696 static int alloc_disk_sb(struct md_rdev *rdev)
697 {
698         rdev->sb_page = alloc_page(GFP_KERNEL);
699         if (!rdev->sb_page)
700                 return -ENOMEM;
701         return 0;
702 }
703
704 void md_rdev_clear(struct md_rdev *rdev)
705 {
706         if (rdev->sb_page) {
707                 put_page(rdev->sb_page);
708                 rdev->sb_loaded = 0;
709                 rdev->sb_page = NULL;
710                 rdev->sb_start = 0;
711                 rdev->sectors = 0;
712         }
713         if (rdev->bb_page) {
714                 put_page(rdev->bb_page);
715                 rdev->bb_page = NULL;
716         }
717         badblocks_exit(&rdev->badblocks);
718 }
719 EXPORT_SYMBOL_GPL(md_rdev_clear);
720
721 static void super_written(struct bio *bio)
722 {
723         struct md_rdev *rdev = bio->bi_private;
724         struct mddev *mddev = rdev->mddev;
725
726         if (bio->bi_error) {
727                 pr_err("md: super_written gets error=%d\n", bio->bi_error);
728                 md_error(mddev, rdev);
729         }
730
731         if (atomic_dec_and_test(&mddev->pending_writes))
732                 wake_up(&mddev->sb_wait);
733         rdev_dec_pending(rdev, mddev);
734         bio_put(bio);
735 }
736
737 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
738                    sector_t sector, int size, struct page *page)
739 {
740          * write the first 'size' bytes of 'page' to 'sector' of rdev.
741          * Increment mddev->pending_writes before returning
742          * and decrement it on completion, waking up sb_wait
743          * if zero is reached.
744          * If an error occurred, call md_error
745          */
746         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
747
748         atomic_inc(&rdev->nr_pending);
749
750         bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
751         bio->bi_iter.bi_sector = sector;
752         bio_add_page(bio, page, size, 0);
753         bio->bi_private = rdev;
754         bio->bi_end_io = super_written;
755         bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
756
757         atomic_inc(&mddev->pending_writes);
758         submit_bio(bio);
759 }
760
761 void md_super_wait(struct mddev *mddev)
762 {
763         /* wait for all superblock writes that were scheduled to complete */
764         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
765 }
766
767 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
768                  struct page *page, int op, int op_flags, bool metadata_op)
769 {
770         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
771         int ret;
772
773         bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
774                 rdev->meta_bdev : rdev->bdev;
775         bio_set_op_attrs(bio, op, op_flags);
776         if (metadata_op)
777                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
778         else if (rdev->mddev->reshape_position != MaxSector &&
779                  (rdev->mddev->reshape_backwards ==
780                   (sector >= rdev->mddev->reshape_position)))
781                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
782         else
783                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
784         bio_add_page(bio, page, size, 0);
785
786         submit_bio_wait(bio);
787
788         ret = !bio->bi_error;
789         bio_put(bio);
790         return ret;
791 }
792 EXPORT_SYMBOL_GPL(sync_page_io);
793
794 static int read_disk_sb(struct md_rdev *rdev, int size)
795 {
796         char b[BDEVNAME_SIZE];
797
798         if (rdev->sb_loaded)
799                 return 0;
800
801         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
802                 goto fail;
803         rdev->sb_loaded = 1;
804         return 0;
805
806 fail:
807         pr_err("md: disabled device %s, could not read superblock.\n",
808                bdevname(rdev->bdev,b));
809         return -EINVAL;
810 }
811
812 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
813 {
814         return  sb1->set_uuid0 == sb2->set_uuid0 &&
815                 sb1->set_uuid1 == sb2->set_uuid1 &&
816                 sb1->set_uuid2 == sb2->set_uuid2 &&
817                 sb1->set_uuid3 == sb2->set_uuid3;
818 }
819
820 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
821 {
822         int ret;
823         mdp_super_t *tmp1, *tmp2;
824
825         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
826         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
827
828         if (!tmp1 || !tmp2) {
829                 ret = 0;
830                 goto abort;
831         }
832
833         *tmp1 = *sb1;
834         *tmp2 = *sb2;
835
836         /*
837          * nr_disks is not constant
838          */
839         tmp1->nr_disks = 0;
840         tmp2->nr_disks = 0;
841
842         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
843 abort:
844         kfree(tmp1);
845         kfree(tmp2);
846         return ret;
847 }
848
849 static u32 md_csum_fold(u32 csum)
850 {
851         csum = (csum & 0xffff) + (csum >> 16);
852         return (csum & 0xffff) + (csum >> 16);
853 }
854
855 static unsigned int calc_sb_csum(mdp_super_t *sb)
856 {
857         u64 newcsum = 0;
858         u32 *sb32 = (u32*)sb;
859         int i;
860         unsigned int disk_csum, csum;
861
862         disk_csum = sb->sb_csum;
863         sb->sb_csum = 0;
864
865         for (i = 0; i < MD_SB_BYTES/4 ; i++)
866                 newcsum += sb32[i];
867         csum = (newcsum & 0xffffffff) + (newcsum>>32);
868
869 #ifdef CONFIG_ALPHA
870         /* This used to use csum_partial, which was wrong for several
871          * reasons including that different results are returned on
872          * different architectures.  It isn't critical that we get exactly
873          * the same return value as before (we always csum_fold before
874          * testing, and that removes any differences).  However as we
875          * know that csum_partial always returned a 16bit value on
876          * alphas, do a fold to maximise conformity to previous behaviour.
877          */
878         sb->sb_csum = md_csum_fold(disk_csum);
879 #else
880         sb->sb_csum = disk_csum;
881 #endif
882         return csum;
883 }
884
885 /*
886  * Handle superblock details.
887  * We want to be able to handle multiple superblock formats
888  * so we have a common interface to them all, and an array of
889  * different handlers.
890  * We rely on user-space to write the initial superblock, and support
891  * reading and updating of superblocks.
892  * Interface methods are:
893  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
894  *      loads and validates a superblock on dev.
895  *      if refdev != NULL, compare superblocks on both devices
896  *    Return:
897  *      0 - dev has a superblock that is compatible with refdev
898  *      1 - dev has a superblock that is compatible and newer than refdev
899  *          so dev should be used as the refdev in future
900  *     -EINVAL superblock incompatible or invalid
901  *     -othererror e.g. -EIO
902  *
903  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
904  *      Verify that dev is acceptable into mddev.
905  *       The first time, mddev->raid_disks will be 0, and data from
906  *       dev should be merged in.  Subsequent calls check that dev
907  *       is new enough.  Return 0 or -EINVAL
908  *
909  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
910  *     Update the superblock for rdev with data in mddev
911  *     This does not write to disc.
912  *
913  */
914
915 struct super_type  {
916         char                *name;
917         struct module       *owner;
918         int                 (*load_super)(struct md_rdev *rdev,
919                                           struct md_rdev *refdev,
920                                           int minor_version);
921         int                 (*validate_super)(struct mddev *mddev,
922                                               struct md_rdev *rdev);
923         void                (*sync_super)(struct mddev *mddev,
924                                           struct md_rdev *rdev);
925         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
926                                                 sector_t num_sectors);
927         int                 (*allow_new_offset)(struct md_rdev *rdev,
928                                                 unsigned long long new_offset);
929 };
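/*
 * Illustrative wiring (editorial sketch, mirroring how the handlers
 * defined later in this file are hooked up): each supported format
 * supplies one such table entry, e.g. for 0.90.0:
 *
 *	{
 *		.name             = "0.90.0",
 *		.owner            = THIS_MODULE,
 *		.load_super       = super_90_load,
 *		.validate_super   = super_90_validate,
 *		.sync_super       = super_90_sync,
 *		.rdev_size_change = super_90_rdev_size_change,
 *		.allow_new_offset = super_90_allow_new_offset,
 *	},
 */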
930
931 /*
932  * Check that the given mddev has no bitmap.
933  *
934  * This function is called from the run method of all personalities that do not
935  * support bitmaps. It prints an error message and returns non-zero if mddev
936  * has a bitmap. Otherwise, it returns 0.
937  *
938  */
939 int md_check_no_bitmap(struct mddev *mddev)
940 {
941         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
942                 return 0;
943         pr_warn("%s: bitmaps are not supported for %s\n",
944                 mdname(mddev), mddev->pers->name);
945         return 1;
946 }
947 EXPORT_SYMBOL(md_check_no_bitmap);
948
949 /*
950  * load_super for 0.90.0
951  */
952 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
953 {
954         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
955         mdp_super_t *sb;
956         int ret;
957
958         /*
959          * Calculate the position of the superblock (512-byte sectors);
960          * it's at the end of the disk.
961          *
962          * It also happens to be a multiple of 4KB.
963          */
964         rdev->sb_start = calc_dev_sboffset(rdev);
965
966         ret = read_disk_sb(rdev, MD_SB_BYTES);
967         if (ret)
968                 return ret;
969
970         ret = -EINVAL;
971
972         bdevname(rdev->bdev, b);
973         sb = page_address(rdev->sb_page);
974
975         if (sb->md_magic != MD_SB_MAGIC) {
976                 pr_warn("md: invalid raid superblock magic on %s\n", b);
977                 goto abort;
978         }
979
980         if (sb->major_version != 0 ||
981             sb->minor_version < 90 ||
982             sb->minor_version > 91) {
983                 pr_warn("Bad version number %d.%d on %s\n",
984                         sb->major_version, sb->minor_version, b);
985                 goto abort;
986         }
987
988         if (sb->raid_disks <= 0)
989                 goto abort;
990
991         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
992                 pr_warn("md: invalid superblock checksum on %s\n", b);
993                 goto abort;
994         }
995
996         rdev->preferred_minor = sb->md_minor;
997         rdev->data_offset = 0;
998         rdev->new_data_offset = 0;
999         rdev->sb_size = MD_SB_BYTES;
1000         rdev->badblocks.shift = -1;
1001
1002         if (sb->level == LEVEL_MULTIPATH)
1003                 rdev->desc_nr = -1;
1004         else
1005                 rdev->desc_nr = sb->this_disk.number;
1006
1007         if (!refdev) {
1008                 ret = 1;
1009         } else {
1010                 __u64 ev1, ev2;
1011                 mdp_super_t *refsb = page_address(refdev->sb_page);
1012                 if (!uuid_equal(refsb, sb)) {
1013                         pr_warn("md: %s has different UUID to %s\n",
1014                                 b, bdevname(refdev->bdev,b2));
1015                         goto abort;
1016                 }
1017                 if (!sb_equal(refsb, sb)) {
1018                         pr_warn("md: %s has same UUID but different superblock to %s\n",
1019                                 b, bdevname(refdev->bdev, b2));
1020                         goto abort;
1021                 }
1022                 ev1 = md_event(sb);
1023                 ev2 = md_event(refsb);
1024                 if (ev1 > ev2)
1025                         ret = 1;
1026                 else
1027                         ret = 0;
1028         }
1029         rdev->sectors = rdev->sb_start;
1030         /* Limit to 4TB as metadata cannot record more than that.
1031          * (not needed for Linear and RAID0 as metadata doesn't
1032          * record this size)
1033          */
1034         if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
1035             sb->level >= 1)
1036                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1037
1038         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1039                 /* "this cannot possibly happen" ... */
1040                 ret = -EINVAL;
1041
1042  abort:
1043         return ret;
1044 }
1045
1046 /*
1047  * validate_super for 0.90.0
1048  */
1049 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1050 {
1051         mdp_disk_t *desc;
1052         mdp_super_t *sb = page_address(rdev->sb_page);
1053         __u64 ev1 = md_event(sb);
1054
1055         rdev->raid_disk = -1;
1056         clear_bit(Faulty, &rdev->flags);
1057         clear_bit(In_sync, &rdev->flags);
1058         clear_bit(Bitmap_sync, &rdev->flags);
1059         clear_bit(WriteMostly, &rdev->flags);
1060
1061         if (mddev->raid_disks == 0) {
1062                 mddev->major_version = 0;
1063                 mddev->minor_version = sb->minor_version;
1064                 mddev->patch_version = sb->patch_version;
1065                 mddev->external = 0;
1066                 mddev->chunk_sectors = sb->chunk_size >> 9;
1067                 mddev->ctime = sb->ctime;
1068                 mddev->utime = sb->utime;
1069                 mddev->level = sb->level;
1070                 mddev->clevel[0] = 0;
1071                 mddev->layout = sb->layout;
1072                 mddev->raid_disks = sb->raid_disks;
1073                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1074                 mddev->events = ev1;
1075                 mddev->bitmap_info.offset = 0;
1076                 mddev->bitmap_info.space = 0;
1077                 /* bitmap can use 60K after the 4K superblock */
1078                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1079                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1080                 mddev->reshape_backwards = 0;
1081
1082                 if (mddev->minor_version >= 91) {
1083                         mddev->reshape_position = sb->reshape_position;
1084                         mddev->delta_disks = sb->delta_disks;
1085                         mddev->new_level = sb->new_level;
1086                         mddev->new_layout = sb->new_layout;
1087                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1088                         if (mddev->delta_disks < 0)
1089                                 mddev->reshape_backwards = 1;
1090                 } else {
1091                         mddev->reshape_position = MaxSector;
1092                         mddev->delta_disks = 0;
1093                         mddev->new_level = mddev->level;
1094                         mddev->new_layout = mddev->layout;
1095                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1096                 }
1097
1098                 if (sb->state & (1<<MD_SB_CLEAN))
1099                         mddev->recovery_cp = MaxSector;
1100                 else {
1101                         if (sb->events_hi == sb->cp_events_hi &&
1102                                 sb->events_lo == sb->cp_events_lo) {
1103                                 mddev->recovery_cp = sb->recovery_cp;
1104                         } else
1105                                 mddev->recovery_cp = 0;
1106                 }
1107
1108                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1109                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1110                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1111                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1112
1113                 mddev->max_disks = MD_SB_DISKS;
1114
1115                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1116                     mddev->bitmap_info.file == NULL) {
1117                         mddev->bitmap_info.offset =
1118                                 mddev->bitmap_info.default_offset;
1119                         mddev->bitmap_info.space =
1120                                 mddev->bitmap_info.default_space;
1121                 }
1122
1123         } else if (mddev->pers == NULL) {
1124                 /* Insist on good event counter while assembling, except
1125                  * for spares (which don't need an event count) */
1126                 ++ev1;
1127                 if (sb->disks[rdev->desc_nr].state & (
1128                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1129                         if (ev1 < mddev->events)
1130                                 return -EINVAL;
1131         } else if (mddev->bitmap) {
1132                 /* if adding to array with a bitmap, then we can accept an
1133                  * older device ... but not too old.
1134                  */
1135                 if (ev1 < mddev->bitmap->events_cleared)
1136                         return 0;
1137                 if (ev1 < mddev->events)
1138                         set_bit(Bitmap_sync, &rdev->flags);
1139         } else {
1140                 if (ev1 < mddev->events)
1141                         /* just a hot-add of a new device, leave raid_disk at -1 */
1142                         return 0;
1143         }
1144
1145         if (mddev->level != LEVEL_MULTIPATH) {
1146                 desc = sb->disks + rdev->desc_nr;
1147
1148                 if (desc->state & (1<<MD_DISK_FAULTY))
1149                         set_bit(Faulty, &rdev->flags);
1150                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1151                             desc->raid_disk < mddev->raid_disks */) {
1152                         set_bit(In_sync, &rdev->flags);
1153                         rdev->raid_disk = desc->raid_disk;
1154                         rdev->saved_raid_disk = desc->raid_disk;
1155                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1156                         /* active but not in sync implies recovery up to
1157                          * reshape position.  We don't know exactly where
1158                          * that is, so set to zero for now */
1159                         if (mddev->minor_version >= 91) {
1160                                 rdev->recovery_offset = 0;
1161                                 rdev->raid_disk = desc->raid_disk;
1162                         }
1163                 }
1164                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1165                         set_bit(WriteMostly, &rdev->flags);
1166         } else /* MULTIPATH are always insync */
1167                 set_bit(In_sync, &rdev->flags);
1168         return 0;
1169 }
1170
1171 /*
1172  * sync_super for 0.90.0
1173  */
1174 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1175 {
1176         mdp_super_t *sb;
1177         struct md_rdev *rdev2;
1178         int next_spare = mddev->raid_disks;
1179
1180         /* make rdev->sb match mddev data..
1181          *
1182          * 1/ zero out disks
1183          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1184          * 3/ any empty disks < next_spare become removed
1185          *
1186          * disks[0] gets initialised to REMOVED because
1187          * we cannot be sure from other fields if it has
1188          * been initialised or not.
1189          */
1190         int i;
1191         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1192
1193         rdev->sb_size = MD_SB_BYTES;
1194
1195         sb = page_address(rdev->sb_page);
1196
1197         memset(sb, 0, sizeof(*sb));
1198
1199         sb->md_magic = MD_SB_MAGIC;
1200         sb->major_version = mddev->major_version;
1201         sb->patch_version = mddev->patch_version;
1202         sb->gvalid_words  = 0; /* ignored */
1203         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1204         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1205         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1206         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1207
1208         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1209         sb->level = mddev->level;
1210         sb->size = mddev->dev_sectors / 2;
1211         sb->raid_disks = mddev->raid_disks;
1212         sb->md_minor = mddev->md_minor;
1213         sb->not_persistent = 0;
1214         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1215         sb->state = 0;
1216         sb->events_hi = (mddev->events>>32);
1217         sb->events_lo = (u32)mddev->events;
1218
1219         if (mddev->reshape_position == MaxSector)
1220                 sb->minor_version = 90;
1221         else {
1222                 sb->minor_version = 91;
1223                 sb->reshape_position = mddev->reshape_position;
1224                 sb->new_level = mddev->new_level;
1225                 sb->delta_disks = mddev->delta_disks;
1226                 sb->new_layout = mddev->new_layout;
1227                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1228         }
1229         mddev->minor_version = sb->minor_version;
1230         if (mddev->in_sync)
1231         {
1232                 sb->recovery_cp = mddev->recovery_cp;
1233                 sb->cp_events_hi = (mddev->events>>32);
1234                 sb->cp_events_lo = (u32)mddev->events;
1235                 if (mddev->recovery_cp == MaxSector)
1236                         sb->state = (1<< MD_SB_CLEAN);
1237         } else
1238                 sb->recovery_cp = 0;
1239
1240         sb->layout = mddev->layout;
1241         sb->chunk_size = mddev->chunk_sectors << 9;
1242
1243         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1244                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1245
1246         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1247         rdev_for_each(rdev2, mddev) {
1248                 mdp_disk_t *d;
1249                 int desc_nr;
1250                 int is_active = test_bit(In_sync, &rdev2->flags);
1251
1252                 if (rdev2->raid_disk >= 0 &&
1253                     sb->minor_version >= 91)
1254                         /* we have nowhere to store the recovery_offset,
1255                          * but if it is not below the reshape_position,
1256                          * we can piggy-back on that.
1257                          */
1258                         is_active = 1;
1259                 if (rdev2->raid_disk < 0 ||
1260                     test_bit(Faulty, &rdev2->flags))
1261                         is_active = 0;
1262                 if (is_active)
1263                         desc_nr = rdev2->raid_disk;
1264                 else
1265                         desc_nr = next_spare++;
1266                 rdev2->desc_nr = desc_nr;
1267                 d = &sb->disks[rdev2->desc_nr];
1268                 nr_disks++;
1269                 d->number = rdev2->desc_nr;
1270                 d->major = MAJOR(rdev2->bdev->bd_dev);
1271                 d->minor = MINOR(rdev2->bdev->bd_dev);
1272                 if (is_active)
1273                         d->raid_disk = rdev2->raid_disk;
1274                 else
1275                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1276                 if (test_bit(Faulty, &rdev2->flags))
1277                         d->state = (1<<MD_DISK_FAULTY);
1278                 else if (is_active) {
1279                         d->state = (1<<MD_DISK_ACTIVE);
1280                         if (test_bit(In_sync, &rdev2->flags))
1281                                 d->state |= (1<<MD_DISK_SYNC);
1282                         active++;
1283                         working++;
1284                 } else {
1285                         d->state = 0;
1286                         spare++;
1287                         working++;
1288                 }
1289                 if (test_bit(WriteMostly, &rdev2->flags))
1290                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1291         }
1292         /* now set the "removed" and "faulty" bits on any missing devices */
1293         for (i=0 ; i < mddev->raid_disks ; i++) {
1294                 mdp_disk_t *d = &sb->disks[i];
1295                 if (d->state == 0 && d->number == 0) {
1296                         d->number = i;
1297                         d->raid_disk = i;
1298                         d->state = (1<<MD_DISK_REMOVED);
1299                         d->state |= (1<<MD_DISK_FAULTY);
1300                         failed++;
1301                 }
1302         }
1303         sb->nr_disks = nr_disks;
1304         sb->active_disks = active;
1305         sb->working_disks = working;
1306         sb->failed_disks = failed;
1307         sb->spare_disks = spare;
1308
1309         sb->this_disk = sb->disks[rdev->desc_nr];
1310         sb->sb_csum = calc_sb_csum(sb);
1311 }
1312
1313 /*
1314  * rdev_size_change for 0.90.0
1315  */
1316 static unsigned long long
1317 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1318 {
1319         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1320                 return 0; /* component must fit device */
1321         if (rdev->mddev->bitmap_info.offset)
1322                 return 0; /* can't move bitmap */
1323         rdev->sb_start = calc_dev_sboffset(rdev);
1324         if (!num_sectors || num_sectors > rdev->sb_start)
1325                 num_sectors = rdev->sb_start;
1326         /* Limit to 4TB as metadata cannot record more than that.
1327          * 4TB == 2^32 KB, or 2*2^32 sectors.
1328          */
1329         if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
1330             rdev->mddev->level >= 1)
1331                 num_sectors = (sector_t)(2ULL << 32) - 2;
1332         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1333                        rdev->sb_page);
1334         md_super_wait(rdev->mddev);
1335         return num_sectors;
1336 }
1337
1338 static int
1339 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1340 {
1341         /* non-zero offset changes not possible with v0.90 */
1342         return new_offset == 0;
1343 }
1344
1345 /*
1346  * version 1 superblock
1347  */
1348
1349 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1350 {
1351         __le32 disk_csum;
1352         u32 csum;
1353         unsigned long long newcsum;
1354         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1355         __le32 *isuper = (__le32*)sb;
1356
1357         disk_csum = sb->sb_csum;
1358         sb->sb_csum = 0;
1359         newcsum = 0;
1360         for (; size >= 4; size -= 4)
1361                 newcsum += le32_to_cpu(*isuper++);
1362
1363         if (size == 2)
1364                 newcsum += le16_to_cpu(*(__le16*) isuper);
1365
1366         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1367         sb->sb_csum = disk_csum;
1368         return cpu_to_le32(csum);
1369 }
1370
1371 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1372 {
1373         struct mdp_superblock_1 *sb;
1374         int ret;
1375         sector_t sb_start;
1376         sector_t sectors;
1377         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1378         int bmask;
1379
1380         /*
1381          * Calculate the position of the superblock in 512-byte sectors.
1382          * It is always aligned to a 4K boundary and,
1383          * depending on minor_version, it can be:
1384          * 0: At least 8K, but less than 12K, from end of device
1385          * 1: At start of device
1386          * 2: 4K from start of device.
1387          */
1388         switch(minor_version) {
1389         case 0:
1390                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1391                 sb_start -= 8*2;
1392                 sb_start &= ~(sector_t)(4*2-1);
1393                 break;
1394         case 1:
1395                 sb_start = 0;
1396                 break;
1397         case 2:
1398                 sb_start = 8;
1399                 break;
1400         default:
1401                 return -EINVAL;
1402         }
1403         rdev->sb_start = sb_start;
1404
1405         /* superblock is rarely larger than 1K, but it can be larger,
1406          * and it is safe to read 4k, so we do that
1407          */
1408         ret = read_disk_sb(rdev, 4096);
1409         if (ret) return ret;
1410
1411         sb = page_address(rdev->sb_page);
1412
1413         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1414             sb->major_version != cpu_to_le32(1) ||
1415             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1416             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1417             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1418                 return -EINVAL;
1419
1420         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1421                 pr_warn("md: invalid superblock checksum on %s\n",
1422                         bdevname(rdev->bdev,b));
1423                 return -EINVAL;
1424         }
1425         if (le64_to_cpu(sb->data_size) < 10) {
1426                 pr_warn("md: data_size too small on %s\n",
1427                         bdevname(rdev->bdev,b));
1428                 return -EINVAL;
1429         }
1430         if (sb->pad0 ||
1431             sb->pad3[0] ||
1432             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1433                 /* Some padding is non-zero, might be a new feature */
1434                 return -EINVAL;
1435
1436         rdev->preferred_minor = 0xffff;
1437         rdev->data_offset = le64_to_cpu(sb->data_offset);
1438         rdev->new_data_offset = rdev->data_offset;
1439         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1440             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1441                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1442         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1443
1444         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1445         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1446         if (rdev->sb_size & bmask)
1447                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1448
1449         if (minor_version
1450             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1451                 return -EINVAL;
1452         if (minor_version
1453             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1454                 return -EINVAL;
1455
1456         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1457                 rdev->desc_nr = -1;
1458         else
1459                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1460
1461         if (!rdev->bb_page) {
1462                 rdev->bb_page = alloc_page(GFP_KERNEL);
1463                 if (!rdev->bb_page)
1464                         return -ENOMEM;
1465         }
1466         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1467             rdev->badblocks.count == 0) {
1468                 /* need to load the bad block list.
1469                  * Currently we limit it to one page.
1470                  */
1471                 s32 offset;
1472                 sector_t bb_sector;
1473                 u64 *bbp;
1474                 int i;
1475                 int sectors = le16_to_cpu(sb->bblog_size);
1476                 if (sectors > (PAGE_SIZE / 512))
1477                         return -EINVAL;
1478                 offset = le32_to_cpu(sb->bblog_offset);
1479                 if (offset == 0)
1480                         return -EINVAL;
1481                 bb_sector = (long long)offset;
1482                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1483                                   rdev->bb_page, REQ_OP_READ, 0, true))
1484                         return -EIO;
1485                 bbp = (u64 *)page_address(rdev->bb_page);
1486                 rdev->badblocks.shift = sb->bblog_shift;
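                     /* Each on-disk entry packs the start sector in the high 54 bits
                      * and the length in the low 10 bits, both scaled by bblog_shift;
                      * an all-ones entry terminates the list.
                      */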
1487                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1488                         u64 bb = le64_to_cpu(*bbp);
1489                         int count = bb & (0x3ff);
1490                         u64 sector = bb >> 10;
1491                         sector <<= sb->bblog_shift;
1492                         count <<= sb->bblog_shift;
1493                         if (bb + 1 == 0)
1494                                 break;
1495                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1496                                 return -EINVAL;
1497                 }
1498         } else if (sb->bblog_offset != 0)
1499                 rdev->badblocks.shift = 0;
1500
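             /* Return 1 if this superblock should become the new reference
              * (no refdev yet, or a higher event count), 0 otherwise.
              */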
1501         if (!refdev) {
1502                 ret = 1;
1503         } else {
1504                 __u64 ev1, ev2;
1505                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1506
1507                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1508                     sb->level != refsb->level ||
1509                     sb->layout != refsb->layout ||
1510                     sb->chunksize != refsb->chunksize) {
1511                         pr_warn("md: %s has strangely different superblock to %s\n",
1512                                 bdevname(rdev->bdev,b),
1513                                 bdevname(refdev->bdev,b2));
1514                         return -EINVAL;
1515                 }
1516                 ev1 = le64_to_cpu(sb->events);
1517                 ev2 = le64_to_cpu(refsb->events);
1518
1519                 if (ev1 > ev2)
1520                         ret = 1;
1521                 else
1522                         ret = 0;
1523         }
1524         if (minor_version) {
1525                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1526                 sectors -= rdev->data_offset;
1527         } else
1528                 sectors = rdev->sb_start;
1529         if (sectors < le64_to_cpu(sb->data_size))
1530                 return -EINVAL;
1531         rdev->sectors = le64_to_cpu(sb->data_size);
1532         return ret;
1533 }
1534
1535 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1536 {
1537         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1538         __u64 ev1 = le64_to_cpu(sb->events);
1539
1540         rdev->raid_disk = -1;
1541         clear_bit(Faulty, &rdev->flags);
1542         clear_bit(In_sync, &rdev->flags);
1543         clear_bit(Bitmap_sync, &rdev->flags);
1544         clear_bit(WriteMostly, &rdev->flags);
1545
1546         if (mddev->raid_disks == 0) {
1547                 mddev->major_version = 1;
1548                 mddev->patch_version = 0;
1549                 mddev->external = 0;
1550                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1551                 mddev->ctime = le64_to_cpu(sb->ctime);
1552                 mddev->utime = le64_to_cpu(sb->utime);
1553                 mddev->level = le32_to_cpu(sb->level);
1554                 mddev->clevel[0] = 0;
1555                 mddev->layout = le32_to_cpu(sb->layout);
1556                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1557                 mddev->dev_sectors = le64_to_cpu(sb->size);
1558                 mddev->events = ev1;
1559                 mddev->bitmap_info.offset = 0;
1560                 mddev->bitmap_info.space = 0;
1561                 /* Default location for the bitmap is 1K after the superblock,
1562                  * using the remaining 3K of the 4K reserved area.
1563                  */
1564                 mddev->bitmap_info.default_offset = 1024 >> 9;
1565                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1566                 mddev->reshape_backwards = 0;
1567
1568                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1569                 memcpy(mddev->uuid, sb->set_uuid, 16);
1570
1571                 mddev->max_disks =  (4096-256)/2;
1572
1573                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1574                     mddev->bitmap_info.file == NULL) {
1575                         mddev->bitmap_info.offset =
1576                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1577                         /* Metadata doesn't record how much space is available.
1578                          * For 1.0, assume the bitmap may extend up to the superblock
1579                          * if it sits before it, else up to 4K beyond the superblock.
1580                          * For other versions, assume no change is possible.
1581                          */
1582                         if (mddev->minor_version > 0)
1583                                 mddev->bitmap_info.space = 0;
1584                         else if (mddev->bitmap_info.offset > 0)
1585                                 mddev->bitmap_info.space =
1586                                         8 - mddev->bitmap_info.offset;
1587                         else
1588                                 mddev->bitmap_info.space =
1589                                         -mddev->bitmap_info.offset;
1590                 }
1591
1592                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1593                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1594                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1595                         mddev->new_level = le32_to_cpu(sb->new_level);
1596                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1597                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1598                         if (mddev->delta_disks < 0 ||
1599                             (mddev->delta_disks == 0 &&
1600                              (le32_to_cpu(sb->feature_map)
1601                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1602                                 mddev->reshape_backwards = 1;
1603                 } else {
1604                         mddev->reshape_position = MaxSector;
1605                         mddev->delta_disks = 0;
1606                         mddev->new_level = mddev->level;
1607                         mddev->new_layout = mddev->layout;
1608                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1609                 }
1610
1611                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1612                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1613         } else if (mddev->pers == NULL) {
1614                 /* Insist on a good event counter while assembling, except for
1615                  * spares (which don't need an event count) */
1616                 ++ev1;
1617                 if (rdev->desc_nr >= 0 &&
1618                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1619                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1620                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1621                         if (ev1 < mddev->events)
1622                                 return -EINVAL;
1623         } else if (mddev->bitmap) {
1624                 /* If adding to array with a bitmap, then we can accept an
1625                  * older device, but not too old.
1626                  */
1627                 if (ev1 < mddev->bitmap->events_cleared)
1628                         return 0;
1629                 if (ev1 < mddev->events)
1630                         set_bit(Bitmap_sync, &rdev->flags);
1631         } else {
1632                 if (ev1 < mddev->events)
1633                         /* just a hot-add of a new device, leave raid_disk at -1 */
1634                         return 0;
1635         }
1636         if (mddev->level != LEVEL_MULTIPATH) {
1637                 int role;
1638                 if (rdev->desc_nr < 0 ||
1639                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1640                         role = MD_DISK_ROLE_SPARE;
1641                         rdev->desc_nr = -1;
1642                 } else
1643                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1644                 switch(role) {
1645                 case MD_DISK_ROLE_SPARE: /* spare */
1646                         break;
1647                 case MD_DISK_ROLE_FAULTY: /* faulty */
1648                         set_bit(Faulty, &rdev->flags);
1649                         break;
1650                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1651                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1652                                 /* journal device without journal feature */
1653                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1654                                 return -EINVAL;
1655                         }
1656                         set_bit(Journal, &rdev->flags);
1657                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1658                         rdev->raid_disk = 0;
1659                         break;
1660                 default:
1661                         rdev->saved_raid_disk = role;
1662                         if ((le32_to_cpu(sb->feature_map) &
1663                              MD_FEATURE_RECOVERY_OFFSET)) {
1664                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1665                                 if (!(le32_to_cpu(sb->feature_map) &
1666                                       MD_FEATURE_RECOVERY_BITMAP))
1667                                         rdev->saved_raid_disk = -1;
1668                         } else
1669                                 set_bit(In_sync, &rdev->flags);
1670                         rdev->raid_disk = role;
1671                         break;
1672                 }
1673                 if (sb->devflags & WriteMostly1)
1674                         set_bit(WriteMostly, &rdev->flags);
1675                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1676                         set_bit(Replacement, &rdev->flags);
1677         } else /* MULTIPATH are always insync */
1678                 set_bit(In_sync, &rdev->flags);
1679
1680         return 0;
1681 }
1682
1683 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1684 {
1685         struct mdp_superblock_1 *sb;
1686         struct md_rdev *rdev2;
1687         int max_dev, i;
1688         /* make rdev->sb match mddev and rdev data. */
1689
1690         sb = page_address(rdev->sb_page);
1691
1692         sb->feature_map = 0;
1693         sb->pad0 = 0;
1694         sb->recovery_offset = cpu_to_le64(0);
1695         memset(sb->pad3, 0, sizeof(sb->pad3));
1696
1697         sb->utime = cpu_to_le64((__u64)mddev->utime);
1698         sb->events = cpu_to_le64(mddev->events);
1699         if (mddev->in_sync)
1700                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1701         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1702                 sb->resync_offset = cpu_to_le64(MaxSector);
1703         else
1704                 sb->resync_offset = cpu_to_le64(0);
1705
1706         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1707
1708         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1709         sb->size = cpu_to_le64(mddev->dev_sectors);
1710         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1711         sb->level = cpu_to_le32(mddev->level);
1712         sb->layout = cpu_to_le32(mddev->layout);
1713
1714         if (test_bit(WriteMostly, &rdev->flags))
1715                 sb->devflags |= WriteMostly1;
1716         else
1717                 sb->devflags &= ~WriteMostly1;
1718         sb->data_offset = cpu_to_le64(rdev->data_offset);
1719         sb->data_size = cpu_to_le64(rdev->sectors);
1720
1721         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1722                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1723                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1724         }
1725
1726         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1727             !test_bit(In_sync, &rdev->flags)) {
1728                 sb->feature_map |=
1729                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1730                 sb->recovery_offset =
1731                         cpu_to_le64(rdev->recovery_offset);
1732                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1733                         sb->feature_map |=
1734                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1735         }
1736         /* Note: recovery_offset and journal_tail share space  */
1737         if (test_bit(Journal, &rdev->flags))
1738                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1739         if (test_bit(Replacement, &rdev->flags))
1740                 sb->feature_map |=
1741                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1742
1743         if (mddev->reshape_position != MaxSector) {
1744                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1745                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1746                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1747                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1748                 sb->new_level = cpu_to_le32(mddev->new_level);
1749                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1750                 if (mddev->delta_disks == 0 &&
1751                     mddev->reshape_backwards)
1752                         sb->feature_map
1753                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1754                 if (rdev->new_data_offset != rdev->data_offset) {
1755                         sb->feature_map
1756                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1757                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1758                                                              - rdev->data_offset));
1759                 }
1760         }
1761
1762         if (mddev_is_clustered(mddev))
1763                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
1764
1765         if (rdev->badblocks.count == 0)
1766                 /* Nothing to do for bad blocks*/ ;
1767         else if (sb->bblog_offset == 0)
1768                 /* Cannot record bad blocks on this device */
1769                 md_error(mddev, rdev);
1770         else {
1771                 struct badblocks *bb = &rdev->badblocks;
1772                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1773                 u64 *p = bb->page;
1774                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1775                 if (bb->changed) {
1776                         unsigned seq;
1777
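                             /* Snapshot the in-memory bad-block list under the seqlock and
                              * retry if it changes mid-copy; entries are written in the same
                              * packed (sector << 10 | length) format that super_1_load decodes.
                              */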
1778 retry:
1779                         seq = read_seqbegin(&bb->lock);
1780
1781                         memset(bbp, 0xff, PAGE_SIZE);
1782
1783                         for (i = 0 ; i < bb->count ; i++) {
1784                                 u64 internal_bb = p[i];
1785                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1786                                                 | BB_LEN(internal_bb));
1787                                 bbp[i] = cpu_to_le64(store_bb);
1788                         }
1789                         bb->changed = 0;
1790                         if (read_seqretry(&bb->lock, seq))
1791                                 goto retry;
1792
1793                         bb->sector = (rdev->sb_start +
1794                                       (int)le32_to_cpu(sb->bblog_offset));
1795                         bb->size = le16_to_cpu(sb->bblog_size);
1796                 }
1797         }
1798
1799         max_dev = 0;
1800         rdev_for_each(rdev2, mddev)
1801                 if (rdev2->desc_nr+1 > max_dev)
1802                         max_dev = rdev2->desc_nr+1;
1803
1804         if (max_dev > le32_to_cpu(sb->max_dev)) {
1805                 int bmask;
1806                 sb->max_dev = cpu_to_le32(max_dev);
1807                 rdev->sb_size = max_dev * 2 + 256;
1808                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1809                 if (rdev->sb_size & bmask)
1810                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1811         } else
1812                 max_dev = le32_to_cpu(sb->max_dev);
1813
1814         for (i=0; i<max_dev;i++)
1815                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1816
1817         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
1818                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
1819
1820         rdev_for_each(rdev2, mddev) {
1821                 i = rdev2->desc_nr;
1822                 if (test_bit(Faulty, &rdev2->flags))
1823                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1824                 else if (test_bit(In_sync, &rdev2->flags))
1825                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1826                 else if (test_bit(Journal, &rdev2->flags))
1827                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
1828                 else if (rdev2->raid_disk >= 0)
1829                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1830                 else
1831                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
1832         }
1833
1834         sb->sb_csum = calc_sb_1_csum(sb);
1835 }
1836
1837 static unsigned long long
1838 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1839 {
1840         struct mdp_superblock_1 *sb;
1841         sector_t max_sectors;
1842         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1843                 return 0; /* component must fit device */
1844         if (rdev->data_offset != rdev->new_data_offset)
1845                 return 0; /* too confusing */
1846         if (rdev->sb_start < rdev->data_offset) {
1847                 /* minor versions 1 and 2; superblock before data */
1848                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1849                 max_sectors -= rdev->data_offset;
1850                 if (!num_sectors || num_sectors > max_sectors)
1851                         num_sectors = max_sectors;
1852         } else if (rdev->mddev->bitmap_info.offset) {
1853                 /* minor version 0 with bitmap we can't move */
1854                 return 0;
1855         } else {
1856                 /* minor version 0; superblock after data */
1857                 sector_t sb_start;
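                     /* Re-place the superblock 8K (16 sectors) before the new end of
                      * the device, aligned down to 4K, matching the rule super_1_load
                      * uses for minor version 0.
                      */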
1858                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1859                 sb_start &= ~(sector_t)(4*2 - 1);
1860                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1861                 if (!num_sectors || num_sectors > max_sectors)
1862                         num_sectors = max_sectors;
1863                 rdev->sb_start = sb_start;
1864         }
1865         sb = page_address(rdev->sb_page);
1866         sb->data_size = cpu_to_le64(num_sectors);
1867         sb->super_offset = cpu_to_le64(rdev->sb_start);
1868         sb->sb_csum = calc_sb_1_csum(sb);
1869         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1870                        rdev->sb_page);
1871         md_super_wait(rdev->mddev);
1872         return num_sectors;
1873
1874 }
1875
1876 static int
1877 super_1_allow_new_offset(struct md_rdev *rdev,
1878                          unsigned long long new_offset)
1879 {
1880         /* All necessary checks on new >= old have been done */
1881         struct bitmap *bitmap;
1882         if (new_offset >= rdev->data_offset)
1883                 return 1;
1884
1885         /* with 1.0 metadata, there is no metadata to tread on
1886          * so we can always move back */
1887         if (rdev->mddev->minor_version == 0)
1888                 return 1;
1889
1890         /* otherwise we must be sure not to step on
1891          * any metadata, so stay:
1892          * 36K beyond start of superblock
1893          * beyond end of badblocks
1894          * beyond write-intent bitmap
1895          */
1896         if (rdev->sb_start + (32+4)*2 > new_offset)
1897                 return 0;
1898         bitmap = rdev->mddev->bitmap;
1899         if (bitmap && !rdev->mddev->bitmap_info.file &&
1900             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1901             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1902                 return 0;
1903         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1904                 return 0;
1905
1906         return 1;
1907 }
1908
1909 static struct super_type super_types[] = {
1910         [0] = {
1911                 .name   = "0.90.0",
1912                 .owner  = THIS_MODULE,
1913                 .load_super         = super_90_load,
1914                 .validate_super     = super_90_validate,
1915                 .sync_super         = super_90_sync,
1916                 .rdev_size_change   = super_90_rdev_size_change,
1917                 .allow_new_offset   = super_90_allow_new_offset,
1918         },
1919         [1] = {
1920                 .name   = "md-1",
1921                 .owner  = THIS_MODULE,
1922                 .load_super         = super_1_load,
1923                 .validate_super     = super_1_validate,
1924                 .sync_super         = super_1_sync,
1925                 .rdev_size_change   = super_1_rdev_size_change,
1926                 .allow_new_offset   = super_1_allow_new_offset,
1927         },
1928 };
1929
1930 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1931 {
1932         if (mddev->sync_super) {
1933                 mddev->sync_super(mddev, rdev);
1934                 return;
1935         }
1936
1937         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1938
1939         super_types[mddev->major_version].sync_super(mddev, rdev);
1940 }
1941
1942 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1943 {
1944         struct md_rdev *rdev, *rdev2;
1945
1946         rcu_read_lock();
1947         rdev_for_each_rcu(rdev, mddev1) {
1948                 if (test_bit(Faulty, &rdev->flags) ||
1949                     test_bit(Journal, &rdev->flags) ||
1950                     rdev->raid_disk == -1)
1951                         continue;
1952                 rdev_for_each_rcu(rdev2, mddev2) {
1953                         if (test_bit(Faulty, &rdev2->flags) ||
1954                             test_bit(Journal, &rdev2->flags) ||
1955                             rdev2->raid_disk == -1)
1956                                 continue;
1957                         if (rdev->bdev->bd_contains ==
1958                             rdev2->bdev->bd_contains) {
1959                                 rcu_read_unlock();
1960                                 return 1;
1961                         }
1962                 }
1963         }
1964         rcu_read_unlock();
1965         return 0;
1966 }
1967
1968 static LIST_HEAD(pending_raid_disks);
1969
1970 /*
1971  * Try to register data integrity profile for an mddev
1972  *
1973  * This is called when an array is started and after a disk has been kicked
1974  * from the array. It only succeeds if all working and active component devices
1975  * are integrity capable with matching profiles.
1976  */
1977 int md_integrity_register(struct mddev *mddev)
1978 {
1979         struct md_rdev *rdev, *reference = NULL;
1980
1981         if (list_empty(&mddev->disks))
1982                 return 0; /* nothing to do */
1983         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1984                 return 0; /* shouldn't register, or already is */
1985         rdev_for_each(rdev, mddev) {
1986                 /* skip spares and non-functional disks */
1987                 if (test_bit(Faulty, &rdev->flags))
1988                         continue;
1989                 if (rdev->raid_disk < 0)
1990                         continue;
1991                 if (!reference) {
1992                         /* Use the first rdev as the reference */
1993                         reference = rdev;
1994                         continue;
1995                 }
1996                 /* does this rdev's profile match the reference profile? */
1997                 if (blk_integrity_compare(reference->bdev->bd_disk,
1998                                 rdev->bdev->bd_disk) < 0)
1999                         return -EINVAL;
2000         }
2001         if (!reference || !bdev_get_integrity(reference->bdev))
2002                 return 0;
2003         /*
2004          * All component devices are integrity capable and have matching
2005          * profiles, register the common profile for the md device.
2006          */
2007         blk_integrity_register(mddev->gendisk,
2008                                bdev_get_integrity(reference->bdev));
2009
2010         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2011         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2012                 pr_err("md: failed to create integrity pool for %s\n",
2013                        mdname(mddev));
2014                 return -EINVAL;
2015         }
2016         return 0;
2017 }
2018 EXPORT_SYMBOL(md_integrity_register);
2019
2020 /*
2021  * Attempt to add an rdev, but only if it is consistent with the current
2022  * integrity profile
2023  */
2024 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2025 {
2026         struct blk_integrity *bi_rdev;
2027         struct blk_integrity *bi_mddev;
2028         char name[BDEVNAME_SIZE];
2029
2030         if (!mddev->gendisk)
2031                 return 0;
2032
2033         bi_rdev = bdev_get_integrity(rdev->bdev);
2034         bi_mddev = blk_get_integrity(mddev->gendisk);
2035
2036         if (!bi_mddev) /* nothing to do */
2037                 return 0;
2038
2039         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2040                 pr_err("%s: incompatible integrity profile for %s\n",
2041                        mdname(mddev), bdevname(rdev->bdev, name));
2042                 return -ENXIO;
2043         }
2044
2045         return 0;
2046 }
2047 EXPORT_SYMBOL(md_integrity_add_rdev);
2048
2049 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2050 {
2051         char b[BDEVNAME_SIZE];
2052         struct kobject *ko;
2053         int err;
2054
2055         /* prevent duplicates */
2056         if (find_rdev(mddev, rdev->bdev->bd_dev))
2057                 return -EEXIST;
2058
2059         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2060         if (!test_bit(Journal, &rdev->flags) &&
2061             rdev->sectors &&
2062             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2063                 if (mddev->pers) {
2064                         /* Cannot change size, so fail
2065                         /* Cannot change size, so fail.
2066                          * If mddev->level <= 0, then we don't care
2067                          * about aligning sizes (e.g. linear).
2068                         if (mddev->level > 0)
2069                                 return -ENOSPC;
2070                 } else
2071                         mddev->dev_sectors = rdev->sectors;
2072         }
2073
2074         /* Verify rdev->desc_nr is unique.
2075          * If it is -1, assign a free number, else
2076          * check that the number is not already in use.
2077          */
2078         rcu_read_lock();
2079         if (rdev->desc_nr < 0) {
2080                 int choice = 0;
2081                 if (mddev->pers)
2082                         choice = mddev->raid_disks;
2083                 while (md_find_rdev_nr_rcu(mddev, choice))
2084                         choice++;
2085                 rdev->desc_nr = choice;
2086         } else {
2087                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2088                         rcu_read_unlock();
2089                         return -EBUSY;
2090                 }
2091         }
2092         rcu_read_unlock();
2093         if (!test_bit(Journal, &rdev->flags) &&
2094             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2095                 pr_warn("md: %s: array is limited to %d devices\n",
2096                         mdname(mddev), mddev->max_disks);
2097                 return -EBUSY;
2098         }
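             /* bdevname() may contain '/' (e.g. "cciss/c0d0"); replace it so the
              * name can be used as a sysfs directory component ("dev-cciss!c0d0").
              */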
2099         bdevname(rdev->bdev,b);
2100         strreplace(b, '/', '!');
2101
2102         rdev->mddev = mddev;
2103         pr_debug("md: bind<%s>\n", b);
2104
2105         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2106                 goto fail;
2107
2108         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2109         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2110                 /* failure here is OK */;
2111         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2112
2113         list_add_rcu(&rdev->same_set, &mddev->disks);
2114         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2115
2116         /* May as well allow recovery to be retried once */
2117         mddev->recovery_disabled++;
2118
2119         return 0;
2120
2121  fail:
2122         pr_warn("md: failed to register dev-%s for %s\n",
2123                 b, mdname(mddev));
2124         return err;
2125 }
2126
2127 static void md_delayed_delete(struct work_struct *ws)
2128 {
2129         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2130         kobject_del(&rdev->kobj);
2131         kobject_put(&rdev->kobj);
2132 }
2133
2134 static void unbind_rdev_from_array(struct md_rdev *rdev)
2135 {
2136         char b[BDEVNAME_SIZE];
2137
2138         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2139         list_del_rcu(&rdev->same_set);
2140         pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2141         rdev->mddev = NULL;
2142         sysfs_remove_link(&rdev->kobj, "block");
2143         sysfs_put(rdev->sysfs_state);
2144         rdev->sysfs_state = NULL;
2145         rdev->badblocks.count = 0;
2146         /* We need to delay this, otherwise we can deadlock when
2147          * writing 'remove' to "dev/state".  We also need
2148          * to delay it due to rcu usage.
2149          */
2150         synchronize_rcu();
2151         INIT_WORK(&rdev->del_work, md_delayed_delete);
2152         kobject_get(&rdev->kobj);
2153         queue_work(md_misc_wq, &rdev->del_work);
2154 }
2155
2156 /*
2157  * prevent the device from being mounted, repartitioned or
2158  * otherwise reused by a RAID array (or any other kernel
2159  * subsystem), by bd_claiming the device.
2160  */
2161 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2162 {
2163         int err = 0;
2164         struct block_device *bdev;
2165         char b[BDEVNAME_SIZE];
2166
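             /* For a shared claim, use a constant cookie (the address of lock_rdev)
              * as the exclusive holder so several arrays may open the same device;
              * otherwise the rdev itself is the holder.
              */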
2167         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2168                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2169         if (IS_ERR(bdev)) {
2170                 pr_warn("md: could not open %s.\n", __bdevname(dev, b));
2171                 return PTR_ERR(bdev);
2172         }
2173         rdev->bdev = bdev;
2174         return err;
2175 }
2176
2177 static void unlock_rdev(struct md_rdev *rdev)
2178 {
2179         struct block_device *bdev = rdev->bdev;
2180         rdev->bdev = NULL;
2181         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2182 }
2183
2184 void md_autodetect_dev(dev_t dev);
2185
2186 static void export_rdev(struct md_rdev *rdev)
2187 {
2188         char b[BDEVNAME_SIZE];
2189
2190         pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2191         md_rdev_clear(rdev);
2192 #ifndef MODULE
2193         if (test_bit(AutoDetected, &rdev->flags))
2194                 md_autodetect_dev(rdev->bdev->bd_dev);
2195 #endif
2196         unlock_rdev(rdev);
2197         kobject_put(&rdev->kobj);
2198 }
2199
2200 void md_kick_rdev_from_array(struct md_rdev *rdev)
2201 {
2202         unbind_rdev_from_array(rdev);
2203         export_rdev(rdev);
2204 }
2205 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2206
2207 static void export_array(struct mddev *mddev)
2208 {
2209         struct md_rdev *rdev;
2210
2211         while (!list_empty(&mddev->disks)) {
2212                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2213                                         same_set);
2214                 md_kick_rdev_from_array(rdev);
2215         }
2216         mddev->raid_disks = 0;
2217         mddev->major_version = 0;
2218 }
2219
2220 static void sync_sbs(struct mddev *mddev, int nospares)
2221 {
2222         /* Update each superblock (in-memory image), but
2223          * if we are allowed to, skip spares which already
2224          * have the right event counter, or have one earlier
2225          * (which would mean they aren't being marked as dirty
2226          * with the rest of the array)
2227          */
2228         struct md_rdev *rdev;
2229         rdev_for_each(rdev, mddev) {
2230                 if (rdev->sb_events == mddev->events ||
2231                     (nospares &&
2232                      rdev->raid_disk < 0 &&
2233                      rdev->sb_events+1 == mddev->events)) {
2234                         /* Don't update this superblock */
2235                         rdev->sb_loaded = 2;
2236                 } else {
2237                         sync_super(mddev, rdev);
2238                         rdev->sb_loaded = 1;
2239                 }
2240         }
2241 }
2242
2243 static bool does_sb_need_changing(struct mddev *mddev)
2244 {
2245         struct md_rdev *rdev;
2246         struct mdp_superblock_1 *sb;
2247         int role;
2248
2249         /* Find a good rdev */
2250         rdev_for_each(rdev, mddev)
2251                 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2252                         break;
2253
2254         /* No good device found. */
2255         if (!rdev)
2256                 return false;
2257
2258         sb = page_address(rdev->sb_page);
2259         /* Check if a device has become faulty or a spare become active */
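             /* dev_roles[] holds raw role values: 0xffff is MD_DISK_ROLE_SPARE,
              * 0xfffe faulty, 0xfffd journal; anything below that is an active slot.
              */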
2260         rdev_for_each(rdev, mddev) {
2261                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2262                 /* Device activated? */
2263                 if (role == 0xffff && rdev->raid_disk >=0 &&
2264                     !test_bit(Faulty, &rdev->flags))
2265                         return true;
2266                 /* Device turned faulty? */
2267                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2268                         return true;
2269         }
2270
2271         /* Check if any mddev parameters have changed */
2272         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2273             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2274             (mddev->layout != le32_to_cpu(sb->layout)) ||
2275             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2276             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2277                 return true;
2278
2279         return false;
2280 }
2281
2282 void md_update_sb(struct mddev *mddev, int force_change)
2283 {
2284         struct md_rdev *rdev;
2285         int sync_req;
2286         int nospares = 0;
2287         int any_badblocks_changed = 0;
2288         int ret = -1;
2289
2290         if (mddev->ro) {
2291                 if (force_change)
2292                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2293                 return;
2294         }
2295
2296 repeat:
2297         if (mddev_is_clustered(mddev)) {
2298                 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2299                         force_change = 1;
2300                 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2301                         nospares = 1;
2302                 ret = md_cluster_ops->metadata_update_start(mddev);
2303                 /* Has someone else already updated the sb? */
2304                 if (!does_sb_need_changing(mddev)) {
2305                         if (ret == 0)
2306                                 md_cluster_ops->metadata_update_cancel(mddev);
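                             /* Drop MD_CHANGE_PENDING unless another DEVS/CLEAN change
                              * arrived while we were checking.
                              */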
2307                         bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
2308                                                          BIT(MD_CHANGE_DEVS) |
2309                                                          BIT(MD_CHANGE_CLEAN));
2310                         return;
2311                 }
2312         }
2313
2314         /* First make sure individual recovery_offsets are correct */
2315         rdev_for_each(rdev, mddev) {
2316                 if (rdev->raid_disk >= 0 &&
2317                     mddev->delta_disks >= 0 &&
2318                     !test_bit(Journal, &rdev->flags) &&
2319                     !test_bit(In_sync, &rdev->flags) &&
2320                     mddev->curr_resync_completed > rdev->recovery_offset)
2321                                 rdev->recovery_offset = mddev->curr_resync_completed;
2322
2323         }
2324         if (!mddev->persistent) {
2325                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2326                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2327                 if (!mddev->external) {
2328                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2329                         rdev_for_each(rdev, mddev) {
2330                                 if (rdev->badblocks.changed) {
2331                                         rdev->badblocks.changed = 0;
2332                                         ack_all_badblocks(&rdev->badblocks);
2333                                         md_error(mddev, rdev);
2334                                 }
2335                                 clear_bit(Blocked, &rdev->flags);
2336                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2337                                 wake_up(&rdev->blocked_wait);
2338                         }
2339                 }
2340                 wake_up(&mddev->sb_wait);
2341                 return;
2342         }
2343
2344         spin_lock(&mddev->lock);
2345
2346         mddev->utime = ktime_get_real_seconds();
2347
2348         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2349                 force_change = 1;
2350         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2351                 /* just a clean <-> dirty transition; possibly leave spares alone,
2352                  * though if the event count has the wrong even/odd parity we will
2353                  * have to update the spares after all
2354                  */
2355                 nospares = 1;
2356         if (force_change)
2357                 nospares = 0;
2358         if (mddev->degraded)
2359                 /* If the array is degraded, then skipping spares is both
2360                  * dangerous and fairly pointless.
2361                  * Dangerous because a device that was removed from the array
2362                  * might have an event_count that still looks up-to-date,
2363                  * so it can be re-added without a resync.
2364                  * Pointless because if there are any spares to skip,
2365                  * then a recovery will happen, soon the array won't be
2366                  * degraded any more, and the spares can go back to sleep.
2367                  */
2368                 nospares = 0;
2369
2370         sync_req = mddev->in_sync;
2371
2372         /* If this is just a dirty<->clean transition, and the array is clean
2373          * and 'events' is odd, we can roll back to the previous clean state */
2374         if (nospares
2375             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2376             && mddev->can_decrease_events
2377             && mddev->events != 1) {
2378                 mddev->events--;
2379                 mddev->can_decrease_events = 0;
2380         } else {
2381                 /* otherwise we have to go forward and ... */
2382                 mddev->events ++;
2383                 mddev->can_decrease_events = nospares;
2384         }
2385
2386         /*
2387          * This 64-bit counter should never wrap.
2388          * Either we are in around ~1 trillion A.C., assuming
2389          * 1 reboot per second, or we have a bug...
2390          */
2391         WARN_ON(mddev->events == 0);
2392
2393         rdev_for_each(rdev, mddev) {
2394                 if (rdev->badblocks.changed)
2395                         any_badblocks_changed++;
2396                 if (test_bit(Faulty, &rdev->flags))
2397                         set_bit(FaultRecorded, &rdev->flags);
2398         }
2399
2400         sync_sbs(mddev, nospares);
2401         spin_unlock(&mddev->lock);
2402
2403         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2404                  mdname(mddev), mddev->in_sync);
2405
2406         bitmap_update_sb(mddev->bitmap);
2407         rdev_for_each(rdev, mddev) {
2408                 char b[BDEVNAME_SIZE];
2409
2410                 if (rdev->sb_loaded != 1)
2411                         continue; /* no noise on spare devices */
2412
2413                 if (!test_bit(Faulty, &rdev->flags)) {
2414                         md_super_write(mddev,rdev,
2415                                        rdev->sb_start, rdev->sb_size,
2416                                        rdev->sb_page);
2417                         pr_debug("md: (write) %s's sb offset: %llu\n",
2418                                  bdevname(rdev->bdev, b),
2419                                  (unsigned long long)rdev->sb_start);
2420                         rdev->sb_events = mddev->events;
2421                         if (rdev->badblocks.size) {
2422                                 md_super_write(mddev, rdev,
2423                                                rdev->badblocks.sector,
2424                                                rdev->badblocks.size << 9,
2425                                                rdev->bb_page);
2426                                 rdev->badblocks.size = 0;
2427                         }
2428
2429                 } else
2430                         pr_debug("md: %s (skipping faulty)\n",
2431                                  bdevname(rdev->bdev, b));
2432
2433                 if (mddev->level == LEVEL_MULTIPATH)
2434                         /* only need to write one superblock... */
2435                         break;
2436         }
2437         md_super_wait(mddev);
2438         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2439
2440         if (mddev_is_clustered(mddev) && ret == 0)
2441                 md_cluster_ops->metadata_update_finish(mddev);
2442
2443         if (mddev->in_sync != sync_req ||
2444             !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
2445                                BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
2446                 /* have to write it out again */
2447                 goto repeat;
2448         wake_up(&mddev->sb_wait);
2449         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2450                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2451
2452         rdev_for_each(rdev, mddev) {
2453                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2454                         clear_bit(Blocked, &rdev->flags);
2455
2456                 if (any_badblocks_changed)
2457                         ack_all_badblocks(&rdev->badblocks);
2458                 clear_bit(BlockedBadBlocks, &rdev->flags);
2459                 wake_up(&rdev->blocked_wait);
2460         }
2461 }
2462 EXPORT_SYMBOL(md_update_sb);
2463
2464 static int add_bound_rdev(struct md_rdev *rdev)
2465 {
2466         struct mddev *mddev = rdev->mddev;
2467         int err = 0;
2468         bool add_journal = test_bit(Journal, &rdev->flags);
2469
2470         if (!mddev->pers->hot_remove_disk || add_journal) {
2471                 /* If there is hot_add_disk but no hot_remove_disk,
2472                  * then added disks are for geometry changes only
2473                  * and should be added immediately.
2474                  */
2475                 super_types[mddev->major_version].
2476                         validate_super(mddev, rdev);
2477                 if (add_journal)
2478                         mddev_suspend(mddev);
2479                 err = mddev->pers->hot_add_disk(mddev, rdev);
2480                 if (add_journal)
2481                         mddev_resume(mddev);
2482                 if (err) {
2483                         md_kick_rdev_from_array(rdev);
2484                         return err;
2485                 }
2486         }
2487         sysfs_notify_dirent_safe(rdev->sysfs_state);
2488
2489         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2490         if (mddev->degraded)
2491                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2492         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2493         md_new_event(mddev);
2494         md_wakeup_thread(mddev->thread);
2495         return 0;
2496 }
2497
2498 /* words written to sysfs files may, or may not, be \n terminated.
2499  * We want to accept either case. For this we use cmd_match.
2500  */
2501 static int cmd_match(const char *cmd, const char *str)
2502 {
2503         /* See if cmd, written into a sysfs file, matches
2504          * str.  They must either be the same, or cmd can
2505          * have a trailing newline
2506          */
2507         while (*cmd && *str && *cmd == *str) {
2508                 cmd++;
2509                 str++;
2510         }
2511         if (*cmd == '\n')
2512                 cmd++;
2513         if (*str || *cmd)
2514                 return 0;
2515         return 1;
2516 }
2517
2518 struct rdev_sysfs_entry {
2519         struct attribute attr;
2520         ssize_t (*show)(struct md_rdev *, char *);
2521         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2522 };
2523
2524 static ssize_t
2525 state_show(struct md_rdev *rdev, char *page)
2526 {
2527         char *sep = ",";
2528         size_t len = 0;
2529         unsigned long flags = ACCESS_ONCE(rdev->flags);
2530
2531         if (test_bit(Faulty, &flags) ||
2532             (!test_bit(ExternalBbl, &flags) &&
2533             rdev->badblocks.unacked_exist))
2534                 len += sprintf(page+len, "faulty%s", sep);
2535         if (test_bit(In_sync, &flags))
2536                 len += sprintf(page+len, "in_sync%s", sep);
2537         if (test_bit(Journal, &flags))
2538                 len += sprintf(page+len, "journal%s", sep);
2539         if (test_bit(WriteMostly, &flags))
2540                 len += sprintf(page+len, "write_mostly%s", sep);
2541         if (test_bit(Blocked, &flags) ||
2542             (rdev->badblocks.unacked_exist
2543              && !test_bit(Faulty, &flags)))
2544                 len += sprintf(page+len, "blocked%s", sep);
2545         if (!test_bit(Faulty, &flags) &&
2546             !test_bit(Journal, &flags) &&
2547             !test_bit(In_sync, &flags))
2548                 len += sprintf(page+len, "spare%s", sep);
2549         if (test_bit(WriteErrorSeen, &flags))
2550                 len += sprintf(page+len, "write_error%s", sep);
2551         if (test_bit(WantReplacement, &flags))
2552                 len += sprintf(page+len, "want_replacement%s", sep);
2553         if (test_bit(Replacement, &flags))
2554                 len += sprintf(page+len, "replacement%s", sep);
2555         if (test_bit(ExternalBbl, &flags))
2556                 len += sprintf(page+len, "external_bbl%s", sep);
2557
2558         if (len)
2559                 len -= strlen(sep);
2560
2561         return len+sprintf(page+len, "\n");
2562 }
2563
2564 static ssize_t
2565 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2566 {
2567         /* can write
2568          *  faulty  - simulates an error
2569          *  remove  - disconnects the device
2570          *  writemostly - sets write_mostly
2571          *  -writemostly - clears write_mostly
2572          *  blocked - sets the Blocked flag
2573          *  -blocked - clears the Blocked flag and possibly simulates an error
2574          *  insync - sets Insync provided the device isn't active
2575          *  -insync - clear Insync for a device with a slot assigned,
2576          *            so that it gets rebuilt based on bitmap
2577          *  write_error - sets WriteErrorSeen
2578          *  -write_error - clears WriteErrorSeen
2579          */
2580         int err = -EINVAL;
2581         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2582                 md_error(rdev->mddev, rdev);
2583                 if (test_bit(Faulty, &rdev->flags))
2584                         err = 0;
2585                 else
2586                         err = -EBUSY;
2587         } else if (cmd_match(buf, "remove")) {
2588                 if (rdev->mddev->pers) {
2589                         clear_bit(Blocked, &rdev->flags);
2590                         remove_and_add_spares(rdev->mddev, rdev);
2591                 }
2592                 if (rdev->raid_disk >= 0)
2593                         err = -EBUSY;
2594                 else {
2595                         struct mddev *mddev = rdev->mddev;
2596                         err = 0;
2597                         if (mddev_is_clustered(mddev))
2598                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2599
2600                         if (err == 0) {
2601                                 md_kick_rdev_from_array(rdev);
2602                                 if (mddev->pers) {
2603                                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2604                                         md_wakeup_thread(mddev->thread);
2605                                 }
2606                                 md_new_event(mddev);
2607                         }
2608                 }
2609         } else if (cmd_match(buf, "writemostly")) {
2610                 set_bit(WriteMostly, &rdev->flags);
2611                 err = 0;
2612         } else if (cmd_match(buf, "-writemostly")) {
2613                 clear_bit(WriteMostly, &rdev->flags);
2614                 err = 0;
2615         } else if (cmd_match(buf, "blocked")) {
2616                 set_bit(Blocked, &rdev->flags);
2617                 err = 0;
2618         } else if (cmd_match(buf, "-blocked")) {
2619                 if (!test_bit(Faulty, &rdev->flags) &&
2620                     !test_bit(ExternalBbl, &rdev->flags) &&
2621                     rdev->badblocks.unacked_exist) {
2622                         /* metadata handler doesn't understand badblocks,
2623                          * so we need to fail the device
2624                          */
2625                         md_error(rdev->mddev, rdev);
2626                 }
2627                 clear_bit(Blocked, &rdev->flags);
2628                 clear_bit(BlockedBadBlocks, &rdev->flags);
2629                 wake_up(&rdev->blocked_wait);
2630                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2631                 md_wakeup_thread(rdev->mddev->thread);
2632
2633                 err = 0;
2634         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2635                 set_bit(In_sync, &rdev->flags);
2636                 err = 0;
2637         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2638                    !test_bit(Journal, &rdev->flags)) {
2639                 if (rdev->mddev->pers == NULL) {
2640                         clear_bit(In_sync, &rdev->flags);
2641                         rdev->saved_raid_disk = rdev->raid_disk;
2642                         rdev->raid_disk = -1;
2643                         err = 0;
2644                 }
2645         } else if (cmd_match(buf, "write_error")) {
2646                 set_bit(WriteErrorSeen, &rdev->flags);
2647                 err = 0;
2648         } else if (cmd_match(buf, "-write_error")) {
2649                 clear_bit(WriteErrorSeen, &rdev->flags);
2650                 err = 0;
2651         } else if (cmd_match(buf, "want_replacement")) {
2652                 /* Any non-spare device that is not a replacement can
2653                  * become want_replacement at any time, but we then need to
2654                  * check if recovery is needed.
2655                  */
2656                 if (rdev->raid_disk >= 0 &&
2657                     !test_bit(Journal, &rdev->flags) &&
2658                     !test_bit(Replacement, &rdev->flags))
2659                         set_bit(WantReplacement, &rdev->flags);
2660                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2661                 md_wakeup_thread(rdev->mddev->thread);
2662                 err = 0;
2663         } else if (cmd_match(buf, "-want_replacement")) {
2664                 /* Clearing 'want_replacement' is always allowed.
2665                  * Once replacement starts it is too late, though.
2666                  */
2667                 err = 0;
2668                 clear_bit(WantReplacement, &rdev->flags);
2669         } else if (cmd_match(buf, "replacement")) {
2670                 /* Can only set a device as a replacement when the array has not
2671                  * yet been started.  Once running, replacement is automatic
2672                  * from spares, or by assigning 'slot'.
2673                  */
2674                 if (rdev->mddev->pers)
2675                         err = -EBUSY;
2676                 else {
2677                         set_bit(Replacement, &rdev->flags);
2678                         err = 0;
2679                 }
2680         } else if (cmd_match(buf, "-replacement")) {
2681                 /* Similarly, can only clear Replacement before start */
2682                 if (rdev->mddev->pers)
2683                         err = -EBUSY;
2684                 else {
2685                         clear_bit(Replacement, &rdev->flags);
2686                         err = 0;
2687                 }
2688         } else if (cmd_match(buf, "re-add")) {
2689                 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2690                         /* clear_bit is performed _after_ all the devices
2691                          * have their local Faulty bit cleared. If any writes
2692                          * happen in the meantime in the local node, they
2693                          * will land in the local bitmap, which will be synced
2694                          * by this node eventually
2695                          */
2696                         if (!mddev_is_clustered(rdev->mddev) ||
2697                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2698                                 clear_bit(Faulty, &rdev->flags);
2699                                 err = add_bound_rdev(rdev);
2700                         }
2701                 } else
2702                         err = -EBUSY;
2703         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
2704                 set_bit(ExternalBbl, &rdev->flags);
2705                 rdev->badblocks.shift = 0;
2706                 err = 0;
2707         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
2708                 clear_bit(ExternalBbl, &rdev->flags);
2709                 err = 0;
2710         }
2711         if (!err)
2712                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2713         return err ? err : len;
2714 }
2715 static struct rdev_sysfs_entry rdev_state =
2716 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
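/*
 * Illustrative usage of the per-device 'state' attribute (a sketch, not
 * taken from this file): the commands parsed by state_store() above are
 * written as plain strings to the rdev's sysfs node.  Assuming an array
 * md0 with component device sdb:
 *
 *   echo want_replacement > /sys/block/md0/md/dev-sdb/state
 *   echo -write_error     > /sys/block/md0/md/dev-sdb/state
 *
 * On success the write returns its length; otherwise the error code set
 * above (e.g. -EBUSY) is propagated back to the writer.
 */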
2717
2718 static ssize_t
2719 errors_show(struct md_rdev *rdev, char *page)
2720 {
2721         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2722 }
2723
2724 static ssize_t
2725 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2726 {
2727         unsigned int n;
2728         int rv;
2729
2730         rv = kstrtouint(buf, 10, &n);
2731         if (rv < 0)
2732                 return rv;
2733         atomic_set(&rdev->corrected_errors, n);
2734         return len;
2735 }
2736 static struct rdev_sysfs_entry rdev_errors =
2737 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2738
2739 static ssize_t
2740 slot_show(struct md_rdev *rdev, char *page)
2741 {
2742         if (test_bit(Journal, &rdev->flags))
2743                 return sprintf(page, "journal\n");
2744         else if (rdev->raid_disk < 0)
2745                 return sprintf(page, "none\n");
2746         else
2747                 return sprintf(page, "%d\n", rdev->raid_disk);
2748 }
2749
2750 static ssize_t
2751 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2752 {
2753         int slot;
2754         int err;
2755
2756         if (test_bit(Journal, &rdev->flags))
2757                 return -EBUSY;
2758         if (strncmp(buf, "none", 4)==0)
2759                 slot = -1;
2760         else {
2761                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
2762                 if (err < 0)
2763                         return err;
2764         }
2765         if (rdev->mddev->pers && slot == -1) {
2766                 /* Setting 'slot' on an active array requires also
2767                  * updating the 'rd%d' link, and communicating
2768                  * with the personality with ->hot_*_disk.
2769                  * For now we only support removing
2770                  * failed/spare devices.  This normally happens automatically,
2771                  * but not when the metadata is externally managed.
2772                  */
2773                 if (rdev->raid_disk == -1)
2774                         return -EEXIST;
2775                 /* personality does all needed checks */
2776                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2777                         return -EINVAL;
2778                 clear_bit(Blocked, &rdev->flags);
2779                 remove_and_add_spares(rdev->mddev, rdev);
2780                 if (rdev->raid_disk >= 0)
2781                         return -EBUSY;
2782                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2783                 md_wakeup_thread(rdev->mddev->thread);
2784         } else if (rdev->mddev->pers) {
2785         /* Activating a spare, or possibly reactivating
2786                  * if we ever get bitmaps working here.
2787                  */
2788                 int err;
2789
2790                 if (rdev->raid_disk != -1)
2791                         return -EBUSY;
2792
2793                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2794                         return -EBUSY;
2795
2796                 if (rdev->mddev->pers->hot_add_disk == NULL)
2797                         return -EINVAL;
2798
2799                 if (slot >= rdev->mddev->raid_disks &&
2800                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2801                         return -ENOSPC;
2802
2803                 rdev->raid_disk = slot;
2804                 if (test_bit(In_sync, &rdev->flags))
2805                         rdev->saved_raid_disk = slot;
2806                 else
2807                         rdev->saved_raid_disk = -1;
2808                 clear_bit(In_sync, &rdev->flags);
2809                 clear_bit(Bitmap_sync, &rdev->flags);
2810                 err = rdev->mddev->pers->
2811                         hot_add_disk(rdev->mddev, rdev);
2812                 if (err) {
2813                         rdev->raid_disk = -1;
2814                         return err;
2815                 } else
2816                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2817                 if (sysfs_link_rdev(rdev->mddev, rdev))
2818                         /* failure here is OK */;
2819                 /* don't wakeup anyone, leave that to userspace. */
2820         } else {
2821                 if (slot >= rdev->mddev->raid_disks &&
2822                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2823                         return -ENOSPC;
2824                 rdev->raid_disk = slot;
2825                 /* assume it is working */
2826                 clear_bit(Faulty, &rdev->flags);
2827                 clear_bit(WriteMostly, &rdev->flags);
2828                 set_bit(In_sync, &rdev->flags);
2829                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2830         }
2831         return len;
2832 }
2833
2834 static struct rdev_sysfs_entry rdev_slot =
2835 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
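/*
 * Example (illustrative, assuming array md0 and member sdc): slot_store()
 * accepts either a raid-disk number or "none":
 *
 *   echo none > /sys/block/md0/md/dev-sdc/slot   # hot-remove a failed/spare device
 *   echo 2    > /sys/block/md0/md/dev-sdc/slot   # activate the device as raid disk 2
 *
 * The second form only succeeds on a running array whose personality
 * provides hot_add_disk(), as checked above.
 */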
2836
2837 static ssize_t
2838 offset_show(struct md_rdev *rdev, char *page)
2839 {
2840         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2841 }
2842
2843 static ssize_t
2844 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2845 {
2846         unsigned long long offset;
2847         if (kstrtoull(buf, 10, &offset) < 0)
2848                 return -EINVAL;
2849         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2850                 return -EBUSY;
2851         if (rdev->sectors && rdev->mddev->external)
2852                 /* Must set offset before size, so overlap checks
2853                  * can be sane */
2854                 return -EBUSY;
2855         rdev->data_offset = offset;
2856         rdev->new_data_offset = offset;
2857         return len;
2858 }
2859
2860 static struct rdev_sysfs_entry rdev_offset =
2861 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2862
2863 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2864 {
2865         return sprintf(page, "%llu\n",
2866                        (unsigned long long)rdev->new_data_offset);
2867 }
2868
2869 static ssize_t new_offset_store(struct md_rdev *rdev,
2870                                 const char *buf, size_t len)
2871 {
2872         unsigned long long new_offset;
2873         struct mddev *mddev = rdev->mddev;
2874
2875         if (kstrtoull(buf, 10, &new_offset) < 0)
2876                 return -EINVAL;
2877
2878         if (mddev->sync_thread ||
2879             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2880                 return -EBUSY;
2881         if (new_offset == rdev->data_offset)
2882                 /* reset is always permitted */
2883                 ;
2884         else if (new_offset > rdev->data_offset) {
2885                 /* must not push array size beyond rdev_sectors */
2886                 if (new_offset - rdev->data_offset
2887                     + mddev->dev_sectors > rdev->sectors)
2888                                 return -E2BIG;
2889         }
2890         /* Metadata worries about other space details. */
2891
2892         /* decreasing the offset is inconsistent with a backwards
2893          * reshape.
2894          */
2895         if (new_offset < rdev->data_offset &&
2896             mddev->reshape_backwards)
2897                 return -EINVAL;
2898         /* Increasing offset is inconsistent with forwards
2899          * reshape.  reshape_direction should be set to
2900          * 'backwards' first.
2901          */
2902         if (new_offset > rdev->data_offset &&
2903             !mddev->reshape_backwards)
2904                 return -EINVAL;
2905
2906         if (mddev->pers && mddev->persistent &&
2907             !super_types[mddev->major_version]
2908             .allow_new_offset(rdev, new_offset))
2909                 return -E2BIG;
2910         rdev->new_data_offset = new_offset;
2911         if (new_offset > rdev->data_offset)
2912                 mddev->reshape_backwards = 1;
2913         else if (new_offset < rdev->data_offset)
2914                 mddev->reshape_backwards = 0;
2915
2916         return len;
2917 }
2918 static struct rdev_sysfs_entry rdev_new_offset =
2919 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2920
2921 static ssize_t
2922 rdev_size_show(struct md_rdev *rdev, char *page)
2923 {
2924         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2925 }
2926
2927 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2928 {
2929         /* check if two start/length pairs overlap */
2930         if (s1+l1 <= s2)
2931                 return 0;
2932         if (s2+l2 <= s1)
2933                 return 0;
2934         return 1;
2935 }
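/*
 * Worked example for overlaps(): the ranges [0, 100) and [50, 150)
 * share sectors, so overlaps(0, 100, 50, 100) returns 1, while
 * overlaps(0, 100, 100, 50) returns 0 because those ranges only touch
 * at sector 100.
 */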
2936
2937 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2938 {
2939         unsigned long long blocks;
2940         sector_t new;
2941
2942         if (kstrtoull(buf, 10, &blocks) < 0)
2943                 return -EINVAL;
2944
2945         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2946                 return -EINVAL; /* sector conversion overflow */
2947
2948         new = blocks * 2;
2949         if (new != blocks * 2)
2950                 return -EINVAL; /* unsigned long long to sector_t overflow */
2951
2952         *sectors = new;
2953         return 0;
2954 }
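/*
 * Example: a sysfs write of "1024\n" (1 KiB blocks) is converted to
 * 2048 sectors of 512 bytes; values with the top bit set, or that do
 * not survive the sector_t conversion, are rejected with -EINVAL.
 */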
2955
2956 static ssize_t
2957 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2958 {
2959         struct mddev *my_mddev = rdev->mddev;
2960         sector_t oldsectors = rdev->sectors;
2961         sector_t sectors;
2962
2963         if (test_bit(Journal, &rdev->flags))
2964                 return -EBUSY;
2965         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2966                 return -EINVAL;
2967         if (rdev->data_offset != rdev->new_data_offset)
2968                 return -EINVAL; /* too confusing */
2969         if (my_mddev->pers && rdev->raid_disk >= 0) {
2970                 if (my_mddev->persistent) {
2971                         sectors = super_types[my_mddev->major_version].
2972                                 rdev_size_change(rdev, sectors);
2973                         if (!sectors)
2974                                 return -EBUSY;
2975                 } else if (!sectors)
2976                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2977                                 rdev->data_offset;
2978                 if (!my_mddev->pers->resize)
2979                         /* Cannot change size for RAID0 or Linear etc */
2980                         return -EINVAL;
2981         }
2982         if (sectors < my_mddev->dev_sectors)
2983                 return -EINVAL; /* component must fit device */
2984
2985         rdev->sectors = sectors;
2986         if (sectors > oldsectors && my_mddev->external) {
2987                 /* Need to check that all other rdevs with the same
2988                  * ->bdev do not overlap.  'rcu' is sufficient to walk
2989                  * the rdev lists safely.
2990                  * This check does not provide a hard guarantee, it
2991                  * just helps avoid dangerous mistakes.
2992                  */
2993                 struct mddev *mddev;
2994                 int overlap = 0;
2995                 struct list_head *tmp;
2996
2997                 rcu_read_lock();
2998                 for_each_mddev(mddev, tmp) {
2999                         struct md_rdev *rdev2;
3000
3001                         rdev_for_each(rdev2, mddev)
3002                                 if (rdev->bdev == rdev2->bdev &&
3003                                     rdev != rdev2 &&
3004                                     overlaps(rdev->data_offset, rdev->sectors,
3005                                              rdev2->data_offset,
3006                                              rdev2->sectors)) {
3007                                         overlap = 1;
3008                                         break;
3009                                 }
3010                         if (overlap) {
3011                                 mddev_put(mddev);
3012                                 break;
3013                         }
3014                 }
3015                 rcu_read_unlock();
3016                 if (overlap) {
3017                         /* Someone else could have slipped in a size
3018                          * change here, but doing so is just silly.
3019                          * We put oldsectors back because we *know* it is
3020                          * safe, and trust userspace not to race with
3021                          * itself
3022                          */
3023                         rdev->sectors = oldsectors;
3024                         return -EBUSY;
3025                 }
3026         }
3027         return len;
3028 }
3029
3030 static struct rdev_sysfs_entry rdev_size =
3031 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3032
3033 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3034 {
3035         unsigned long long recovery_start = rdev->recovery_offset;
3036
3037         if (test_bit(In_sync, &rdev->flags) ||
3038             recovery_start == MaxSector)
3039                 return sprintf(page, "none\n");
3040
3041         return sprintf(page, "%llu\n", recovery_start);
3042 }
3043
3044 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3045 {
3046         unsigned long long recovery_start;
3047
3048         if (cmd_match(buf, "none"))
3049                 recovery_start = MaxSector;
3050         else if (kstrtoull(buf, 10, &recovery_start))
3051                 return -EINVAL;
3052
3053         if (rdev->mddev->pers &&
3054             rdev->raid_disk >= 0)
3055                 return -EBUSY;
3056
3057         rdev->recovery_offset = recovery_start;
3058         if (recovery_start == MaxSector)
3059                 set_bit(In_sync, &rdev->flags);
3060         else
3061                 clear_bit(In_sync, &rdev->flags);
3062         return len;
3063 }
3064
3065 static struct rdev_sysfs_entry rdev_recovery_start =
3066 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3067
3068 /* sysfs access to bad-blocks list.
3069  * We present two files.
3070  * 'bad-blocks' lists sector numbers and lengths of ranges that
3071  *    are recorded as bad.  The list is truncated to fit within
3072  *    the one-page limit of sysfs.
3073  *    Writing "sector length" to this file adds an acknowledged
3074  *    bad block to the list.
3075  * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3076  *    been acknowledged.  Writing to this file adds bad blocks
3077  *    without acknowledging them.  This is largely for testing.
3078  */
3079 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3080 {
3081         return badblocks_show(&rdev->badblocks, page, 0);
3082 }
3083 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3084 {
3085         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3086         /* Maybe that ack was all we needed */
3087         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3088                 wake_up(&rdev->blocked_wait);
3089         return rv;
3090 }
3091 static struct rdev_sysfs_entry rdev_bad_blocks =
3092 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3093
3094 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3095 {
3096         return badblocks_show(&rdev->badblocks, page, 1);
3097 }
3098 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3099 {
3100         return badblocks_store(&rdev->badblocks, page, len, 1);
3101 }
3102 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3103 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
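/*
 * Illustrative usage of the bad-blocks files described above (assuming
 * array md0 and member sdb): writing "sector length" records a range.
 *
 *   echo "2048 8" > /sys/block/md0/md/dev-sdb/bad_blocks
 *
 * records an acknowledged 8-sector bad range starting at sector 2048;
 * the same write to unacknowledged_bad_blocks records the range without
 * acknowledging it, which is mainly useful for testing.
 */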
3104
3105 static struct attribute *rdev_default_attrs[] = {
3106         &rdev_state.attr,
3107         &rdev_errors.attr,
3108         &rdev_slot.attr,
3109         &rdev_offset.attr,
3110         &rdev_new_offset.attr,
3111         &rdev_size.attr,
3112         &rdev_recovery_start.attr,
3113         &rdev_bad_blocks.attr,
3114         &rdev_unack_bad_blocks.attr,
3115         NULL,
3116 };
3117 static ssize_t
3118 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3119 {
3120         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3121         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3122
3123         if (!entry->show)
3124                 return -EIO;
3125         if (!rdev->mddev)
3126                 return -EBUSY;
3127         return entry->show(rdev, page);
3128 }
3129
3130 static ssize_t
3131 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3132               const char *page, size_t length)
3133 {
3134         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3135         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3136         ssize_t rv;
3137         struct mddev *mddev = rdev->mddev;
3138
3139         if (!entry->store)
3140                 return -EIO;
3141         if (!capable(CAP_SYS_ADMIN))
3142                 return -EACCES;
3143         rv = mddev ? mddev_lock(mddev) : -EBUSY;
3144         if (!rv) {
3145                 if (rdev->mddev == NULL)
3146                         rv = -EBUSY;
3147                 else
3148                         rv = entry->store(rdev, page, length);
3149                 mddev_unlock(mddev);
3150         }
3151         return rv;
3152 }
3153
3154 static void rdev_free(struct kobject *ko)
3155 {
3156         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3157         kfree(rdev);
3158 }
3159 static const struct sysfs_ops rdev_sysfs_ops = {
3160         .show           = rdev_attr_show,
3161         .store          = rdev_attr_store,
3162 };
3163 static struct kobj_type rdev_ktype = {
3164         .release        = rdev_free,
3165         .sysfs_ops      = &rdev_sysfs_ops,
3166         .default_attrs  = rdev_default_attrs,
3167 };
3168
3169 int md_rdev_init(struct md_rdev *rdev)
3170 {
3171         rdev->desc_nr = -1;
3172         rdev->saved_raid_disk = -1;
3173         rdev->raid_disk = -1;
3174         rdev->flags = 0;
3175         rdev->data_offset = 0;
3176         rdev->new_data_offset = 0;
3177         rdev->sb_events = 0;
3178         rdev->last_read_error = 0;
3179         rdev->sb_loaded = 0;
3180         rdev->bb_page = NULL;
3181         atomic_set(&rdev->nr_pending, 0);
3182         atomic_set(&rdev->read_errors, 0);
3183         atomic_set(&rdev->corrected_errors, 0);
3184
3185         INIT_LIST_HEAD(&rdev->same_set);
3186         init_waitqueue_head(&rdev->blocked_wait);
3187
3188         /* Add space to store bad block list.
3189          * This reserves the space even on arrays where it cannot
3190          * be used - I wonder if that matters
3191          */
3192         return badblocks_init(&rdev->badblocks, 0);
3193 }
3194 EXPORT_SYMBOL_GPL(md_rdev_init);
3195 /*
3196  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3197  *
3198  * mark the device faulty if:
3199  *
3200  *   - the device is nonexistent (zero size)
3201  *   - the device has no valid superblock
3202  *
3203  * a faulty rdev _never_ has rdev->sb set.
3204  */
3205 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3206 {
3207         char b[BDEVNAME_SIZE];
3208         int err;
3209         struct md_rdev *rdev;
3210         sector_t size;
3211
3212         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3213         if (!rdev)
3214                 return ERR_PTR(-ENOMEM);
3215
3216         err = md_rdev_init(rdev);
3217         if (err)
3218                 goto abort_free;
3219         err = alloc_disk_sb(rdev);
3220         if (err)
3221                 goto abort_free;
3222
3223         err = lock_rdev(rdev, newdev, super_format == -2);
3224         if (err)
3225                 goto abort_free;
3226
3227         kobject_init(&rdev->kobj, &rdev_ktype);
3228
3229         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3230         if (!size) {
3231                 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3232                         bdevname(rdev->bdev,b));
3233                 err = -EINVAL;
3234                 goto abort_free;
3235         }
3236
3237         if (super_format >= 0) {
3238                 err = super_types[super_format].
3239                         load_super(rdev, NULL, super_minor);
3240                 if (err == -EINVAL) {
3241                         pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3242                                 bdevname(rdev->bdev,b),
3243                                 super_format, super_minor);
3244                         goto abort_free;
3245                 }
3246                 if (err < 0) {
3247                         pr_warn("md: could not read %s's sb, not importing!\n",
3248                                 bdevname(rdev->bdev,b));
3249                         goto abort_free;
3250                 }
3251         }
3252
3253         return rdev;
3254
3255 abort_free:
3256         if (rdev->bdev)
3257                 unlock_rdev(rdev);
3258         md_rdev_clear(rdev);
3259         kfree(rdev);
3260         return ERR_PTR(err);
3261 }
3262
3263 /*
3264  * Check a full RAID array for plausibility
3265  */
3266
3267 static void analyze_sbs(struct mddev *mddev)
3268 {
3269         int i;
3270         struct md_rdev *rdev, *freshest, *tmp;
3271         char b[BDEVNAME_SIZE];
3272
3273         freshest = NULL;
3274         rdev_for_each_safe(rdev, tmp, mddev)
3275                 switch (super_types[mddev->major_version].
3276                         load_super(rdev, freshest, mddev->minor_version)) {
3277                 case 1:
3278                         freshest = rdev;
3279                         break;
3280                 case 0:
3281                         break;
3282                 default:
3283                         pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3284                                 bdevname(rdev->bdev,b));
3285                         md_kick_rdev_from_array(rdev);
3286                 }
3287
3288         super_types[mddev->major_version].
3289                 validate_super(mddev, freshest);
3290
3291         i = 0;
3292         rdev_for_each_safe(rdev, tmp, mddev) {
3293                 if (mddev->max_disks &&
3294                     (rdev->desc_nr >= mddev->max_disks ||
3295                      i > mddev->max_disks)) {
3296                         pr_warn("md: %s: %s: only %d devices permitted\n",
3297                                 mdname(mddev), bdevname(rdev->bdev, b),
3298                                 mddev->max_disks);
3299                         md_kick_rdev_from_array(rdev);
3300                         continue;
3301                 }
3302                 if (rdev != freshest) {
3303                         if (super_types[mddev->major_version].
3304                             validate_super(mddev, rdev)) {
3305                                 pr_warn("md: kicking non-fresh %s from array!\n",
3306                                         bdevname(rdev->bdev,b));
3307                                 md_kick_rdev_from_array(rdev);
3308                                 continue;
3309                         }
3310                 }
3311                 if (mddev->level == LEVEL_MULTIPATH) {
3312                         rdev->desc_nr = i++;
3313                         rdev->raid_disk = rdev->desc_nr;
3314                         set_bit(In_sync, &rdev->flags);
3315                 } else if (rdev->raid_disk >=
3316                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3317                            !test_bit(Journal, &rdev->flags)) {
3318                         rdev->raid_disk = -1;
3319                         clear_bit(In_sync, &rdev->flags);
3320                 }
3321         }
3322 }
3323
3324 /* Read a fixed-point number.
3325  * Numbers in sysfs attributes should be in "standard" units where
3326  * possible, so time should be in seconds.
3327  * However we internally use a much smaller unit such as
3328  * milliseconds or jiffies.
3329  * This function takes a decimal number with a possible fractional
3330  * component, and produces an integer which is the result of
3331  * multiplying that number by 10^'scale',
3332  * all without any floating-point arithmetic.
3333  */
3334 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3335 {
3336         unsigned long result = 0;
3337         long decimals = -1;
3338         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3339                 if (*cp == '.')
3340                         decimals = 0;
3341                 else if (decimals < scale) {
3342                         unsigned int value;
3343                         value = *cp - '0';
3344                         result = result * 10 + value;
3345                         if (decimals >= 0)
3346                                 decimals++;
3347                 }
3348                 cp++;
3349         }
3350         if (*cp == '\n')
3351                 cp++;
3352         if (*cp)
3353                 return -EINVAL;
3354         if (decimals < 0)
3355                 decimals = 0;
3356         while (decimals < scale) {
3357                 result *= 10;
3358                 decimals++;
3359         }
3360         *res = result;
3361         return 0;
3362 }
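/*
 * Worked example: strict_strtoul_scaled("1.5", &res, 3) parses the
 * decimal "1.5", scales it by 10^3 and stores 1500 in *res; trailing
 * characters other than a single '\n' make it return -EINVAL.
 */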
3363
3364 static ssize_t
3365 safe_delay_show(struct mddev *mddev, char *page)
3366 {
3367         int msec = (mddev->safemode_delay*1000)/HZ;
3368         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3369 }
3370 static ssize_t
3371 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3372 {
3373         unsigned long msec;
3374
3375         if (mddev_is_clustered(mddev)) {
3376                 pr_warn("md: Safemode is disabled for clustered mode\n");
3377                 return -EINVAL;
3378         }
3379
3380         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3381                 return -EINVAL;
3382         if (msec == 0)
3383                 mddev->safemode_delay = 0;
3384         else {
3385                 unsigned long old_delay = mddev->safemode_delay;
3386                 unsigned long new_delay = (msec*HZ)/1000;
3387
3388                 if (new_delay == 0)
3389                         new_delay = 1;
3390                 mddev->safemode_delay = new_delay;
3391                 if (new_delay < old_delay || old_delay == 0)
3392                         mod_timer(&mddev->safemode_timer, jiffies+1);
3393         }
3394         return len;
3395 }
3396 static struct md_sysfs_entry md_safe_delay =
3397 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
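/*
 * Example (illustrative): safe_delay_store() reads a value in seconds
 * with an optional fractional part, so
 *
 *   echo 0.200 > /sys/block/md0/md/safe_mode_delay
 *
 * sets a 200 millisecond safemode delay (stored internally in jiffies).
 */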
3398
3399 static ssize_t
3400 level_show(struct mddev *mddev, char *page)
3401 {
3402         struct md_personality *p;
3403         int ret;
3404         spin_lock(&mddev->lock);
3405         p = mddev->pers;
3406         if (p)
3407                 ret = sprintf(page, "%s\n", p->name);
3408         else if (mddev->clevel[0])
3409                 ret = sprintf(page, "%s\n", mddev->clevel);
3410         else if (mddev->level != LEVEL_NONE)
3411                 ret = sprintf(page, "%d\n", mddev->level);
3412         else
3413                 ret = 0;
3414         spin_unlock(&mddev->lock);
3415         return ret;
3416 }
3417
3418 static ssize_t
3419 level_store(struct mddev *mddev, const char *buf, size_t len)
3420 {
3421         char clevel[16];
3422         ssize_t rv;
3423         size_t slen = len;
3424         struct md_personality *pers, *oldpers;
3425         long level;
3426         void *priv, *oldpriv;
3427         struct md_rdev *rdev;
3428
3429         if (slen == 0 || slen >= sizeof(clevel))
3430                 return -EINVAL;
3431
3432         rv = mddev_lock(mddev);
3433         if (rv)
3434                 return rv;
3435
3436         if (mddev->pers == NULL) {
3437                 strncpy(mddev->clevel, buf, slen);
3438                 if (mddev->clevel[slen-1] == '\n')
3439                         slen--;
3440                 mddev->clevel[slen] = 0;
3441                 mddev->level = LEVEL_NONE;
3442                 rv = len;
3443                 goto out_unlock;
3444         }
3445         rv = -EROFS;
3446         if (mddev->ro)
3447                 goto out_unlock;
3448
3449         /* request to change the personality.  Need to ensure:
3450          *  - array is not engaged in resync/recovery/reshape
3451          *  - old personality can be suspended
3452          *  - new personality can take over the array.
3453          */
3454
3455         rv = -EBUSY;
3456         if (mddev->sync_thread ||
3457             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3458             mddev->reshape_position != MaxSector ||
3459             mddev->sysfs_active)
3460                 goto out_unlock;
3461
3462         rv = -EINVAL;
3463         if (!mddev->pers->quiesce) {
3464                 pr_warn("md: %s: %s does not support online personality change\n",
3465                         mdname(mddev), mddev->pers->name);
3466                 goto out_unlock;
3467         }
3468
3469         /* Now find the new personality */
3470         strncpy(clevel, buf, slen);
3471         if (clevel[slen-1] == '\n')
3472                 slen--;
3473         clevel[slen] = 0;
3474         if (kstrtol(clevel, 10, &level))
3475                 level = LEVEL_NONE;
3476
3477         if (request_module("md-%s", clevel) != 0)
3478                 request_module("md-level-%s", clevel);
3479         spin_lock(&pers_lock);
3480         pers = find_pers(level, clevel);
3481         if (!pers || !try_module_get(pers->owner)) {
3482                 spin_unlock(&pers_lock);
3483                 pr_warn("md: personality %s not loaded\n", clevel);
3484                 rv = -EINVAL;
3485                 goto out_unlock;
3486         }
3487         spin_unlock(&pers_lock);
3488
3489         if (pers == mddev->pers) {
3490                 /* Nothing to do! */
3491                 module_put(pers->owner);
3492                 rv = len;
3493                 goto out_unlock;
3494         }
3495         if (!pers->takeover) {
3496                 module_put(pers->owner);
3497                 pr_warn("md: %s: %s does not support personality takeover\n",
3498                         mdname(mddev), clevel);
3499                 rv = -EINVAL;
3500                 goto out_unlock;
3501         }
3502
3503         rdev_for_each(rdev, mddev)
3504                 rdev->new_raid_disk = rdev->raid_disk;
3505
3506         /* ->takeover must set new_* and/or delta_disks
3507          * if it succeeds, and may set them when it fails.
3508          */
3509         priv = pers->takeover(mddev);
3510         if (IS_ERR(priv)) {
3511                 mddev->new_level = mddev->level;
3512                 mddev->new_layout = mddev->layout;
3513                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3514                 mddev->raid_disks -= mddev->delta_disks;
3515                 mddev->delta_disks = 0;
3516                 mddev->reshape_backwards = 0;
3517                 module_put(pers->owner);
3518                 pr_warn("md: %s: %s would not accept array\n",
3519                         mdname(mddev), clevel);
3520                 rv = PTR_ERR(priv);
3521                 goto out_unlock;
3522         }
3523
3524         /* Looks like we have a winner */
3525         mddev_suspend(mddev);
3526         mddev_detach(mddev);
3527
3528         spin_lock(&mddev->lock);
3529         oldpers = mddev->pers;
3530         oldpriv = mddev->private;
3531         mddev->pers = pers;
3532         mddev->private = priv;
3533         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3534         mddev->level = mddev->new_level;
3535         mddev->layout = mddev->new_layout;
3536         mddev->chunk_sectors = mddev->new_chunk_sectors;
3537         mddev->delta_disks = 0;
3538         mddev->reshape_backwards = 0;
3539         mddev->degraded = 0;
3540         spin_unlock(&mddev->lock);
3541
3542         if (oldpers->sync_request == NULL &&
3543             mddev->external) {
3544                 /* We are converting from a no-redundancy array
3545                  * to a redundancy array and metadata is managed
3546                  * externally so we need to be sure that writes
3547                  * won't block due to a need to transition
3548                  *      clean->dirty
3549                  * until external management is started.
3550                  */
3551                 mddev->in_sync = 0;
3552                 mddev->safemode_delay = 0;
3553                 mddev->safemode = 0;
3554         }
3555
3556         oldpers->free(mddev, oldpriv);
3557
3558         if (oldpers->sync_request == NULL &&
3559             pers->sync_request != NULL) {
3560                 /* need to add the md_redundancy_group */
3561                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3562                         pr_warn("md: cannot register extra attributes for %s\n",
3563                                 mdname(mddev));
3564                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3565         }
3566         if (oldpers->sync_request != NULL &&
3567             pers->sync_request == NULL) {
3568                 /* need to remove the md_redundancy_group */
3569                 if (mddev->to_remove == NULL)
3570                         mddev->to_remove = &md_redundancy_group;
3571         }
3572
3573         module_put(oldpers->owner);
3574
3575         rdev_for_each(rdev, mddev) {
3576                 if (rdev->raid_disk < 0)
3577                         continue;
3578                 if (rdev->new_raid_disk >= mddev->raid_disks)
3579                         rdev->new_raid_disk = -1;
3580                 if (rdev->new_raid_disk == rdev->raid_disk)
3581                         continue;
3582                 sysfs_unlink_rdev(mddev, rdev);
3583         }
3584         rdev_for_each(rdev, mddev) {
3585                 if (rdev->raid_disk < 0)
3586                         continue;
3587                 if (rdev->new_raid_disk == rdev->raid_disk)
3588                         continue;
3589                 rdev->raid_disk = rdev->new_raid_disk;
3590                 if (rdev->raid_disk < 0)
3591                         clear_bit(In_sync, &rdev->flags);
3592                 else {
3593                         if (sysfs_link_rdev(mddev, rdev))
3594                                 pr_warn("md: cannot register rd%d for %s after level change\n",
3595                                         rdev->raid_disk, mdname(mddev));
3596                 }
3597         }
3598
3599         if (pers->sync_request == NULL) {
3600                 /* this is now an array without redundancy, so
3601                  * it must always be in_sync
3602                  */
3603                 mddev->in_sync = 1;
3604                 del_timer_sync(&mddev->safemode_timer);
3605         }
3606         blk_set_stacking_limits(&mddev->queue->limits);
3607         pers->run(mddev);
3608         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3609         mddev_resume(mddev);
3610         if (!mddev->thread)
3611                 md_update_sb(mddev, 1);
3612         sysfs_notify(&mddev->kobj, NULL, "level");
3613         md_new_event(mddev);
3614         rv = len;
3615 out_unlock:
3616         mddev_unlock(mddev);
3617         return rv;
3618 }
3619
3620 static struct md_sysfs_entry md_level =
3621 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
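/*
 * Example (a sketch, not from this file): an online personality change
 * through level_store(), assuming the target module supports takeover
 * from the current level and layout:
 *
 *   echo raid6 > /sys/block/md0/md/level
 *
 * On an array that is not yet running, the string is merely recorded in
 * mddev->clevel for use when the array is started.
 */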
3622
3623 static ssize_t
3624 layout_show(struct mddev *mddev, char *page)
3625 {
3626         /* just a number, not meaningful for all levels */
3627         if (mddev->reshape_position != MaxSector &&
3628             mddev->layout != mddev->new_layout)
3629                 return sprintf(page, "%d (%d)\n",
3630                                mddev->new_layout, mddev->layout);
3631         return sprintf(page, "%d\n", mddev->layout);
3632 }
3633
3634 static ssize_t
3635 layout_store(struct mddev *mddev, const char *buf, size_t len)
3636 {
3637         unsigned int n;
3638         int err;
3639
3640         err = kstrtouint(buf, 10, &n);
3641         if (err < 0)
3642                 return err;
3643         err = mddev_lock(mddev);
3644         if (err)
3645                 return err;
3646
3647         if (mddev->pers) {
3648                 if (mddev->pers->check_reshape == NULL)
3649                         err = -EBUSY;
3650                 else if (mddev->ro)
3651                         err = -EROFS;
3652                 else {
3653                         mddev->new_layout = n;
3654                         err = mddev->pers->check_reshape(mddev);
3655                         if (err)
3656                                 mddev->new_layout = mddev->layout;
3657                 }
3658         } else {
3659                 mddev->new_layout = n;
3660                 if (mddev->reshape_position == MaxSector)
3661                         mddev->layout = n;
3662         }
3663         mddev_unlock(mddev);
3664         return err ?: len;
3665 }
3666 static struct md_sysfs_entry md_layout =
3667 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3668
3669 static ssize_t
3670 raid_disks_show(struct mddev *mddev, char *page)
3671 {
3672         if (mddev->raid_disks == 0)
3673                 return 0;
3674         if (mddev->reshape_position != MaxSector &&
3675             mddev->delta_disks != 0)
3676                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3677                                mddev->raid_disks - mddev->delta_disks);
3678         return sprintf(page, "%d\n", mddev->raid_disks);
3679 }
3680
3681 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3682
3683 static ssize_t
3684 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3685 {
3686         unsigned int n;
3687         int err;
3688
3689         err = kstrtouint(buf, 10, &n);
3690         if (err < 0)
3691                 return err;
3692
3693         err = mddev_lock(mddev);
3694         if (err)
3695                 return err;
3696         if (mddev->pers)
3697                 err = update_raid_disks(mddev, n);
3698         else if (mddev->reshape_position != MaxSector) {
3699                 struct md_rdev *rdev;
3700                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3701
3702                 err = -EINVAL;
3703                 rdev_for_each(rdev, mddev) {
3704                         if (olddisks < n &&
3705                             rdev->data_offset < rdev->new_data_offset)
3706                                 goto out_unlock;
3707                         if (olddisks > n &&
3708                             rdev->data_offset > rdev->new_data_offset)
3709                                 goto out_unlock;
3710                 }
3711                 err = 0;
3712                 mddev->delta_disks = n - olddisks;
3713                 mddev->raid_disks = n;
3714                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3715         } else
3716                 mddev->raid_disks = n;
3717 out_unlock:
3718         mddev_unlock(mddev);
3719         return err ? err : len;
3720 }
3721 static struct md_sysfs_entry md_raid_disks =
3722 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3723
3724 static ssize_t
3725 chunk_size_show(struct mddev *mddev, char *page)
3726 {
3727         if (mddev->reshape_position != MaxSector &&
3728             mddev->chunk_sectors != mddev->new_chunk_sectors)
3729                 return sprintf(page, "%d (%d)\n",
3730                                mddev->new_chunk_sectors << 9,
3731                                mddev->chunk_sectors << 9);
3732         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3733 }
3734
3735 static ssize_t
3736 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3737 {
3738         unsigned long n;
3739         int err;
3740
3741         err = kstrtoul(buf, 10, &n);
3742         if (err < 0)
3743                 return err;
3744
3745         err = mddev_lock(mddev);
3746         if (err)
3747                 return err;
3748         if (mddev->pers) {
3749                 if (mddev->pers->check_reshape == NULL)
3750                         err = -EBUSY;
3751                 else if (mddev->ro)
3752                         err = -EROFS;
3753                 else {
3754                         mddev->new_chunk_sectors = n >> 9;
3755                         err = mddev->pers->check_reshape(mddev);
3756                         if (err)
3757                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3758                 }
3759         } else {
3760                 mddev->new_chunk_sectors = n >> 9;
3761                 if (mddev->reshape_position == MaxSector)
3762                         mddev->chunk_sectors = n >> 9;
3763         }
3764         mddev_unlock(mddev);
3765         return err ?: len;
3766 }
3767 static struct md_sysfs_entry md_chunk_size =
3768 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
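/*
 * Example (illustrative): chunk_size is expressed in bytes and stored
 * internally as sectors (n >> 9), so
 *
 *   echo 524288 > /sys/block/md0/md/chunk_size
 *
 * requests a 512 KiB chunk; on a running array the personality's
 * check_reshape() decides whether the change is accepted.
 */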
3769
3770 static ssize_t
3771 resync_start_show(struct mddev *mddev, char *page)
3772 {
3773         if (mddev->recovery_cp == MaxSector)
3774                 return sprintf(page, "none\n");
3775         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3776 }
3777
3778 static ssize_t
3779 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3780 {
3781         unsigned long long n;
3782         int err;
3783
3784         if (cmd_match(buf, "none"))
3785                 n = MaxSector;
3786         else {
3787                 err = kstrtoull(buf, 10, &n);
3788                 if (err < 0)
3789                         return err;
3790                 if (n != (sector_t)n)
3791                         return -EINVAL;
3792         }
3793
3794         err = mddev_lock(mddev);
3795         if (err)
3796                 return err;
3797         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3798                 err = -EBUSY;
3799
3800         if (!err) {
3801                 mddev->recovery_cp = n;
3802                 if (mddev->pers)
3803                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3804         }
3805         mddev_unlock(mddev);
3806         return err ?: len;
3807 }
3808 static struct md_sysfs_entry md_resync_start =
3809 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3810                 resync_start_show, resync_start_store);
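/*
 * Example (illustrative): resync_start exposes mddev->recovery_cp.
 *
 *   echo 0    > /sys/block/md0/md/resync_start   # treat the whole array as needing resync
 *   echo none > /sys/block/md0/md/resync_start   # record that no resync is needed
 *
 * On a running array this is only permitted while MD_RECOVERY_FROZEN is set.
 */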
3811
3812 /*
3813  * The array state can be:
3814  *
3815  * clear
3816  *     No devices, no size, no level
3817  *     Equivalent to STOP_ARRAY ioctl
3818  * inactive
3819  *     May have some settings, but array is not active
3820  *        all IO results in error
3821  *     When written, doesn't tear down array, but just stops it
3822  * suspended (not supported yet)
3823  *     All IO requests will block. The array can be reconfigured.
3824  *     Writing this, if accepted, will block until array is quiescent
3825  * readonly
3826  *     no resync can happen.  no superblocks get written.
3827  *     write requests fail
3828  * read-auto
3829  *     like readonly, but behaves like 'clean' on a write request.
3830  *
3831  * clean - no pending writes, but otherwise active.
3832  *     When written to inactive array, starts without resync
3833  *     If a write request arrives then
3834  *       if metadata is known, mark 'dirty' and switch to 'active'.
3835  *       if not known, block and switch to write-pending
3836  *     If written to an active array that has pending writes, then fails.
3837  * active
3838  *     fully active: IO and resync can be happening.
3839  *     When written to inactive array, starts with resync
3840  *
3841  * write-pending
3842  *     clean, but writes are blocked waiting for 'active' to be written.
3843  *
3844  * active-idle
3845  *     like active, but no writes have been seen for a while (100msec).
3846  *
3847  */
3848 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3849                    write_pending, active_idle, bad_word};
3850 static char *array_states[] = {
3851         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3852         "write-pending", "active-idle", NULL };
3853
3854 static int match_word(const char *word, char **list)
3855 {
3856         int n;
3857         for (n=0; list[n]; n++)
3858                 if (cmd_match(word, list[n]))
3859                         break;
3860         return n;
3861 }
3862
3863 static ssize_t
3864 array_state_show(struct mddev *mddev, char *page)
3865 {
3866         enum array_state st = inactive;
3867
3868         if (mddev->pers)
3869                 switch(mddev->ro) {
3870                 case 1:
3871                         st = readonly;
3872                         break;
3873                 case 2:
3874                         st = read_auto;
3875                         break;
3876                 case 0:
3877                         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3878                                 st = write_pending;
3879                         else if (mddev->in_sync)
3880                                 st = clean;
3881                         else if (mddev->safemode)
3882                                 st = active_idle;
3883                         else
3884                                 st = active;
3885                 }
3886         else {
3887                 if (list_empty(&mddev->disks) &&
3888                     mddev->raid_disks == 0 &&
3889                     mddev->dev_sectors == 0)
3890                         st = clear;
3891                 else
3892                         st = inactive;
3893         }
3894         return sprintf(page, "%s\n", array_states[st]);
3895 }
3896
3897 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3898 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3899 static int do_md_run(struct mddev *mddev);
3900 static int restart_array(struct mddev *mddev);
3901
3902 static ssize_t
3903 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3904 {
3905         int err;
3906         enum array_state st = match_word(buf, array_states);
3907
3908         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3909                 /* don't take reconfig_mutex when toggling between
3910                  * clean and active
3911                  */
3912                 spin_lock(&mddev->lock);
3913                 if (st == active) {
3914                         restart_array(mddev);
3915                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3916                         md_wakeup_thread(mddev->thread);
3917                         wake_up(&mddev->sb_wait);
3918                         err = 0;
3919                 } else /* st == clean */ {
3920                         restart_array(mddev);
3921                         if (atomic_read(&mddev->writes_pending) == 0) {
3922                                 if (mddev->in_sync == 0) {
3923                                         mddev->in_sync = 1;
3924                                         if (mddev->safemode == 1)
3925                                                 mddev->safemode = 0;
3926                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3927                                 }
3928                                 err = 0;
3929                         } else
3930                                 err = -EBUSY;
3931                 }
3932                 if (!err)
3933                         sysfs_notify_dirent_safe(mddev->sysfs_state);
3934                 spin_unlock(&mddev->lock);
3935                 return err ?: len;
3936         }
3937         err = mddev_lock(mddev);
3938         if (err)
3939                 return err;
3940         err = -EINVAL;
3941         switch(st) {
3942         case bad_word:
3943                 break;
3944         case clear:
3945                 /* stopping an active array */
3946                 err = do_md_stop(mddev, 0, NULL);
3947                 break;
3948         case inactive:
3949                 /* stopping an active array */
3950                 if (mddev->pers)
3951                         err = do_md_stop(mddev, 2, NULL);
3952                 else
3953                         err = 0; /* already inactive */
3954                 break;
3955         case suspended:
3956                 break; /* not supported yet */
3957         case readonly:
3958                 if (mddev->pers)
3959                         err = md_set_readonly(mddev, NULL);
3960                 else {
3961                         mddev->ro = 1;
3962                         set_disk_ro(mddev->gendisk, 1);
3963                         err = do_md_run(mddev);
3964                 }
3965                 break;
3966         case read_auto:
3967                 if (mddev->pers) {
3968                         if (mddev->ro == 0)
3969                                 err = md_set_readonly(mddev, NULL);
3970                         else if (mddev->ro == 1)
3971                                 err = restart_array(mddev);
3972                         if (err == 0) {
3973                                 mddev->ro = 2;
3974                                 set_disk_ro(mddev->gendisk, 0);
3975                         }
3976                 } else {
3977                         mddev->ro = 2;
3978                         err = do_md_run(mddev);
3979                 }
3980                 break;
3981         case clean:
3982                 if (mddev->pers) {
3983                         err = restart_array(mddev);
3984                         if (err)
3985                                 break;
3986                         spin_lock(&mddev->lock);
3987                         if (atomic_read(&mddev->writes_pending) == 0) {
3988                                 if (mddev->in_sync == 0) {
3989                                         mddev->in_sync = 1;
3990                                         if (mddev->safemode == 1)
3991                                                 mddev->safemode = 0;
3992                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3993                                 }
3994                                 err = 0;
3995                         } else
3996                                 err = -EBUSY;
3997                         spin_unlock(&mddev->lock);
3998                 } else
3999                         err = -EINVAL;
4000                 break;
4001         case active:
4002                 if (mddev->pers) {
4003                         err = restart_array(mddev);
4004                         if (err)
4005                                 break;
4006                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
4007                         wake_up(&mddev->sb_wait);
4008                         err = 0;
4009                 } else {
4010                         mddev->ro = 0;
4011                         set_disk_ro(mddev->gendisk, 0);
4012                         err = do_md_run(mddev);
4013                 }
4014                 break;
4015         case write_pending:
4016         case active_idle:
4017                 /* these cannot be set */
4018                 break;
4019         }
4020
4021         if (!err) {
4022                 if (mddev->hold_active == UNTIL_IOCTL)
4023                         mddev->hold_active = 0;
4024                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4025         }
4026         mddev_unlock(mddev);
4027         return err ?: len;
4028 }
4029 static struct md_sysfs_entry md_array_state =
4030 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
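/*
 * Example (illustrative) of driving array_state_store(), assuming md0
 * has already been assembled:
 *
 *   echo readonly > /sys/block/md0/md/array_state   # stop writes, no resync
 *   echo active   > /sys/block/md0/md/array_state   # restart, allow writes again
 *   echo clear    > /sys/block/md0/md/array_state   # fully stop and tear down
 *
 * The accepted words are exactly those listed in array_states[] above.
 */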
4031
4032 static ssize_t
4033 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4034         return sprintf(page, "%d\n",
4035                        atomic_read(&mddev->max_corr_read_errors));
4036 }
4037
4038 static ssize_t
4039 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4040 {
4041         unsigned int n;
4042         int rv;
4043
4044         rv = kstrtouint(buf, 10, &n);
4045         if (rv < 0)
4046                 return rv;
4047         atomic_set(&mddev->max_corr_read_errors, n);
4048         return len;
4049 }
4050
4051 static struct md_sysfs_entry max_corr_read_errors =
4052 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4053         max_corrected_read_errors_store);
4054
4055 static ssize_t
4056 null_show(struct mddev *mddev, char *page)
4057 {
4058         return -EINVAL;
4059 }
4060
4061 static ssize_t
4062 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4063 {
4064         /* buf must be %d:%d\n? giving major and minor numbers */
4065         /* The new device is added to the array.
4066          * If the array has a persistent superblock, we read the
4067          * superblock to initialise info and check validity.
4068          * Otherwise, the only checking done is that in bind_rdev_to_array,
4069          * which mainly checks size.
4070          */
4071         char *e;
4072         int major = simple_strtoul(buf, &e, 10);
4073         int minor;
4074         dev_t dev;
4075         struct md_rdev *rdev;
4076         int err;
4077
4078         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4079                 return -EINVAL;
4080         minor = simple_strtoul(e+1, &e, 10);
4081         if (*e && *e != '\n')
4082                 return -EINVAL;
4083         dev = MKDEV(major, minor);
4084         if (major != MAJOR(dev) ||
4085             minor != MINOR(dev))
4086                 return -EOVERFLOW;
4087
4088         flush_workqueue(md_misc_wq);
4089
4090         err = mddev_lock(mddev);
4091         if (err)
4092                 return err;
4093         if (mddev->persistent) {
4094                 rdev = md_import_device(dev, mddev->major_version,
4095                                         mddev->minor_version);
4096                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4097                         struct md_rdev *rdev0
4098                                 = list_entry(mddev->disks.next,
4099                                              struct md_rdev, same_set);
4100                         err = super_types[mddev->major_version]
4101                                 .load_super(rdev, rdev0, mddev->minor_version);
4102                         if (err < 0)
4103                                 goto out;
4104                 }
4105         } else if (mddev->external)
4106                 rdev = md_import_device(dev, -2, -1);
4107         else
4108                 rdev = md_import_device(dev, -1, -1);
4109
4110         if (IS_ERR(rdev)) {
4111                 mddev_unlock(mddev);
4112                 return PTR_ERR(rdev);
4113         }
4114         err = bind_rdev_to_array(rdev, mddev);
4115  out:
4116         if (err)
4117                 export_rdev(rdev);
4118         mddev_unlock(mddev);
4119         return err ? err : len;
4120 }
4121
4122 static struct md_sysfs_entry md_new_device =
4123 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
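/*
 * Example (illustrative): new_dev_store() expects "major:minor", so
 * adding /dev/sdb (block device 8:16 on most systems) to md0 is
 *
 *   echo 8:16 > /sys/block/md0/md/new_dev
 *
 * For persistent-superblock arrays the superblock is read and checked
 * against the existing members before the device is bound.
 */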
4124
4125 static ssize_t
4126 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4127 {
4128         char *end;
4129         unsigned long chunk, end_chunk;
4130         int err;
4131
4132         err = mddev_lock(mddev);
4133         if (err)
4134                 return err;
4135         if (!mddev->bitmap)
4136                 goto out;
4137         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4138         while (*buf) {
4139                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4140                 if (buf == end) break;
4141                 if (*end == '-') { /* range */
4142                         buf = end + 1;
4143                         end_chunk = simple_strtoul(buf, &end, 0);
4144                         if (buf == end) break;
4145                 }
4146                 if (*end && !isspace(*end)) break;
4147                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4148                 buf = skip_spaces(end);
4149         }
4150         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4151 out:
4152         mddev_unlock(mddev);
4153         return len;
4154 }
4155
4156 static struct md_sysfs_entry md_bitmap =
4157 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
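/*
 * Illustrative usage only: bitmap_set_bits is write-only and accepts
 * bitmap chunk numbers, either individually or as start-end ranges,
 * separated by whitespace, as parsed above.  For example (path and
 * numbers are examples only)
 *
 *   echo "100-200" > /sys/block/md0/md/bitmap_set_bits
 *
 * would mark chunks 100 through 200 dirty in the write-intent bitmap so
 * that the corresponding regions get resynced.
 */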
4158
4159 static ssize_t
4160 size_show(struct mddev *mddev, char *page)
4161 {
4162         return sprintf(page, "%llu\n",
4163                 (unsigned long long)mddev->dev_sectors / 2);
4164 }
4165
4166 static int update_size(struct mddev *mddev, sector_t num_sectors);
4167
4168 static ssize_t
4169 size_store(struct mddev *mddev, const char *buf, size_t len)
4170 {
4171         /* If the array is inactive, we can reduce the component size,
4172          * but not increase it (except from 0).
4173          * If the array is active, we can try an on-line resize.
4174          */
4175         sector_t sectors;
4176         int err = strict_blocks_to_sectors(buf, &sectors);
4177
4178         if (err < 0)
4179                 return err;
4180         err = mddev_lock(mddev);
4181         if (err)
4182                 return err;
4183         if (mddev->pers) {
4184                 err = update_size(mddev, sectors);
4185                 if (err == 0)
4186                         md_update_sb(mddev, 1);
4187         } else {
4188                 if (mddev->dev_sectors == 0 ||
4189                     mddev->dev_sectors > sectors)
4190                         mddev->dev_sectors = sectors;
4191                 else
4192                         err = -ENOSPC;
4193         }
4194         mddev_unlock(mddev);
4195         return err ? err : len;
4196 }
4197
4198 static struct md_sysfs_entry md_size =
4199 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
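/*
 * Illustrative usage only: component_size is reported and set in units of
 * 1K blocks (note the /2 of dev_sectors in size_show above).  On an active
 * array a write attempts an on-line resize; for example
 *
 *   echo 10485760 > /sys/block/md0/md/component_size
 *
 * would ask for 10GiB per component device.  The path and size are
 * examples, not a recommendation.
 */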
4200
4201 /* Metadata version.
4202  * This is one of
4203  *   'none' for arrays with no metadata (good luck...)
4204  *   'external' for arrays with externally managed metadata,
4205  * or N.M for internally known formats
4206  */
4207 static ssize_t
4208 metadata_show(struct mddev *mddev, char *page)
4209 {
4210         if (mddev->persistent)
4211                 return sprintf(page, "%d.%d\n",
4212                                mddev->major_version, mddev->minor_version);
4213         else if (mddev->external)
4214                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4215         else
4216                 return sprintf(page, "none\n");
4217 }
4218
4219 static ssize_t
4220 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4221 {
4222         int major, minor;
4223         char *e;
4224         int err;
4225         /* Changing the details of 'external' metadata is
4226          * always permitted.  Otherwise there must be
4227          * no devices attached to the array.
4228          */
4229
4230         err = mddev_lock(mddev);
4231         if (err)
4232                 return err;
4233         err = -EBUSY;
4234         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4235                 ;
4236         else if (!list_empty(&mddev->disks))
4237                 goto out_unlock;
4238
4239         err = 0;
4240         if (cmd_match(buf, "none")) {
4241                 mddev->persistent = 0;
4242                 mddev->external = 0;
4243                 mddev->major_version = 0;
4244                 mddev->minor_version = 90;
4245                 goto out_unlock;
4246         }
4247         if (strncmp(buf, "external:", 9) == 0) {
4248                 size_t namelen = len-9;
4249                 if (namelen >= sizeof(mddev->metadata_type))
4250                         namelen = sizeof(mddev->metadata_type)-1;
4251                 strncpy(mddev->metadata_type, buf+9, namelen);
4252                 mddev->metadata_type[namelen] = 0;
4253                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4254                         mddev->metadata_type[--namelen] = 0;
4255                 mddev->persistent = 0;
4256                 mddev->external = 1;
4257                 mddev->major_version = 0;
4258                 mddev->minor_version = 90;
4259                 goto out_unlock;
4260         }
4261         major = simple_strtoul(buf, &e, 10);
4262         err = -EINVAL;
4263         if (e==buf || *e != '.')
4264                 goto out_unlock;
4265         buf = e+1;
4266         minor = simple_strtoul(buf, &e, 10);
4267         if (e==buf || (*e && *e != '\n') )
4268                 goto out_unlock;
4269         err = -ENOENT;
4270         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4271                 goto out_unlock;
4272         mddev->major_version = major;
4273         mddev->minor_version = minor;
4274         mddev->persistent = 1;
4275         mddev->external = 0;
4276         err = 0;
4277 out_unlock:
4278         mddev_unlock(mddev);
4279         return err ?: len;
4280 }
4281
4282 static struct md_sysfs_entry md_metadata =
4283 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
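/*
 * Illustrative usage only: metadata_version accepts "none", "external:"
 * followed by a free-form name (copied verbatim by the handler above), or
 * a major.minor pair naming one of the super_types entries.  For example
 *
 *   echo 1.2 > /sys/block/md0/md/metadata_version
 *   echo external:example-name > /sys/block/md0/md/metadata_version
 *
 * Except for updating the name of an already-external array, this is only
 * accepted while no devices are attached; paths and names are examples.
 */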
4284
4285 static ssize_t
4286 action_show(struct mddev *mddev, char *page)
4287 {
4288         char *type = "idle";
4289         unsigned long recovery = mddev->recovery;
4290         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4291                 type = "frozen";
4292         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4293             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4294                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4295                         type = "reshape";
4296                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4297                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4298                                 type = "resync";
4299                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4300                                 type = "check";
4301                         else
4302                                 type = "repair";
4303                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4304                         type = "recover";
4305                 else if (mddev->reshape_position != MaxSector)
4306                         type = "reshape";
4307         }
4308         return sprintf(page, "%s\n", type);
4309 }
4310
4311 static ssize_t
4312 action_store(struct mddev *mddev, const char *page, size_t len)
4313 {
4314         if (!mddev->pers || !mddev->pers->sync_request)
4315                 return -EINVAL;
4316
4317
4318         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4319                 if (cmd_match(page, "frozen"))
4320                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4321                 else
4322                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4323                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4324                     mddev_lock(mddev) == 0) {
4325                         flush_workqueue(md_misc_wq);
4326                         if (mddev->sync_thread) {
4327                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4328                                 md_reap_sync_thread(mddev);
4329                         }
4330                         mddev_unlock(mddev);
4331                 }
4332         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4333                 return -EBUSY;
4334         else if (cmd_match(page, "resync"))
4335                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4336         else if (cmd_match(page, "recover")) {
4337                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4338                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4339         } else if (cmd_match(page, "reshape")) {
4340                 int err;
4341                 if (mddev->pers->start_reshape == NULL)
4342                         return -EINVAL;
4343                 err = mddev_lock(mddev);
4344                 if (!err) {
4345                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4346                                 err =  -EBUSY;
4347                         else {
4348                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4349                                 err = mddev->pers->start_reshape(mddev);
4350                         }
4351                         mddev_unlock(mddev);
4352                 }
4353                 if (err)
4354                         return err;
4355                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4356         } else {
4357                 if (cmd_match(page, "check"))
4358                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4359                 else if (!cmd_match(page, "repair"))
4360                         return -EINVAL;
4361                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4362                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4363                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4364         }
4365         if (mddev->ro == 2) {
4366                 /* A write to sync_action is enough to justify
4367                  * canceling read-auto mode
4368                  */
4369                 mddev->ro = 0;
4370                 md_wakeup_thread(mddev->sync_thread);
4371         }
4372         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4373         md_wakeup_thread(mddev->thread);
4374         sysfs_notify_dirent_safe(mddev->sysfs_action);
4375         return len;
4376 }
4377
4378 static struct md_sysfs_entry md_scan_mode =
4379 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
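/*
 * Illustrative usage only: sync_action both reports the current sync state
 * (idle, frozen, resync, recover, check, repair, reshape) and accepts
 * those words as commands, as parsed above.  For example
 *
 *   echo check > /sys/block/md0/md/sync_action
 *   echo idle  > /sys/block/md0/md/sync_action
 *
 * would start a read-only scrub and then stop it; the path is an example.
 */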
4380
4381 static ssize_t
4382 last_sync_action_show(struct mddev *mddev, char *page)
4383 {
4384         return sprintf(page, "%s\n", mddev->last_sync_action);
4385 }
4386
4387 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4388
4389 static ssize_t
4390 mismatch_cnt_show(struct mddev *mddev, char *page)
4391 {
4392         return sprintf(page, "%llu\n",
4393                        (unsigned long long)
4394                        atomic64_read(&mddev->resync_mismatches));
4395 }
4396
4397 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4398
4399 static ssize_t
4400 sync_min_show(struct mddev *mddev, char *page)
4401 {
4402         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4403                        mddev->sync_speed_min ? "local": "system");
4404 }
4405
4406 static ssize_t
4407 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4408 {
4409         unsigned int min;
4410         int rv;
4411
4412         if (strncmp(buf, "system", 6)==0) {
4413                 min = 0;
4414         } else {
4415                 rv = kstrtouint(buf, 10, &min);
4416                 if (rv < 0)
4417                         return rv;
4418                 if (min == 0)
4419                         return -EINVAL;
4420         }
4421         mddev->sync_speed_min = min;
4422         return len;
4423 }
4424
4425 static struct md_sysfs_entry md_sync_min =
4426 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4427
4428 static ssize_t
4429 sync_max_show(struct mddev *mddev, char *page)
4430 {
4431         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4432                        mddev->sync_speed_max ? "local": "system");
4433 }
4434
4435 static ssize_t
4436 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4437 {
4438         unsigned int max;
4439         int rv;
4440
4441         if (strncmp(buf, "system", 6)==0) {
4442                 max = 0;
4443         } else {
4444                 rv = kstrtouint(buf, 10, &max);
4445                 if (rv < 0)
4446                         return rv;
4447                 if (max == 0)
4448                         return -EINVAL;
4449         }
4450         mddev->sync_speed_max = max;
4451         return len;
4452 }
4453
4454 static struct md_sysfs_entry md_sync_max =
4455 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
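/*
 * Illustrative usage only: sync_speed_min and sync_speed_max take a rate
 * in KiB/sec, or the word "system" to fall back to the system-wide
 * defaults (stored as 0 by the handlers above).  For example
 *
 *   echo 50000  > /sys/block/md0/md/sync_speed_min
 *   echo system > /sys/block/md0/md/sync_speed_max
 *
 * Paths and numbers are examples only.
 */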
4456
4457 static ssize_t
4458 degraded_show(struct mddev *mddev, char *page)
4459 {
4460         return sprintf(page, "%d\n", mddev->degraded);
4461 }
4462 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4463
4464 static ssize_t
4465 sync_force_parallel_show(struct mddev *mddev, char *page)
4466 {
4467         return sprintf(page, "%d\n", mddev->parallel_resync);
4468 }
4469
4470 static ssize_t
4471 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4472 {
4473         long n;
4474
4475         if (kstrtol(buf, 10, &n))
4476                 return -EINVAL;
4477
4478         if (n != 0 && n != 1)
4479                 return -EINVAL;
4480
4481         mddev->parallel_resync = n;
4482
4483         if (mddev->sync_thread)
4484                 wake_up(&resync_wait);
4485
4486         return len;
4487 }
4488
4489 /* force parallel resync, even with shared block devices */
4490 static struct md_sysfs_entry md_sync_force_parallel =
4491 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4492        sync_force_parallel_show, sync_force_parallel_store);
4493
4494 static ssize_t
4495 sync_speed_show(struct mddev *mddev, char *page)
4496 {
4497         unsigned long resync, dt, db;
4498         if (mddev->curr_resync == 0)
4499                 return sprintf(page, "none\n");
4500         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4501         dt = (jiffies - mddev->resync_mark) / HZ;
4502         if (!dt) dt++;
4503         db = resync - mddev->resync_mark_cnt;
4504         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4505 }
4506
4507 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4508
4509 static ssize_t
4510 sync_completed_show(struct mddev *mddev, char *page)
4511 {
4512         unsigned long long max_sectors, resync;
4513
4514         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4515                 return sprintf(page, "none\n");
4516
4517         if (mddev->curr_resync == 1 ||
4518             mddev->curr_resync == 2)
4519                 return sprintf(page, "delayed\n");
4520
4521         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4522             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4523                 max_sectors = mddev->resync_max_sectors;
4524         else
4525                 max_sectors = mddev->dev_sectors;
4526
4527         resync = mddev->curr_resync_completed;
4528         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4529 }
4530
4531 static struct md_sysfs_entry md_sync_completed =
4532         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4533
4534 static ssize_t
4535 min_sync_show(struct mddev *mddev, char *page)
4536 {
4537         return sprintf(page, "%llu\n",
4538                        (unsigned long long)mddev->resync_min);
4539 }
4540 static ssize_t
4541 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4542 {
4543         unsigned long long min;
4544         int err;
4545
4546         if (kstrtoull(buf, 10, &min))
4547                 return -EINVAL;
4548
4549         spin_lock(&mddev->lock);
4550         err = -EINVAL;
4551         if (min > mddev->resync_max)
4552                 goto out_unlock;
4553
4554         err = -EBUSY;
4555         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4556                 goto out_unlock;
4557
4558         /* Round down to multiple of 4K for safety */
4559         mddev->resync_min = round_down(min, 8);
4560         err = 0;
4561
4562 out_unlock:
4563         spin_unlock(&mddev->lock);
4564         return err ?: len;
4565 }
4566
4567 static struct md_sysfs_entry md_min_sync =
4568 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4569
4570 static ssize_t
4571 max_sync_show(struct mddev *mddev, char *page)
4572 {
4573         if (mddev->resync_max == MaxSector)
4574                 return sprintf(page, "max\n");
4575         else
4576                 return sprintf(page, "%llu\n",
4577                                (unsigned long long)mddev->resync_max);
4578 }
4579 static ssize_t
4580 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4581 {
4582         int err;
4583         spin_lock(&mddev->lock);
4584         if (strncmp(buf, "max", 3) == 0)
4585                 mddev->resync_max = MaxSector;
4586         else {
4587                 unsigned long long max;
4588                 int chunk;
4589
4590                 err = -EINVAL;
4591                 if (kstrtoull(buf, 10, &max))
4592                         goto out_unlock;
4593                 if (max < mddev->resync_min)
4594                         goto out_unlock;
4595
4596                 err = -EBUSY;
4597                 if (max < mddev->resync_max &&
4598                     mddev->ro == 0 &&
4599                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4600                         goto out_unlock;
4601
4602                 /* Must be a multiple of chunk_size */
4603                 chunk = mddev->chunk_sectors;
4604                 if (chunk) {
4605                         sector_t temp = max;
4606
4607                         err = -EINVAL;
4608                         if (sector_div(temp, chunk))
4609                                 goto out_unlock;
4610                 }
4611                 mddev->resync_max = max;
4612         }
4613         wake_up(&mddev->recovery_wait);
4614         err = 0;
4615 out_unlock:
4616         spin_unlock(&mddev->lock);
4617         return err ?: len;
4618 }
4619
4620 static struct md_sysfs_entry md_max_sync =
4621 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
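/*
 * Illustrative usage only: sync_min and sync_max bound the resync window
 * in 512-byte sectors; sync_min is rounded down to a 4K multiple, and
 * sync_max must be a multiple of the chunk size or the word "max".  For
 * example (path and values are examples only)
 *
 *   echo 0   > /sys/block/md0/md/sync_min
 *   echo max > /sys/block/md0/md/sync_max
 *
 * restores the full range; note that sync_min can only be changed while no
 * resync is running, per the -EBUSY check above.
 */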
4622
4623 static ssize_t
4624 suspend_lo_show(struct mddev *mddev, char *page)
4625 {
4626         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4627 }
4628
4629 static ssize_t
4630 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4631 {
4632         unsigned long long old, new;
4633         int err;
4634
4635         err = kstrtoull(buf, 10, &new);
4636         if (err < 0)
4637                 return err;
4638         if (new != (sector_t)new)
4639                 return -EINVAL;
4640
4641         err = mddev_lock(mddev);
4642         if (err)
4643                 return err;
4644         err = -EINVAL;
4645         if (mddev->pers == NULL ||
4646             mddev->pers->quiesce == NULL)
4647                 goto unlock;
4648         old = mddev->suspend_lo;
4649         mddev->suspend_lo = new;
4650         if (new >= old)
4651                 /* Shrinking suspended region */
4652                 mddev->pers->quiesce(mddev, 2);
4653         else {
4654                 /* Expanding suspended region - need to wait */
4655                 mddev->pers->quiesce(mddev, 1);
4656                 mddev->pers->quiesce(mddev, 0);
4657         }
4658         err = 0;
4659 unlock:
4660         mddev_unlock(mddev);
4661         return err ?: len;
4662 }
4663 static struct md_sysfs_entry md_suspend_lo =
4664 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4665
4666 static ssize_t
4667 suspend_hi_show(struct mddev *mddev, char *page)
4668 {
4669         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4670 }
4671
4672 static ssize_t
4673 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4674 {
4675         unsigned long long old, new;
4676         int err;
4677
4678         err = kstrtoull(buf, 10, &new);
4679         if (err < 0)
4680                 return err;
4681         if (new != (sector_t)new)
4682                 return -EINVAL;
4683
4684         err = mddev_lock(mddev);
4685         if (err)
4686                 return err;
4687         err = -EINVAL;
4688         if (mddev->pers == NULL ||
4689             mddev->pers->quiesce == NULL)
4690                 goto unlock;
4691         old = mddev->suspend_hi;
4692         mddev->suspend_hi = new;
4693         if (new <= old)
4694                 /* Shrinking suspended region */
4695                 mddev->pers->quiesce(mddev, 2);
4696         else {
4697                 /* Expanding suspended region - need to wait */
4698                 mddev->pers->quiesce(mddev, 1);
4699                 mddev->pers->quiesce(mddev, 0);
4700         }
4701         err = 0;
4702 unlock:
4703         mddev_unlock(mddev);
4704         return err ?: len;
4705 }
4706 static struct md_sysfs_entry md_suspend_hi =
4707 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
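/*
 * Illustrative usage only: suspend_lo and suspend_hi describe, in 512-byte
 * sectors, a region of the array in which I/O is temporarily suspended
 * (used, for instance, while external metadata handlers manipulate that
 * region).  For example
 *
 *   echo 0      > /sys/block/md0/md/suspend_lo
 *   echo 131072 > /sys/block/md0/md/suspend_hi
 *
 * would suspend the first 64MiB; values and paths are examples only.
 */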
4708
4709 static ssize_t
4710 reshape_position_show(struct mddev *mddev, char *page)
4711 {
4712         if (mddev->reshape_position != MaxSector)
4713                 return sprintf(page, "%llu\n",
4714                                (unsigned long long)mddev->reshape_position);
4715         strcpy(page, "none\n");
4716         return 5;
4717 }
4718
4719 static ssize_t
4720 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4721 {
4722         struct md_rdev *rdev;
4723         unsigned long long new;
4724         int err;
4725
4726         err = kstrtoull(buf, 10, &new);
4727         if (err < 0)
4728                 return err;
4729         if (new != (sector_t)new)
4730                 return -EINVAL;
4731         err = mddev_lock(mddev);
4732         if (err)
4733                 return err;
4734         err = -EBUSY;
4735         if (mddev->pers)
4736                 goto unlock;
4737         mddev->reshape_position = new;
4738         mddev->delta_disks = 0;
4739         mddev->reshape_backwards = 0;
4740         mddev->new_level = mddev->level;
4741         mddev->new_layout = mddev->layout;
4742         mddev->new_chunk_sectors = mddev->chunk_sectors;
4743         rdev_for_each(rdev, mddev)
4744                 rdev->new_data_offset = rdev->data_offset;
4745         err = 0;
4746 unlock:
4747         mddev_unlock(mddev);
4748         return err ?: len;
4749 }
4750
4751 static struct md_sysfs_entry md_reshape_position =
4752 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4753        reshape_position_store);
4754
4755 static ssize_t
4756 reshape_direction_show(struct mddev *mddev, char *page)
4757 {
4758         return sprintf(page, "%s\n",
4759                        mddev->reshape_backwards ? "backwards" : "forwards");
4760 }
4761
4762 static ssize_t
4763 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4764 {
4765         int backwards = 0;
4766         int err;
4767
4768         if (cmd_match(buf, "forwards"))
4769                 backwards = 0;
4770         else if (cmd_match(buf, "backwards"))
4771                 backwards = 1;
4772         else
4773                 return -EINVAL;
4774         if (mddev->reshape_backwards == backwards)
4775                 return len;
4776
4777         err = mddev_lock(mddev);
4778         if (err)
4779                 return err;
4780         /* check if we are allowed to change */
4781         if (mddev->delta_disks)
4782                 err = -EBUSY;
4783         else if (mddev->persistent &&
4784             mddev->major_version == 0)
4785                 err =  -EINVAL;
4786         else
4787                 mddev->reshape_backwards = backwards;
4788         mddev_unlock(mddev);
4789         return err ?: len;
4790 }
4791
4792 static struct md_sysfs_entry md_reshape_direction =
4793 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4794        reshape_direction_store);
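/*
 * Illustrative usage only: reshape_position reads back "none" or the
 * current reshape offset in sectors, and can only be written while the
 * array is inactive (note the -EBUSY check above); reshape_direction is
 * either "forwards" or "backwards".  These are normally driven by
 * assembly tools rather than set by hand, e.g.
 *
 *   cat /sys/block/md0/md/reshape_position
 *
 * The path is an example only.
 */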
4795
4796 static ssize_t
4797 array_size_show(struct mddev *mddev, char *page)
4798 {
4799         if (mddev->external_size)
4800                 return sprintf(page, "%llu\n",
4801                                (unsigned long long)mddev->array_sectors/2);
4802         else
4803                 return sprintf(page, "default\n");
4804 }
4805
4806 static ssize_t
4807 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4808 {
4809         sector_t sectors;
4810         int err;
4811
4812         /* cluster raid doesn't support changing array_sectors */
4813         if (mddev_is_clustered(mddev))
4814                 return -EINVAL;
4815
4816         err = mddev_lock(mddev);
4817         if (err)
4818                 return err;
4819
4820         if (strncmp(buf, "default", 7) == 0) {
4821                 if (mddev->pers)
4822                         sectors = mddev->pers->size(mddev, 0, 0);
4823                 else
4824                         sectors = mddev->array_sectors;
4825
4826                 mddev->external_size = 0;
4827         } else {
4828                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4829                         err = -EINVAL;
4830                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4831                         err = -E2BIG;
4832                 else
4833                         mddev->external_size = 1;
4834         }
4835
4836         if (!err) {
4837                 mddev->array_sectors = sectors;
4838                 if (mddev->pers) {
4839                         set_capacity(mddev->gendisk, mddev->array_sectors);
4840                         revalidate_disk(mddev->gendisk);
4841                 }
4842         }
4843         mddev_unlock(mddev);
4844         return err ?: len;
4845 }
4846
4847 static struct md_sysfs_entry md_array_size =
4848 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4849        array_size_store);
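/*
 * Illustrative usage only: array_size accepts either the word "default",
 * meaning the size computed by the personality, or an explicit size in 1K
 * blocks that must not exceed what the personality can provide.  For
 * example
 *
 *   echo default > /sys/block/md0/md/array_size
 *
 * reverts to the computed size; the path is an example only.
 */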
4850
4851 static struct attribute *md_default_attrs[] = {
4852         &md_level.attr,
4853         &md_layout.attr,
4854         &md_raid_disks.attr,
4855         &md_chunk_size.attr,
4856         &md_size.attr,
4857         &md_resync_start.attr,
4858         &md_metadata.attr,
4859         &md_new_device.attr,
4860         &md_safe_delay.attr,
4861         &md_array_state.attr,
4862         &md_reshape_position.attr,
4863         &md_reshape_direction.attr,
4864         &md_array_size.attr,
4865         &max_corr_read_errors.attr,
4866         NULL,
4867 };
4868
4869 static struct attribute *md_redundancy_attrs[] = {
4870         &md_scan_mode.attr,
4871         &md_last_scan_mode.attr,
4872         &md_mismatches.attr,
4873         &md_sync_min.attr,
4874         &md_sync_max.attr,
4875         &md_sync_speed.attr,
4876         &md_sync_force_parallel.attr,
4877         &md_sync_completed.attr,
4878         &md_min_sync.attr,
4879         &md_max_sync.attr,
4880         &md_suspend_lo.attr,
4881         &md_suspend_hi.attr,
4882         &md_bitmap.attr,
4883         &md_degraded.attr,
4884         NULL,
4885 };
4886 static struct attribute_group md_redundancy_group = {
4887         .name = NULL,
4888         .attrs = md_redundancy_attrs,
4889 };
4890
4891 static ssize_t
4892 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4893 {
4894         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4895         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4896         ssize_t rv;
4897
4898         if (!entry->show)
4899                 return -EIO;
4900         spin_lock(&all_mddevs_lock);
4901         if (list_empty(&mddev->all_mddevs)) {
4902                 spin_unlock(&all_mddevs_lock);
4903                 return -EBUSY;
4904         }
4905         mddev_get(mddev);
4906         spin_unlock(&all_mddevs_lock);
4907
4908         rv = entry->show(mddev, page);
4909         mddev_put(mddev);
4910         return rv;
4911 }
4912
4913 static ssize_t
4914 md_attr_store(struct kobject *kobj, struct attribute *attr,
4915               const char *page, size_t length)
4916 {
4917         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4918         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4919         ssize_t rv;
4920
4921         if (!entry->store)
4922                 return -EIO;
4923         if (!capable(CAP_SYS_ADMIN))
4924                 return -EACCES;
4925         spin_lock(&all_mddevs_lock);
4926         if (list_empty(&mddev->all_mddevs)) {
4927                 spin_unlock(&all_mddevs_lock);
4928                 return -EBUSY;
4929         }
4930         mddev_get(mddev);
4931         spin_unlock(&all_mddevs_lock);
4932         rv = entry->store(mddev, page, length);
4933         mddev_put(mddev);
4934         return rv;
4935 }
4936
4937 static void md_free(struct kobject *ko)
4938 {
4939         struct mddev *mddev = container_of(ko, struct mddev, kobj);
4940
4941         if (mddev->sysfs_state)
4942                 sysfs_put(mddev->sysfs_state);
4943
4944         if (mddev->queue)
4945                 blk_cleanup_queue(mddev->queue);
4946         if (mddev->gendisk) {
4947                 del_gendisk(mddev->gendisk);
4948                 put_disk(mddev->gendisk);
4949         }
4950
4951         kfree(mddev);
4952 }
4953
4954 static const struct sysfs_ops md_sysfs_ops = {
4955         .show   = md_attr_show,
4956         .store  = md_attr_store,
4957 };
4958 static struct kobj_type md_ktype = {
4959         .release        = md_free,
4960         .sysfs_ops      = &md_sysfs_ops,
4961         .default_attrs  = md_default_attrs,
4962 };
4963
4964 int mdp_major = 0;
4965
4966 static void mddev_delayed_delete(struct work_struct *ws)
4967 {
4968         struct mddev *mddev = container_of(ws, struct mddev, del_work);
4969
4970         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4971         kobject_del(&mddev->kobj);
4972         kobject_put(&mddev->kobj);
4973 }
4974
4975 static int md_alloc(dev_t dev, char *name)
4976 {
4977         static DEFINE_MUTEX(disks_mutex);
4978         struct mddev *mddev = mddev_find(dev);
4979         struct gendisk *disk;
4980         int partitioned;
4981         int shift;
4982         int unit;
4983         int error;
4984
4985         if (!mddev)
4986                 return -ENODEV;
4987
4988         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4989         shift = partitioned ? MdpMinorShift : 0;
4990         unit = MINOR(mddev->unit) >> shift;
4991
4992         /* wait for any previous instance of this device to be
4993          * completely removed (mddev_delayed_delete).
4994          */
4995         flush_workqueue(md_misc_wq);
4996
4997         mutex_lock(&disks_mutex);
4998         error = -EEXIST;
4999         if (mddev->gendisk)
5000                 goto abort;
5001
5002         if (name) {
5003                 /* Need to ensure that 'name' is not a duplicate.
5004                  */
5005                 struct mddev *mddev2;
5006                 spin_lock(&all_mddevs_lock);
5007
5008                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5009                         if (mddev2->gendisk &&
5010                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5011                                 spin_unlock(&all_mddevs_lock);
5012                                 goto abort;
5013                         }
5014                 spin_unlock(&all_mddevs_lock);
5015         }
5016
5017         error = -ENOMEM;
5018         mddev->queue = blk_alloc_queue(GFP_KERNEL);
5019         if (!mddev->queue)
5020                 goto abort;
5021         mddev->queue->queuedata = mddev;
5022
5023         blk_queue_make_request(mddev->queue, md_make_request);
5024         blk_set_stacking_limits(&mddev->queue->limits);
5025
5026         disk = alloc_disk(1 << shift);
5027         if (!disk) {
5028                 blk_cleanup_queue(mddev->queue);
5029                 mddev->queue = NULL;
5030                 goto abort;
5031         }
5032         disk->major = MAJOR(mddev->unit);
5033         disk->first_minor = unit << shift;
5034         if (name)
5035                 strcpy(disk->disk_name, name);
5036         else if (partitioned)
5037                 sprintf(disk->disk_name, "md_d%d", unit);
5038         else
5039                 sprintf(disk->disk_name, "md%d", unit);
5040         disk->fops = &md_fops;
5041         disk->private_data = mddev;
5042         disk->queue = mddev->queue;
5043         blk_queue_write_cache(mddev->queue, true, true);
5044         /* Allow extended partitions.  This makes the
5045          * 'mdp' device redundant, but we can't really
5046          * remove it now.
5047          */
5048         disk->flags |= GENHD_FL_EXT_DEVT;
5049         mddev->gendisk = disk;
5050         /* As soon as we call add_disk(), another thread could get
5051          * through to md_open, so make sure it doesn't get too far
5052          */
5053         mutex_lock(&mddev->open_mutex);
5054         add_disk(disk);
5055
5056         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
5057                                      &disk_to_dev(disk)->kobj, "%s", "md");
5058         if (error) {
5059                 /* This isn't possible, but as kobject_init_and_add is marked
5060                  * __must_check, we must do something with the result
5061                  */
5062                 pr_debug("md: cannot register %s/md - name in use\n",
5063                          disk->disk_name);
5064                 error = 0;
5065         }
5066         if (mddev->kobj.sd &&
5067             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5068                 pr_debug("pointless warning\n");
5069         mutex_unlock(&mddev->open_mutex);
5070  abort:
5071         mutex_unlock(&disks_mutex);
5072         if (!error && mddev->kobj.sd) {
5073                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5074                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5075         }
5076         mddev_put(mddev);
5077         return error;
5078 }
5079
5080 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5081 {
5082         md_alloc(dev, NULL);
5083         return NULL;
5084 }
5085
5086 static int add_named_array(const char *val, struct kernel_param *kp)
5087 {
5088         /* val must be "md_*" where * is not all digits.
5089          * We allocate an array with a large free minor number, and
5090          * set the name to val.  val must not already be an active name.
5091          */
5092         int len = strlen(val);
5093         char buf[DISK_NAME_LEN];
5094
5095         while (len && val[len-1] == '\n')
5096                 len--;
5097         if (len >= DISK_NAME_LEN)
5098                 return -E2BIG;
5099         strlcpy(buf, val, len+1);
5100         if (strncmp(buf, "md_", 3) != 0)
5101                 return -EINVAL;
5102         return md_alloc(0, buf);
5103 }
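/*
 * Illustrative usage only: in mainline this handler is wired up as the
 * "new_array" module parameter of md_mod (the module_param_call() for it
 * is outside this excerpt), so creating a named array typically looks like
 *
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 *
 * which allocates an inactive array whose gendisk is named md_home, ready
 * for assembly.  Treat the parameter wiring described here as an
 * assumption; only md_alloc(0, buf) is shown above.
 */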
5104
5105 static void md_safemode_timeout(unsigned long data)
5106 {
5107         struct mddev *mddev = (struct mddev *) data;
5108
5109         if (!atomic_read(&mddev->writes_pending)) {
5110                 mddev->safemode = 1;
5111                 if (mddev->external)
5112                         sysfs_notify_dirent_safe(mddev->sysfs_state);
5113         }
5114         md_wakeup_thread(mddev->thread);
5115 }
5116
5117 static int start_dirty_degraded;
5118
5119 int md_run(struct mddev *mddev)
5120 {
5121         int err;
5122         struct md_rdev *rdev;
5123         struct md_personality *pers;
5124
5125         if (list_empty(&mddev->disks))
5126                 /* cannot run an array with no devices.. */
5127                 return -EINVAL;
5128
5129         if (mddev->pers)
5130                 return -EBUSY;
5131         /* Cannot run until previous stop completes properly */
5132         if (mddev->sysfs_active)
5133                 return -EBUSY;
5134
5135         /*
5136          * Analyze all RAID superblock(s)
5137          */
5138         if (!mddev->raid_disks) {
5139                 if (!mddev->persistent)
5140                         return -EINVAL;
5141                 analyze_sbs(mddev);
5142         }
5143
5144         if (mddev->level != LEVEL_NONE)
5145                 request_module("md-level-%d", mddev->level);
5146         else if (mddev->clevel[0])
5147                 request_module("md-%s", mddev->clevel);
5148
5149         /*
5150          * Drop all container device buffers; from now on
5151          * the only valid external interface is through the md
5152          * device.
5153          */
5154         rdev_for_each(rdev, mddev) {
5155                 if (test_bit(Faulty, &rdev->flags))
5156                         continue;
5157                 sync_blockdev(rdev->bdev);
5158                 invalidate_bdev(rdev->bdev);
5159
5160                 /* perform some consistency tests on the device.
5161                  * We don't want the data to overlap the metadata;
5162                  * internal bitmap issues have been handled elsewhere.
5163                  */
5164                 if (rdev->meta_bdev) {
5165                         /* Nothing to check */;
5166                 } else if (rdev->data_offset < rdev->sb_start) {
5167                         if (mddev->dev_sectors &&
5168                             rdev->data_offset + mddev->dev_sectors
5169                             > rdev->sb_start) {
5170                                 pr_warn("md: %s: data overlaps metadata\n",
5171                                         mdname(mddev));
5172                                 return -EINVAL;
5173                         }
5174                 } else {
5175                         if (rdev->sb_start + rdev->sb_size/512
5176                             > rdev->data_offset) {
5177                                 pr_warn("md: %s: metadata overlaps data\n",
5178                                         mdname(mddev));
5179                                 return -EINVAL;
5180                         }
5181                 }
5182                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5183         }
5184
5185         if (mddev->bio_set == NULL)
5186                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5187
5188         spin_lock(&pers_lock);
5189         pers = find_pers(mddev->level, mddev->clevel);
5190         if (!pers || !try_module_get(pers->owner)) {
5191                 spin_unlock(&pers_lock);
5192                 if (mddev->level != LEVEL_NONE)
5193                         pr_warn("md: personality for level %d is not loaded!\n",
5194                                 mddev->level);
5195                 else
5196                         pr_warn("md: personality for level %s is not loaded!\n",
5197                                 mddev->clevel);
5198                 return -EINVAL;
5199         }
5200         spin_unlock(&pers_lock);
5201         if (mddev->level != pers->level) {
5202                 mddev->level = pers->level;
5203                 mddev->new_level = pers->level;
5204         }
5205         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5206
5207         if (mddev->reshape_position != MaxSector &&
5208             pers->start_reshape == NULL) {
5209                 /* This personality cannot handle reshaping... */
5210                 module_put(pers->owner);
5211                 return -EINVAL;
5212         }
5213
5214         if (pers->sync_request) {
5215                 /* Warn if this is a potentially silly
5216                  * configuration.
5217                  */
5218                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5219                 struct md_rdev *rdev2;
5220                 int warned = 0;
5221
5222                 rdev_for_each(rdev, mddev)
5223                         rdev_for_each(rdev2, mddev) {
5224                                 if (rdev < rdev2 &&
5225                                     rdev->bdev->bd_contains ==
5226                                     rdev2->bdev->bd_contains) {
5227                                         pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5228                                                 mdname(mddev),
5229                                                 bdevname(rdev->bdev,b),
5230                                                 bdevname(rdev2->bdev,b2));
5231                                         warned = 1;
5232                                 }
5233                         }
5234
5235                 if (warned)
5236                         pr_warn("True protection against single-disk failure might be compromised.\n");
5237         }
5238
5239         mddev->recovery = 0;
5240         /* may be overridden by the personality */
5241         mddev->resync_max_sectors = mddev->dev_sectors;
5242
5243         mddev->ok_start_degraded = start_dirty_degraded;
5244
5245         if (start_readonly && mddev->ro == 0)
5246                 mddev->ro = 2; /* read-only, but switch on first write */
5247
5248         err = pers->run(mddev);
5249         if (err)
5250                 pr_warn("md: pers->run() failed ...\n");
5251         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5252                 WARN_ONCE(!mddev->external_size,
5253                           "%s: default size too small, but 'external_size' not in effect?\n",
5254                           __func__);
5255                 pr_warn("md: invalid array_size %llu > default size %llu\n",
5256                         (unsigned long long)mddev->array_sectors / 2,
5257                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
5258                 err = -EINVAL;
5259         }
5260         if (err == 0 && pers->sync_request &&
5261             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5262                 struct bitmap *bitmap;
5263
5264                 bitmap = bitmap_create(mddev, -1);
5265                 if (IS_ERR(bitmap)) {
5266                         err = PTR_ERR(bitmap);
5267                         pr_warn("%s: failed to create bitmap (%d)\n",
5268                                 mdname(mddev), err);
5269                 } else
5270                         mddev->bitmap = bitmap;
5271
5272         }
5273         if (err) {
5274                 mddev_detach(mddev);
5275                 if (mddev->private)
5276                         pers->free(mddev, mddev->private);
5277                 mddev->private = NULL;
5278                 module_put(pers->owner);
5279                 bitmap_destroy(mddev);
5280                 return err;
5281         }
5282         if (mddev->queue) {
5283                 bool nonrot = true;
5284
5285                 rdev_for_each(rdev, mddev) {
5286                         if (rdev->raid_disk >= 0 &&
5287                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5288                                 nonrot = false;
5289                                 break;
5290                         }
5291                 }
5292                 if (mddev->degraded)
5293                         nonrot = false;
5294                 if (nonrot)
5295                         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5296                 else
5297                         queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5298                 mddev->queue->backing_dev_info.congested_data = mddev;
5299                 mddev->queue->backing_dev_info.congested_fn = md_congested;
5300         }
5301         if (pers->sync_request) {
5302                 if (mddev->kobj.sd &&
5303                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5304                         pr_warn("md: cannot register extra attributes for %s\n",
5305                                 mdname(mddev));
5306                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5307         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5308                 mddev->ro = 0;
5309
5310         atomic_set(&mddev->writes_pending,0);
5311         atomic_set(&mddev->max_corr_read_errors,
5312                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5313         mddev->safemode = 0;
5314         if (mddev_is_clustered(mddev))
5315                 mddev->safemode_delay = 0;
5316         else
5317                 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5318         mddev->in_sync = 1;
5319         smp_wmb();
5320         spin_lock(&mddev->lock);
5321         mddev->pers = pers;
5322         spin_unlock(&mddev->lock);
5323         rdev_for_each(rdev, mddev)
5324                 if (rdev->raid_disk >= 0)
5325                         if (sysfs_link_rdev(mddev, rdev))
5326                                 /* failure here is OK */;
5327
5328         if (mddev->degraded && !mddev->ro)
5329                 /* This ensures that recovering status is reported immediately
5330                  * via sysfs - until a lack of spares is confirmed.
5331                  */
5332                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5333         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5334
5335         if (mddev->flags & MD_UPDATE_SB_FLAGS)
5336                 md_update_sb(mddev, 0);
5337
5338         md_new_event(mddev);
5339         sysfs_notify_dirent_safe(mddev->sysfs_state);
5340         sysfs_notify_dirent_safe(mddev->sysfs_action);
5341         sysfs_notify(&mddev->kobj, NULL, "degraded");
5342         return 0;
5343 }
5344 EXPORT_SYMBOL_GPL(md_run);
5345
5346 static int do_md_run(struct mddev *mddev)
5347 {
5348         int err;
5349
5350         err = md_run(mddev);
5351         if (err)
5352                 goto out;
5353         err = bitmap_load(mddev);
5354         if (err) {
5355                 bitmap_destroy(mddev);
5356                 goto out;
5357         }
5358
5359         if (mddev_is_clustered(mddev))
5360                 md_allow_write(mddev);
5361
5362         md_wakeup_thread(mddev->thread);
5363         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5364
5365         set_capacity(mddev->gendisk, mddev->array_sectors);
5366         revalidate_disk(mddev->gendisk);
5367         mddev->changed = 1;
5368         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5369 out:
5370         return err;
5371 }
5372
5373 static int restart_array(struct mddev *mddev)
5374 {
5375         struct gendisk *disk = mddev->gendisk;
5376
5377         /* Complain if it has no devices */
5378         if (list_empty(&mddev->disks))
5379                 return -ENXIO;
5380         if (!mddev->pers)
5381                 return -EINVAL;
5382         if (!mddev->ro)
5383                 return -EBUSY;
5384         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5385                 struct md_rdev *rdev;
5386                 bool has_journal = false;
5387
5388                 rcu_read_lock();
5389                 rdev_for_each_rcu(rdev, mddev) {
5390                         if (test_bit(Journal, &rdev->flags) &&
5391                             !test_bit(Faulty, &rdev->flags)) {
5392                                 has_journal = true;
5393                                 break;
5394                         }
5395                 }
5396                 rcu_read_unlock();
5397
5398                 /* Don't restart rw with journal missing/faulty */
5399                 if (!has_journal)
5400                         return -EINVAL;
5401         }
5402
5403         mddev->safemode = 0;
5404         mddev->ro = 0;
5405         set_disk_ro(disk, 0);
5406         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
5407         /* Kick recovery or resync if necessary */
5408         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5409         md_wakeup_thread(mddev->thread);
5410         md_wakeup_thread(mddev->sync_thread);
5411         sysfs_notify_dirent_safe(mddev->sysfs_state);
5412         return 0;
5413 }
5414
5415 static void md_clean(struct mddev *mddev)
5416 {
5417         mddev->array_sectors = 0;
5418         mddev->external_size = 0;
5419         mddev->dev_sectors = 0;
5420         mddev->raid_disks = 0;
5421         mddev->recovery_cp = 0;
5422         mddev->resync_min = 0;
5423         mddev->resync_max = MaxSector;
5424         mddev->reshape_position = MaxSector;
5425         mddev->external = 0;
5426         mddev->persistent = 0;
5427         mddev->level = LEVEL_NONE;
5428         mddev->clevel[0] = 0;
5429         mddev->flags = 0;
5430         mddev->ro = 0;
5431         mddev->metadata_type[0] = 0;
5432         mddev->chunk_sectors = 0;
5433         mddev->ctime = mddev->utime = 0;
5434         mddev->layout = 0;
5435         mddev->max_disks = 0;
5436         mddev->events = 0;
5437         mddev->can_decrease_events = 0;
5438         mddev->delta_disks = 0;
5439         mddev->reshape_backwards = 0;
5440         mddev->new_level = LEVEL_NONE;
5441         mddev->new_layout = 0;
5442         mddev->new_chunk_sectors = 0;
5443         mddev->curr_resync = 0;
5444         atomic64_set(&mddev->resync_mismatches, 0);
5445         mddev->suspend_lo = mddev->suspend_hi = 0;
5446         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5447         mddev->recovery = 0;
5448         mddev->in_sync = 0;
5449         mddev->changed = 0;
5450         mddev->degraded = 0;
5451         mddev->safemode = 0;
5452         mddev->private = NULL;
5453         mddev->cluster_info = NULL;
5454         mddev->bitmap_info.offset = 0;
5455         mddev->bitmap_info.default_offset = 0;
5456         mddev->bitmap_info.default_space = 0;
5457         mddev->bitmap_info.chunksize = 0;
5458         mddev->bitmap_info.daemon_sleep = 0;
5459         mddev->bitmap_info.max_write_behind = 0;
5460         mddev->bitmap_info.nodes = 0;
5461 }
5462
5463 static void __md_stop_writes(struct mddev *mddev)
5464 {
5465         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5466         flush_workqueue(md_misc_wq);
5467         if (mddev->sync_thread) {
5468                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5469                 md_reap_sync_thread(mddev);
5470         }
5471
5472         del_timer_sync(&mddev->safemode_timer);
5473
5474         bitmap_flush(mddev);
5475         md_super_wait(mddev);
5476
5477         if (mddev->ro == 0 &&
5478             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
5479              (mddev->flags & MD_UPDATE_SB_FLAGS))) {
5480                 /* mark array as shutdown cleanly */
5481                 if (!mddev_is_clustered(mddev))
5482                         mddev->in_sync = 1;
5483                 md_update_sb(mddev, 1);
5484         }
5485 }
5486
5487 void md_stop_writes(struct mddev *mddev)
5488 {
5489         mddev_lock_nointr(mddev);
5490         __md_stop_writes(mddev);
5491         mddev_unlock(mddev);
5492 }
5493 EXPORT_SYMBOL_GPL(md_stop_writes);
5494
5495 static void mddev_detach(struct mddev *mddev)
5496 {
5497         struct bitmap *bitmap = mddev->bitmap;
5498         /* wait for behind writes to complete */
5499         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5500                 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
5501                          mdname(mddev));
5502                 /* need to kick something here to make sure I/O goes? */
5503                 wait_event(bitmap->behind_wait,
5504                            atomic_read(&bitmap->behind_writes) == 0);
5505         }
5506         if (mddev->pers && mddev->pers->quiesce) {
5507                 mddev->pers->quiesce(mddev, 1);
5508                 mddev->pers->quiesce(mddev, 0);
5509         }
5510         md_unregister_thread(&mddev->thread);
5511         if (mddev->queue)
5512                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5513 }
5514
5515 static void __md_stop(struct mddev *mddev)
5516 {
5517         struct md_personality *pers = mddev->pers;
5518         mddev_detach(mddev);
5519         /* Ensure ->event_work is done */
5520         flush_workqueue(md_misc_wq);
5521         spin_lock(&mddev->lock);
5522         mddev->pers = NULL;
5523         spin_unlock(&mddev->lock);
5524         pers->free(mddev, mddev->private);
5525         mddev->private = NULL;
5526         if (pers->sync_request && mddev->to_remove == NULL)
5527                 mddev->to_remove = &md_redundancy_group;
5528         module_put(pers->owner);
5529         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5530 }
5531
5532 void md_stop(struct mddev *mddev)
5533 {
5534         /* stop the array and free any attached data structures.
5535          * This is called from dm-raid.
5536          */
5537         __md_stop(mddev);
5538         bitmap_destroy(mddev);
5539         if (mddev->bio_set)
5540                 bioset_free(mddev->bio_set);
5541 }
5542
5543 EXPORT_SYMBOL_GPL(md_stop);
5544
5545 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5546 {
5547         int err = 0;
5548         int did_freeze = 0;
5549
5550         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5551                 did_freeze = 1;
5552                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5553                 md_wakeup_thread(mddev->thread);
5554         }
5555         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5556                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5557         if (mddev->sync_thread)
5558                 /* Thread might be blocked waiting for metadata update
5559                  * which will now never happen */
5560                 wake_up_process(mddev->sync_thread->tsk);
5561
5562         if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
5563                 return -EBUSY;
5564         mddev_unlock(mddev);
5565         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5566                                           &mddev->recovery));
5567         wait_event(mddev->sb_wait,
5568                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5569         mddev_lock_nointr(mddev);
5570
5571         mutex_lock(&mddev->open_mutex);
5572         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5573             mddev->sync_thread ||
5574             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5575                 pr_warn("md: %s still in use.\n",mdname(mddev));
5576                 if (did_freeze) {
5577                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5578                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5579                         md_wakeup_thread(mddev->thread);
5580                 }
5581                 err = -EBUSY;
5582                 goto out;
5583         }
5584         if (mddev->pers) {
5585                 __md_stop_writes(mddev);
5586
5587                 err  = -ENXIO;
5588                 if (mddev->ro==1)
5589                         goto out;
5590                 mddev->ro = 1;
5591                 set_disk_ro(mddev->gendisk, 1);
5592                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5593                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5594                 md_wakeup_thread(mddev->thread);
5595                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5596                 err = 0;
5597         }
5598 out:
5599         mutex_unlock(&mddev->open_mutex);
5600         return err;
5601 }
5602
5603 /* mode:
5604  *   0 - completely stop and disassemble array
5605  *   2 - stop but do not disassemble array
5606  */
5607 static int do_md_stop(struct mddev *mddev, int mode,
5608                       struct block_device *bdev)
5609 {
5610         struct gendisk *disk = mddev->gendisk;
5611         struct md_rdev *rdev;
5612         int did_freeze = 0;
5613
5614         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5615                 did_freeze = 1;
5616                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5617                 md_wakeup_thread(mddev->thread);
5618         }
5619         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5620                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5621         if (mddev->sync_thread)
5622                 /* Thread might be blocked waiting for metadata update
5623                  * which will now never happen */
5624                 wake_up_process(mddev->sync_thread->tsk);
5625
5626         mddev_unlock(mddev);
5627         wait_event(resync_wait, (mddev->sync_thread == NULL &&
5628                                  !test_bit(MD_RECOVERY_RUNNING,
5629                                            &mddev->recovery)));
5630         mddev_lock_nointr(mddev);
5631
5632         mutex_lock(&mddev->open_mutex);
5633         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5634             mddev->sysfs_active ||
5635             mddev->sync_thread ||
5636             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5637                 pr_warn("md: %s still in use.\n",mdname(mddev));
5638                 mutex_unlock(&mddev->open_mutex);
5639                 if (did_freeze) {
5640                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5641                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5642                         md_wakeup_thread(mddev->thread);
5643                 }
5644                 return -EBUSY;
5645         }
5646         if (mddev->pers) {
5647                 if (mddev->ro)
5648                         set_disk_ro(disk, 0);
5649
5650                 __md_stop_writes(mddev);
5651                 __md_stop(mddev);
5652                 mddev->queue->backing_dev_info.congested_fn = NULL;
5653
5654                 /* tell userspace to handle 'inactive' */
5655                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5656
5657                 rdev_for_each(rdev, mddev)
5658                         if (rdev->raid_disk >= 0)
5659                                 sysfs_unlink_rdev(mddev, rdev);
5660
5661                 set_capacity(disk, 0);
5662                 mutex_unlock(&mddev->open_mutex);
5663                 mddev->changed = 1;
5664                 revalidate_disk(disk);
5665
5666                 if (mddev->ro)
5667                         mddev->ro = 0;
5668         } else
5669                 mutex_unlock(&mddev->open_mutex);
5670         /*
5671          * Free resources if final stop
5672          */
5673         if (mode == 0) {
5674                 pr_info("md: %s stopped.\n", mdname(mddev));
5675
5676                 bitmap_destroy(mddev);
5677                 if (mddev->bitmap_info.file) {
5678                         struct file *f = mddev->bitmap_info.file;
5679                         spin_lock(&mddev->lock);
5680                         mddev->bitmap_info.file = NULL;
5681                         spin_unlock(&mddev->lock);
5682                         fput(f);
5683                 }
5684                 mddev->bitmap_info.offset = 0;
5685
5686                 export_array(mddev);
5687
5688                 md_clean(mddev);
5689                 if (mddev->hold_active == UNTIL_STOP)
5690                         mddev->hold_active = 0;
5691         }
5692         md_new_event(mddev);
5693         sysfs_notify_dirent_safe(mddev->sysfs_state);
5694         return 0;
5695 }
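/*
 * For illustration: do_md_stop() backs the STOP_ARRAY ioctl (mode 0, full
 * disassemble), while STOP_ARRAY_RO ends up in md_set_readonly() above.
 * A userspace sketch, assuming /dev/md0 and no other openers:
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (ioctl(fd, STOP_ARRAY, NULL) == 0)
 *		;	// array stopped, member devices exported
 *
 * If another descriptor still holds the device open, the "still in use"
 * check returns -EBUSY and the array is left running.
 */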
5696
5697 #ifndef MODULE
5698 static void autorun_array(struct mddev *mddev)
5699 {
5700         struct md_rdev *rdev;
5701         int err;
5702
5703         if (list_empty(&mddev->disks))
5704                 return;
5705
5706         pr_info("md: running: ");
5707
5708         rdev_for_each(rdev, mddev) {
5709                 char b[BDEVNAME_SIZE];
5710                 pr_cont("<%s>", bdevname(rdev->bdev,b));
5711         }
5712         pr_cont("\n");
5713
5714         err = do_md_run(mddev);
5715         if (err) {
5716                 pr_warn("md: do_md_run() returned %d\n", err);
5717                 do_md_stop(mddev, 0, NULL);
5718         }
5719 }
5720
5721 /*
5722  * let's try to run arrays based on all disks that have arrived
5723  * until now. (those are in pending_raid_disks)
5724  *
5725  * the method: pick the first pending disk, collect all disks with
5726  * the same UUID, remove all from the pending list and put them into
5727  * the 'same_array' list. Then order this list based on superblock
5728  * update time (freshest comes first), kick out 'old' disks and
5729  * compare superblocks. If everything's fine then run it.
5730  *
5731  * If "unit" is allocated, then bump its reference count
5732  */
5733 static void autorun_devices(int part)
5734 {
5735         struct md_rdev *rdev0, *rdev, *tmp;
5736         struct mddev *mddev;
5737         char b[BDEVNAME_SIZE];
5738
5739         pr_info("md: autorun ...\n");
5740         while (!list_empty(&pending_raid_disks)) {
5741                 int unit;
5742                 dev_t dev;
5743                 LIST_HEAD(candidates);
5744                 rdev0 = list_entry(pending_raid_disks.next,
5745                                          struct md_rdev, same_set);
5746
5747                 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
5748                 INIT_LIST_HEAD(&candidates);
5749                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5750                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5751                                 pr_debug("md:  adding %s ...\n",
5752                                          bdevname(rdev->bdev,b));
5753                                 list_move(&rdev->same_set, &candidates);
5754                         }
5755                 /*
5756                  * now we have a set of devices, with all of them having
5757                  * mostly sane superblocks. It's time to allocate the
5758                  * mddev.
5759                  */
5760                 if (part) {
5761                         dev = MKDEV(mdp_major,
5762                                     rdev0->preferred_minor << MdpMinorShift);
5763                         unit = MINOR(dev) >> MdpMinorShift;
5764                 } else {
5765                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5766                         unit = MINOR(dev);
5767                 }
5768                 if (rdev0->preferred_minor != unit) {
5769                         pr_warn("md: unit number in %s is bad: %d\n",
5770                                 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5771                         break;
5772                 }
5773
5774                 md_probe(dev, NULL, NULL);
5775                 mddev = mddev_find(dev);
5776                 if (!mddev || !mddev->gendisk) {
5777                         if (mddev)
5778                                 mddev_put(mddev);
5779                         break;
5780                 }
5781                 if (mddev_lock(mddev))
5782                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
5783                 else if (mddev->raid_disks || mddev->major_version
5784                          || !list_empty(&mddev->disks)) {
5785                         pr_warn("md: %s already running, cannot run %s\n",
5786                                 mdname(mddev), bdevname(rdev0->bdev,b));
5787                         mddev_unlock(mddev);
5788                 } else {
5789                         pr_debug("md: created %s\n", mdname(mddev));
5790                         mddev->persistent = 1;
5791                         rdev_for_each_list(rdev, tmp, &candidates) {
5792                                 list_del_init(&rdev->same_set);
5793                                 if (bind_rdev_to_array(rdev, mddev))
5794                                         export_rdev(rdev);
5795                         }
5796                         autorun_array(mddev);
5797                         mddev_unlock(mddev);
5798                 }
5799                 /* on success, candidates will be empty, on error
5800                  * they won't...
5801                  */
5802                 rdev_for_each_list(rdev, tmp, &candidates) {
5803                         list_del_init(&rdev->same_set);
5804                         export_rdev(rdev);
5805                 }
5806                 mddev_put(mddev);
5807         }
5808         pr_info("md: ... autorun DONE.\n");
5809 }
5810 #endif /* !MODULE */
5811
5812 static int get_version(void __user *arg)
5813 {
5814         mdu_version_t ver;
5815
5816         ver.major = MD_MAJOR_VERSION;
5817         ver.minor = MD_MINOR_VERSION;
5818         ver.patchlevel = MD_PATCHLEVEL_VERSION;
5819
5820         if (copy_to_user(arg, &ver, sizeof(ver)))
5821                 return -EFAULT;
5822
5823         return 0;
5824 }
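/*
 * For illustration: get_version() answers the RAID_VERSION ioctl.  A
 * userspace sketch, assuming fd is an open md device node:
 *
 *	mdu_version_t ver;
 *
 *	if (ioctl(fd, RAID_VERSION, &ver) == 0)
 *		printf("md driver %d.%d.%d\n",
 *		       ver.major, ver.minor, ver.patchlevel);
 *
 * Tools such as mdadm typically issue this first to confirm they are
 * talking to a compatible driver.
 */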
5825
5826 static int get_array_info(struct mddev *mddev, void __user *arg)
5827 {
5828         mdu_array_info_t info;
5829         int nr,working,insync,failed,spare;
5830         struct md_rdev *rdev;
5831
5832         nr = working = insync = failed = spare = 0;
5833         rcu_read_lock();
5834         rdev_for_each_rcu(rdev, mddev) {
5835                 nr++;
5836                 if (test_bit(Faulty, &rdev->flags))
5837                         failed++;
5838                 else {
5839                         working++;
5840                         if (test_bit(In_sync, &rdev->flags))
5841                                 insync++;
5842                         else if (test_bit(Journal, &rdev->flags))
5843                                 /* TODO: add journal count to md_u.h */
5844                                 ;
5845                         else
5846                                 spare++;
5847                 }
5848         }
5849         rcu_read_unlock();
5850
5851         info.major_version = mddev->major_version;
5852         info.minor_version = mddev->minor_version;
5853         info.patch_version = MD_PATCHLEVEL_VERSION;
5854         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
5855         info.level         = mddev->level;
5856         info.size          = mddev->dev_sectors / 2;
5857         if (info.size != mddev->dev_sectors / 2) /* overflow */
5858                 info.size = -1;
5859         info.nr_disks      = nr;
5860         info.raid_disks    = mddev->raid_disks;
5861         info.md_minor      = mddev->md_minor;
5862         info.not_persistent= !mddev->persistent;
5863
5864         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
5865         info.state         = 0;
5866         if (mddev->in_sync)
5867                 info.state = (1<<MD_SB_CLEAN);
5868         if (mddev->bitmap && mddev->bitmap_info.offset)
5869                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5870         if (mddev_is_clustered(mddev))
5871                 info.state |= (1<<MD_SB_CLUSTERED);
5872         info.active_disks  = insync;
5873         info.working_disks = working;
5874         info.failed_disks  = failed;
5875         info.spare_disks   = spare;
5876
5877         info.layout        = mddev->layout;
5878         info.chunk_size    = mddev->chunk_sectors << 9;
5879
5880         if (copy_to_user(arg, &info, sizeof(info)))
5881                 return -EFAULT;
5882
5883         return 0;
5884 }
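/*
 * For illustration: the counters gathered above reach userspace through the
 * GET_ARRAY_INFO ioctl.  A sketch, assuming fd refers to an assembled array:
 *
 *	mdu_array_info_t info;
 *
 *	if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
 *		printf("level %d, %d raid disks, %d active, %d failed\n",
 *		       info.level, info.raid_disks,
 *		       info.active_disks, info.failed_disks);
 *
 * info.size is the per-device size in KiB (dev_sectors / 2) and is set to
 * -1 when that value no longer fits, as the overflow check above shows.
 */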
5885
5886 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5887 {
5888         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5889         char *ptr;
5890         int err;
5891
5892         file = kzalloc(sizeof(*file), GFP_NOIO);
5893         if (!file)
5894                 return -ENOMEM;
5895
5896         err = 0;
5897         spin_lock(&mddev->lock);
5898         /* bitmap enabled */
5899         if (mddev->bitmap_info.file) {
5900                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
5901                                 sizeof(file->pathname));
5902                 if (IS_ERR(ptr))
5903                         err = PTR_ERR(ptr);
5904                 else
5905                         memmove(file->pathname, ptr,
5906                                 sizeof(file->pathname)-(ptr-file->pathname));
5907         }
5908         spin_unlock(&mddev->lock);
5909
5910         if (err == 0 &&
5911             copy_to_user(arg, file, sizeof(*file)))
5912                 err = -EFAULT;
5913
5914         kfree(file);
5915         return err;
5916 }
5917
5918 static int get_disk_info(struct mddev *mddev, void __user * arg)
5919 {
5920         mdu_disk_info_t info;
5921         struct md_rdev *rdev;
5922
5923         if (copy_from_user(&info, arg, sizeof(info)))
5924                 return -EFAULT;
5925
5926         rcu_read_lock();
5927         rdev = md_find_rdev_nr_rcu(mddev, info.number);
5928         if (rdev) {
5929                 info.major = MAJOR(rdev->bdev->bd_dev);
5930                 info.minor = MINOR(rdev->bdev->bd_dev);
5931                 info.raid_disk = rdev->raid_disk;
5932                 info.state = 0;
5933                 if (test_bit(Faulty, &rdev->flags))
5934                         info.state |= (1<<MD_DISK_FAULTY);
5935                 else if (test_bit(In_sync, &rdev->flags)) {
5936                         info.state |= (1<<MD_DISK_ACTIVE);
5937                         info.state |= (1<<MD_DISK_SYNC);
5938                 }
5939                 if (test_bit(Journal, &rdev->flags))
5940                         info.state |= (1<<MD_DISK_JOURNAL);
5941                 if (test_bit(WriteMostly, &rdev->flags))
5942                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5943         } else {
5944                 info.major = info.minor = 0;
5945                 info.raid_disk = -1;
5946                 info.state = (1<<MD_DISK_REMOVED);
5947         }
5948         rcu_read_unlock();
5949
5950         if (copy_to_user(arg, &info, sizeof(info)))
5951                 return -EFAULT;
5952
5953         return 0;
5954 }
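/*
 * For illustration: GET_DISK_INFO is a lookup by slot number, so callers
 * set info.number before the ioctl and read the rest back.  A sketch that
 * scans the first few slots of an array open on fd:
 *
 *	mdu_disk_info_t di;
 *	int i;
 *
 *	for (i = 0; i < 8; i++) {
 *		di.number = i;
 *		if (ioctl(fd, GET_DISK_INFO, &di) < 0)
 *			continue;
 *		if (!(di.state & (1 << MD_DISK_REMOVED)))
 *			printf("slot %d -> dev %d:%d state %#x\n",
 *			       i, di.major, di.minor, di.state);
 *	}
 *
 * Empty slots still return 0 but carry MD_DISK_REMOVED in di.state, per the
 * else branch above.
 */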
5955
5956 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
5957 {
5958         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5959         struct md_rdev *rdev;
5960         dev_t dev = MKDEV(info->major,info->minor);
5961
5962         if (mddev_is_clustered(mddev) &&
5963                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
5964                 pr_warn("%s: Cannot add to clustered mddev.\n",
5965                         mdname(mddev));
5966                 return -EINVAL;
5967         }
5968
5969         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5970                 return -EOVERFLOW;
5971
5972         if (!mddev->raid_disks) {
5973                 int err;
5974                 /* expecting a device which has a superblock */
5975                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5976                 if (IS_ERR(rdev)) {
5977                         pr_warn("md: md_import_device returned %ld\n",
5978                                 PTR_ERR(rdev));
5979                         return PTR_ERR(rdev);
5980                 }
5981                 if (!list_empty(&mddev->disks)) {
5982                         struct md_rdev *rdev0
5983                                 = list_entry(mddev->disks.next,
5984                                              struct md_rdev, same_set);
5985                         err = super_types[mddev->major_version]
5986                                 .load_super(rdev, rdev0, mddev->minor_version);
5987                         if (err < 0) {
5988                                 pr_warn("md: %s has different UUID to %s\n",
5989                                         bdevname(rdev->bdev,b),
5990                                         bdevname(rdev0->bdev,b2));
5991                                 export_rdev(rdev);
5992                                 return -EINVAL;
5993                         }
5994                 }
5995                 err = bind_rdev_to_array(rdev, mddev);
5996                 if (err)
5997                         export_rdev(rdev);
5998                 return err;
5999         }
6000
6001         /*
6002          * add_new_disk can be used once the array is assembled
6003          * to add "hot spares".  They must already have a superblock
6004          * written
6005          */
6006         if (mddev->pers) {
6007                 int err;
6008                 if (!mddev->pers->hot_add_disk) {
6009                         pr_warn("%s: personality does not support diskops!\n",
6010                                 mdname(mddev));
6011                         return -EINVAL;
6012                 }
6013                 if (mddev->persistent)
6014                         rdev = md_import_device(dev, mddev->major_version,
6015                                                 mddev->minor_version);
6016                 else
6017                         rdev = md_import_device(dev, -1, -1);
6018                 if (IS_ERR(rdev)) {
6019                         pr_warn("md: md_import_device returned %ld\n",
6020                                 PTR_ERR(rdev));
6021                         return PTR_ERR(rdev);
6022                 }
6023                 /* set saved_raid_disk if appropriate */
6024                 if (!mddev->persistent) {
6025                         if (info->state & (1<<MD_DISK_SYNC)  &&
6026                             info->raid_disk < mddev->raid_disks) {
6027                                 rdev->raid_disk = info->raid_disk;
6028                                 set_bit(In_sync, &rdev->flags);
6029                                 clear_bit(Bitmap_sync, &rdev->flags);
6030                         } else
6031                                 rdev->raid_disk = -1;
6032                         rdev->saved_raid_disk = rdev->raid_disk;
6033                 } else
6034                         super_types[mddev->major_version].
6035                                 validate_super(mddev, rdev);
6036                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6037                      rdev->raid_disk != info->raid_disk) {
6038                         /* This was a hot-add request, but the events don't
6039                          * match, so reject it.
6040                          */
6041                         export_rdev(rdev);
6042                         return -EINVAL;
6043                 }
6044
6045                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6046                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6047                         set_bit(WriteMostly, &rdev->flags);
6048                 else
6049                         clear_bit(WriteMostly, &rdev->flags);
6050
6051                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6052                         struct md_rdev *rdev2;
6053                         bool has_journal = false;
6054
6055                         /* make sure there is no existing journal disk */
6056                         rdev_for_each(rdev2, mddev) {
6057                                 if (test_bit(Journal, &rdev2->flags)) {
6058                                         has_journal = true;
6059                                         break;
6060                                 }
6061                         }
6062                         if (has_journal) {
6063                                 export_rdev(rdev);
6064                                 return -EBUSY;
6065                         }
6066                         set_bit(Journal, &rdev->flags);
6067                 }
6068                 /*
6069                  * check whether the device shows up in other nodes
6070                  */
6071                 if (mddev_is_clustered(mddev)) {
6072                         if (info->state & (1 << MD_DISK_CANDIDATE))
6073                                 set_bit(Candidate, &rdev->flags);
6074                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6075                                 /* --add initiated by this node */
6076                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6077                                 if (err) {
6078                                         export_rdev(rdev);
6079                                         return err;
6080                                 }
6081                         }
6082                 }
6083
6084                 rdev->raid_disk = -1;
6085                 err = bind_rdev_to_array(rdev, mddev);
6086
6087                 if (err)
6088                         export_rdev(rdev);
6089
6090                 if (mddev_is_clustered(mddev)) {
6091                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6092                                 if (!err) {
6093                                         err = md_cluster_ops->new_disk_ack(mddev,
6094                                                 err == 0);
6095                                         if (err)
6096                                                 md_kick_rdev_from_array(rdev);
6097                                 }
6098                         } else {
6099                                 if (err)
6100                                         md_cluster_ops->add_new_disk_cancel(mddev);
6101                                 else
6102                                         err = add_bound_rdev(rdev);
6103                         }
6104
6105                 } else if (!err)
6106                         err = add_bound_rdev(rdev);
6107
6108                 return err;
6109         }
6110
6111         /* otherwise, add_new_disk is only allowed
6112          * for major_version==0 superblocks
6113          */
6114         if (mddev->major_version != 0) {
6115                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6116                 return -EINVAL;
6117         }
6118
6119         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6120                 int err;
6121                 rdev = md_import_device(dev, -1, 0);
6122                 if (IS_ERR(rdev)) {
6123                         pr_warn("md: error, md_import_device() returned %ld\n",
6124                                 PTR_ERR(rdev));
6125                         return PTR_ERR(rdev);
6126                 }
6127                 rdev->desc_nr = info->number;
6128                 if (info->raid_disk < mddev->raid_disks)
6129                         rdev->raid_disk = info->raid_disk;
6130                 else
6131                         rdev->raid_disk = -1;
6132
6133                 if (rdev->raid_disk < mddev->raid_disks)
6134                         if (info->state & (1<<MD_DISK_SYNC))
6135                                 set_bit(In_sync, &rdev->flags);
6136
6137                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6138                         set_bit(WriteMostly, &rdev->flags);
6139
6140                 if (!mddev->persistent) {
6141                         pr_debug("md: nonpersistent superblock ...\n");
6142                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6143                 } else
6144                         rdev->sb_start = calc_dev_sboffset(rdev);
6145                 rdev->sectors = rdev->sb_start;
6146
6147                 err = bind_rdev_to_array(rdev, mddev);
6148                 if (err) {
6149                         export_rdev(rdev);
6150                         return err;
6151                 }
6152         }
6153
6154         return 0;
6155 }
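/*
 * For illustration: the mdu_disk_info_t consumed above arrives via the
 * ADD_NEW_DISK ioctl.  A sketch that hot-adds a spare which already carries
 * a superblock, assuming /dev/md0 and a component device at 8:32:
 *
 *	mdu_disk_info_t di = {
 *		.number    = 0,		// not consulted on the hot-add path
 *		.major     = 8,
 *		.minor     = 32,
 *		.raid_disk = -1,
 *		.state     = 0,
 *	};
 *
 *	ioctl(fd, ADD_NEW_DISK, &di);
 *
 * With MD_DISK_SYNC set (and a raid_disk that matches the superblock) the
 * same call re-adds a previously active device, which is the only form
 * md_ioctl() below permits while the array is read-only.
 */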
6156
6157 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6158 {
6159         char b[BDEVNAME_SIZE];
6160         struct md_rdev *rdev;
6161
6162         rdev = find_rdev(mddev, dev);
6163         if (!rdev)
6164                 return -ENXIO;
6165
6166         if (rdev->raid_disk < 0)
6167                 goto kick_rdev;
6168
6169         clear_bit(Blocked, &rdev->flags);
6170         remove_and_add_spares(mddev, rdev);
6171
6172         if (rdev->raid_disk >= 0)
6173                 goto busy;
6174
6175 kick_rdev:
6176         if (mddev_is_clustered(mddev))
6177                 md_cluster_ops->remove_disk(mddev, rdev);
6178
6179         md_kick_rdev_from_array(rdev);
6180         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6181         if (mddev->thread)
6182                 md_wakeup_thread(mddev->thread);
6183         else
6184                 md_update_sb(mddev, 1);
6185         md_new_event(mddev);
6186
6187         return 0;
6188 busy:
6189         pr_debug("md: cannot remove active disk %s from %s ...\n",
6190                  bdevname(rdev->bdev,b), mdname(mddev));
6191         return -EBUSY;
6192 }
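/*
 * For illustration: HOT_REMOVE_DISK carries the target device number in the
 * ioctl argument itself, decoded with new_decode_dev() in md_ioctl().  A
 * sketch, assuming a failed member at 8:32 with ordinary small major/minor
 * numbers:
 *
 *	#include <sys/sysmacros.h>
 *
 *	ioctl(fd, HOT_REMOVE_DISK, (unsigned long)makedev(8, 32));
 *
 * The call succeeds only once the device is no longer an active raid_disk;
 * otherwise the "busy" label above returns -EBUSY.
 */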
6193
6194 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6195 {
6196         char b[BDEVNAME_SIZE];
6197         int err;
6198         struct md_rdev *rdev;
6199
6200         if (!mddev->pers)
6201                 return -ENODEV;
6202
6203         if (mddev->major_version != 0) {
6204                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6205                         mdname(mddev));
6206                 return -EINVAL;
6207         }
6208         if (!mddev->pers->hot_add_disk) {
6209                 pr_warn("%s: personality does not support diskops!\n",
6210                         mdname(mddev));
6211                 return -EINVAL;
6212         }
6213
6214         rdev = md_import_device(dev, -1, 0);
6215         if (IS_ERR(rdev)) {
6216                 pr_warn("md: error, md_import_device() returned %ld\n",
6217                         PTR_ERR(rdev));
6218                 return -EINVAL;
6219         }
6220
6221         if (mddev->persistent)
6222                 rdev->sb_start = calc_dev_sboffset(rdev);
6223         else
6224                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6225
6226         rdev->sectors = rdev->sb_start;
6227
6228         if (test_bit(Faulty, &rdev->flags)) {
6229                 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
6230                         bdevname(rdev->bdev,b), mdname(mddev));
6231                 err = -EINVAL;
6232                 goto abort_export;
6233         }
6234
6235         clear_bit(In_sync, &rdev->flags);
6236         rdev->desc_nr = -1;
6237         rdev->saved_raid_disk = -1;
6238         err = bind_rdev_to_array(rdev, mddev);
6239         if (err)
6240                 goto abort_export;
6241
6242         /*
6243          * The rest had better be atomic: we can have disk failures
6244          * noticed in interrupt contexts ...
6245          */
6246
6247         rdev->raid_disk = -1;
6248
6249         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6250         if (!mddev->thread)
6251                 md_update_sb(mddev, 1);
6252         /*
6253          * Kick recovery, maybe this spare has to be added to the
6254          * array immediately.
6255          */
6256         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6257         md_wakeup_thread(mddev->thread);
6258         md_new_event(mddev);
6259         return 0;
6260
6261 abort_export:
6262         export_rdev(rdev);
6263         return err;
6264 }
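/*
 * For illustration: HOT_ADD_DISK is the version-0.90-only companion to the
 * above; it takes a bare device number and does not expect an existing
 * superblock on the new device.  A sketch, assuming a blank partition at
 * 8:48 and small major/minor numbers:
 *
 *	ioctl(fd, HOT_ADD_DISK, (unsigned long)makedev(8, 48));
 *
 * Arrays with v1.x metadata are rejected by the major_version check above
 * and are grown with ADD_NEW_DISK instead.
 */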
6265
6266 static int set_bitmap_file(struct mddev *mddev, int fd)
6267 {
6268         int err = 0;
6269
6270         if (mddev->pers) {
6271                 if (!mddev->pers->quiesce || !mddev->thread)
6272                         return -EBUSY;
6273                 if (mddev->recovery || mddev->sync_thread)
6274                         return -EBUSY;
6275                 /* we should be able to change the bitmap.. */
6276         }
6277
6278         if (fd >= 0) {
6279                 struct inode *inode;
6280                 struct file *f;
6281
6282                 if (mddev->bitmap || mddev->bitmap_info.file)
6283                         return -EEXIST; /* cannot add when bitmap is present */
6284                 f = fget(fd);
6285
6286                 if (f == NULL) {
6287                         pr_warn("%s: error: failed to get bitmap file\n",
6288                                 mdname(mddev));
6289                         return -EBADF;
6290                 }
6291
6292                 inode = f->f_mapping->host;
6293                 if (!S_ISREG(inode->i_mode)) {
6294                         pr_warn("%s: error: bitmap file must be a regular file\n",
6295                                 mdname(mddev));
6296                         err = -EBADF;
6297                 } else if (!(f->f_mode & FMODE_WRITE)) {
6298                         pr_warn("%s: error: bitmap file must be open for write\n",
6299                                 mdname(mddev));
6300                         err = -EBADF;
6301                 } else if (atomic_read(&inode->i_writecount) != 1) {
6302                         pr_warn("%s: error: bitmap file is already in use\n",
6303                                 mdname(mddev));
6304                         err = -EBUSY;
6305                 }
6306                 if (err) {
6307                         fput(f);
6308                         return err;
6309                 }
6310                 mddev->bitmap_info.file = f;
6311                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6312         } else if (mddev->bitmap == NULL)
6313                 return -ENOENT; /* cannot remove what isn't there */
6314         err = 0;
6315         if (mddev->pers) {
6316                 mddev->pers->quiesce(mddev, 1);
6317                 if (fd >= 0) {
6318                         struct bitmap *bitmap;
6319
6320                         bitmap = bitmap_create(mddev, -1);
6321                         if (!IS_ERR(bitmap)) {
6322                                 mddev->bitmap = bitmap;
6323                                 err = bitmap_load(mddev);
6324                         } else
6325                                 err = PTR_ERR(bitmap);
6326                 }
6327                 if (fd < 0 || err) {
6328                         bitmap_destroy(mddev);
6329                         fd = -1; /* make sure to put the file */
6330                 }
6331                 mddev->pers->quiesce(mddev, 0);
6332         }
6333         if (fd < 0) {
6334                 struct file *f = mddev->bitmap_info.file;
6335                 if (f) {
6336                         spin_lock(&mddev->lock);
6337                         mddev->bitmap_info.file = NULL;
6338                         spin_unlock(&mddev->lock);
6339                         fput(f);
6340                 }
6341         }
6342
6343         return err;
6344 }
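/*
 * For illustration: SET_BITMAP_FILE passes a plain int - an open file
 * descriptor for the external bitmap file, or -1 to drop the bitmap.  A
 * sketch, assuming a bitmap file already laid out by a tool such as mdadm
 * (the pathname below is only an example):
 *
 *	int bfd = open("/var/lib/md0-bitmap", O_RDWR);
 *
 *	ioctl(fd, SET_BITMAP_FILE, bfd);	// attach write-intent bitmap
 *	...
 *	ioctl(fd, SET_BITMAP_FILE, -1);		// detach it again
 *
 * The checks above insist the file is a regular file, open for write, and
 * not open for writing anywhere else.
 */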
6345
6346 /*
6347  * set_array_info is used in two different ways.
6348  * The original usage is when creating a new array.
6349  * In this usage, raid_disks is > 0 and it together with
6350  *  level, size, not_persistent, layout and chunk_size determines the
6351  *  shape of the array.
6352  *  This will always create an array with a type-0.90.0 superblock.
6353  * The newer usage is when assembling an array.
6354  *  In this case raid_disks will be 0, and the major_version field is
6355  *  used to determine which style of superblock is to be found on the devices.
6356  *  The minor and patch _version numbers are also kept in case the
6357  *  super_block handler wishes to interpret them.
6358  */
6359 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6360 {
6361
6362         if (info->raid_disks == 0) {
6363                 /* just setting version number for superblock loading */
6364                 if (info->major_version < 0 ||
6365                     info->major_version >= ARRAY_SIZE(super_types) ||
6366                     super_types[info->major_version].name == NULL) {
6367                         /* maybe try to auto-load a module? */
6368                         pr_warn("md: superblock version %d not known\n",
6369                                 info->major_version);
6370                         return -EINVAL;
6371                 }
6372                 mddev->major_version = info->major_version;
6373                 mddev->minor_version = info->minor_version;
6374                 mddev->patch_version = info->patch_version;
6375                 mddev->persistent = !info->not_persistent;
6376                 /* ensure mddev_put doesn't delete this now that there
6377                  * is some minimal configuration.
6378                  */
6379                 mddev->ctime         = ktime_get_real_seconds();
6380                 return 0;
6381         }
6382         mddev->major_version = MD_MAJOR_VERSION;
6383         mddev->minor_version = MD_MINOR_VERSION;
6384         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6385         mddev->ctime         = ktime_get_real_seconds();
6386
6387         mddev->level         = info->level;
6388         mddev->clevel[0]     = 0;
6389         mddev->dev_sectors   = 2 * (sector_t)info->size;
6390         mddev->raid_disks    = info->raid_disks;
6391         /* don't set md_minor, it is determined by which /dev/md* was
6392          * opened
6393          */
6394         if (info->state & (1<<MD_SB_CLEAN))
6395                 mddev->recovery_cp = MaxSector;
6396         else
6397                 mddev->recovery_cp = 0;
6398         mddev->persistent    = ! info->not_persistent;
6399         mddev->external      = 0;
6400
6401         mddev->layout        = info->layout;
6402         mddev->chunk_sectors = info->chunk_size >> 9;
6403
6404         mddev->max_disks     = MD_SB_DISKS;
6405
6406         if (mddev->persistent)
6407                 mddev->flags         = 0;
6408         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6409
6410         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6411         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6412         mddev->bitmap_info.offset = 0;
6413
6414         mddev->reshape_position = MaxSector;
6415
6416         /*
6417          * Generate a 128 bit UUID
6418          */
6419         get_random_bytes(mddev->uuid, 16);
6420
6421         mddev->new_level = mddev->level;
6422         mddev->new_chunk_sectors = mddev->chunk_sectors;
6423         mddev->new_layout = mddev->layout;
6424         mddev->delta_disks = 0;
6425         mddev->reshape_backwards = 0;
6426
6427         return 0;
6428 }
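/*
 * For illustration: both usages described in the comment above
 * set_array_info() arrive through the SET_ARRAY_INFO ioctl.  A sketch,
 * assuming a freshly opened, still unconfigured /dev/md0:
 *
 *	mdu_array_info_t ai = { 0 };
 *
 *	// usage 1: create a new v0.90-superblock array shape
 *	ai.level      = 1;		// RAID1
 *	ai.raid_disks = 2;
 *	ai.size       = 1048576;	// per-device size in KiB (1 GiB)
 *	ioctl(fd, SET_ARRAY_INFO, &ai);
 *
 *	// usage 2 (alternative, on a different empty array): assembly -
 *	// only select the superblock format to look for
 *	memset(&ai, 0, sizeof(ai));
 *	ai.major_version = 0;		// scan members for v0.90 superblocks
 *	ioctl(fd, SET_ARRAY_INFO, &ai);
 *
 * In either case member devices are added afterwards with ADD_NEW_DISK and
 * the array is started with RUN_ARRAY.
 */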
6429
6430 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6431 {
6432         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6433
6434         if (mddev->external_size)
6435                 return;
6436
6437         mddev->array_sectors = array_sectors;
6438 }
6439 EXPORT_SYMBOL(md_set_array_sectors);
6440
6441 static int update_size(struct mddev *mddev, sector_t num_sectors)
6442 {
6443         struct md_rdev *rdev;
6444         int rv;
6445         int fit = (num_sectors == 0);
6446
6447         /* cluster raid doesn't support update size */
6448         if (mddev_is_clustered(mddev))
6449                 return -EINVAL;
6450
6451         if (mddev->pers->resize == NULL)
6452                 return -EINVAL;
6453         /* The "num_sectors" is the number of sectors of each device that
6454          * is used.  This can only make sense for arrays with redundancy.
6455          * linear and raid0 always use whatever space is available. We can only
6456          * consider changing this number if no resync or reconstruction is
6457          * happening, and if the new size is acceptable. It must fit before the
6458          * sb_start or, if that is <data_offset, it must fit before the size
6459          * of each device.  If num_sectors is zero, we find the largest size
6460          * that fits.
6461          */
6462         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6463             mddev->sync_thread)
6464                 return -EBUSY;
6465         if (mddev->ro)
6466                 return -EROFS;
6467
6468         rdev_for_each(rdev, mddev) {
6469                 sector_t avail = rdev->sectors;
6470
6471                 if (fit && (num_sectors == 0 || num_sectors > avail))
6472                         num_sectors = avail;
6473                 if (avail < num_sectors)
6474                         return -ENOSPC;
6475         }
6476         rv = mddev->pers->resize(mddev, num_sectors);
6477         if (!rv)
6478                 revalidate_disk(mddev->gendisk);
6479         return rv;
6480 }
6481
6482 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6483 {
6484         int rv;
6485         struct md_rdev *rdev;
6486         /* change the number of raid disks */
6487         if (mddev->pers->check_reshape == NULL)
6488                 return -EINVAL;
6489         if (mddev->ro)
6490                 return -EROFS;
6491         if (raid_disks <= 0 ||
6492             (mddev->max_disks && raid_disks >= mddev->max_disks))
6493                 return -EINVAL;
6494         if (mddev->sync_thread ||
6495             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6496             mddev->reshape_position != MaxSector)
6497                 return -EBUSY;
6498
6499         rdev_for_each(rdev, mddev) {
6500                 if (mddev->raid_disks < raid_disks &&
6501                     rdev->data_offset < rdev->new_data_offset)
6502                         return -EINVAL;
6503                 if (mddev->raid_disks > raid_disks &&
6504                     rdev->data_offset > rdev->new_data_offset)
6505                         return -EINVAL;
6506         }
6507
6508         mddev->delta_disks = raid_disks - mddev->raid_disks;
6509         if (mddev->delta_disks < 0)
6510                 mddev->reshape_backwards = 1;
6511         else if (mddev->delta_disks > 0)
6512                 mddev->reshape_backwards = 0;
6513
6514         rv = mddev->pers->check_reshape(mddev);
6515         if (rv < 0) {
6516                 mddev->delta_disks = 0;
6517                 mddev->reshape_backwards = 0;
6518         }
6519         return rv;
6520 }
6521
6522 /*
6523  * update_array_info is used to change the configuration of an
6524  * on-line array.
6525  * The version, ctime, level, size, raid_disks, not_persistent, layout and
6526  * chunk_size fields in the info are checked against the array.
6527  * Any differences that cannot be handled will cause an error.
6528  * Normally, only one change can be managed at a time.
6529  */
6530 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6531 {
6532         int rv = 0;
6533         int cnt = 0;
6534         int state = 0;
6535
6536         /* calculate expected state, ignoring low bits */
6537         if (mddev->bitmap && mddev->bitmap_info.offset)
6538                 state |= (1 << MD_SB_BITMAP_PRESENT);
6539
6540         if (mddev->major_version != info->major_version ||
6541             mddev->minor_version != info->minor_version ||
6542 /*          mddev->patch_version != info->patch_version || */
6543             mddev->ctime         != info->ctime         ||
6544             mddev->level         != info->level         ||
6545 /*          mddev->layout        != info->layout        || */
6546             mddev->persistent    != !info->not_persistent ||
6547             mddev->chunk_sectors != info->chunk_size >> 9 ||
6548             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6549             ((state^info->state) & 0xfffffe00)
6550                 )
6551                 return -EINVAL;
6552         /* Check there is only one change */
6553         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6554                 cnt++;
6555         if (mddev->raid_disks != info->raid_disks)
6556                 cnt++;
6557         if (mddev->layout != info->layout)
6558                 cnt++;
6559         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6560                 cnt++;
6561         if (cnt == 0)
6562                 return 0;
6563         if (cnt > 1)
6564                 return -EINVAL;
6565
6566         if (mddev->layout != info->layout) {
6567                 /* Change layout
6568                  * we don't need to do anything at the md level, the
6569                  * personality will take care of it all.
6570                  */
6571                 if (mddev->pers->check_reshape == NULL)
6572                         return -EINVAL;
6573                 else {
6574                         mddev->new_layout = info->layout;
6575                         rv = mddev->pers->check_reshape(mddev);
6576                         if (rv)
6577                                 mddev->new_layout = mddev->layout;
6578                         return rv;
6579                 }
6580         }
6581         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6582                 rv = update_size(mddev, (sector_t)info->size * 2);
6583
6584         if (mddev->raid_disks    != info->raid_disks)
6585                 rv = update_raid_disks(mddev, info->raid_disks);
6586
6587         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6588                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6589                         rv = -EINVAL;
6590                         goto err;
6591                 }
6592                 if (mddev->recovery || mddev->sync_thread) {
6593                         rv = -EBUSY;
6594                         goto err;
6595                 }
6596                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6597                         struct bitmap *bitmap;
6598                         /* add the bitmap */
6599                         if (mddev->bitmap) {
6600                                 rv = -EEXIST;
6601                                 goto err;
6602                         }
6603                         if (mddev->bitmap_info.default_offset == 0) {
6604                                 rv = -EINVAL;
6605                                 goto err;
6606                         }
6607                         mddev->bitmap_info.offset =
6608                                 mddev->bitmap_info.default_offset;
6609                         mddev->bitmap_info.space =
6610                                 mddev->bitmap_info.default_space;
6611                         mddev->pers->quiesce(mddev, 1);
6612                         bitmap = bitmap_create(mddev, -1);
6613                         if (!IS_ERR(bitmap)) {
6614                                 mddev->bitmap = bitmap;
6615                                 rv = bitmap_load(mddev);
6616                         } else
6617                                 rv = PTR_ERR(bitmap);
6618                         if (rv)
6619                                 bitmap_destroy(mddev);
6620                         mddev->pers->quiesce(mddev, 0);
6621                 } else {
6622                         /* remove the bitmap */
6623                         if (!mddev->bitmap) {
6624                                 rv = -ENOENT;
6625                                 goto err;
6626                         }
6627                         if (mddev->bitmap->storage.file) {
6628                                 rv = -EINVAL;
6629                                 goto err;
6630                         }
6631                         if (mddev->bitmap_info.nodes) {
6632                                 /* hold the PW lock on all the bitmaps */
6633                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
6634                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
6635                                         rv = -EPERM;
6636                                         md_cluster_ops->unlock_all_bitmaps(mddev);
6637                                         goto err;
6638                                 }
6639
6640                                 mddev->bitmap_info.nodes = 0;
6641                                 md_cluster_ops->leave(mddev);
6642                         }
6643                         mddev->pers->quiesce(mddev, 1);
6644                         bitmap_destroy(mddev);
6645                         mddev->pers->quiesce(mddev, 0);
6646                         mddev->bitmap_info.offset = 0;
6647                 }
6648         }
6649         md_update_sb(mddev, 1);
6650         return rv;
6651 err:
6652         return rv;
6653 }
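/*
 * For illustration: because update_array_info() compares the incoming
 * mdu_array_info_t against the live array and accepts exactly one change,
 * the usual userspace pattern is read-modify-write.  A sketch that adds an
 * internal bitmap to a running array, roughly as mdadm does (error handling
 * elided):
 *
 *	mdu_array_info_t ai;
 *
 *	ioctl(fd, GET_ARRAY_INFO, &ai);
 *	ai.state |= (1 << MD_SB_BITMAP_PRESENT);
 *	ioctl(fd, SET_ARRAY_INFO, &ai);		// lands in update_array_info()
 *
 * Clearing the bit the same way removes an internal bitmap; file-backed
 * bitmaps are rejected here and are managed through SET_BITMAP_FILE instead.
 */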
6654
6655 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6656 {
6657         struct md_rdev *rdev;
6658         int err = 0;
6659
6660         if (mddev->pers == NULL)
6661                 return -ENODEV;
6662
6663         rcu_read_lock();
6664         rdev = find_rdev_rcu(mddev, dev);
6665         if (!rdev)
6666                 err =  -ENODEV;
6667         else {
6668                 md_error(mddev, rdev);
6669                 if (!test_bit(Faulty, &rdev->flags))
6670                         err = -EBUSY;
6671         }
6672         rcu_read_unlock();
6673         return err;
6674 }
6675
6676 /*
6677  * We have a problem here: there is no easy way to give a CHS
6678  * virtual geometry. We currently pretend that we have 2 heads and
6679  * 4 sectors (with a BIG number of cylinders...). This drives
6680  * dosfs just mad... ;-)
6681  */
6682 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6683 {
6684         struct mddev *mddev = bdev->bd_disk->private_data;
6685
6686         geo->heads = 2;
6687         geo->sectors = 4;
6688         geo->cylinders = mddev->array_sectors / 8;
6689         return 0;
6690 }
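/*
 * For illustration: with the fixed 2-head, 4-sector fake geometry above,
 * cylinders = array_sectors / (2 * 4), so C * H * S reproduces the capacity
 * whenever array_sectors is a multiple of 8.  For a hypothetical 16 MiB
 * array (32768 sectors of 512 bytes):
 *
 *	cylinders = 32768 / 8 = 4096
 *	4096 cylinders * 2 heads * 4 sectors = 32768 sectors
 *
 * Userspace reads this via the ordinary HDIO_GETGEO ioctl on the md device;
 * note that struct hd_geometry only has a 16-bit cylinder count, so the
 * reported value truncates for all but very small arrays.
 */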
6691
6692 static inline bool md_ioctl_valid(unsigned int cmd)
6693 {
6694         switch (cmd) {
6695         case ADD_NEW_DISK:
6696         case BLKROSET:
6697         case GET_ARRAY_INFO:
6698         case GET_BITMAP_FILE:
6699         case GET_DISK_INFO:
6700         case HOT_ADD_DISK:
6701         case HOT_REMOVE_DISK:
6702         case RAID_AUTORUN:
6703         case RAID_VERSION:
6704         case RESTART_ARRAY_RW:
6705         case RUN_ARRAY:
6706         case SET_ARRAY_INFO:
6707         case SET_BITMAP_FILE:
6708         case SET_DISK_FAULTY:
6709         case STOP_ARRAY:
6710         case STOP_ARRAY_RO:
6711         case CLUSTERED_DISK_NACK:
6712                 return true;
6713         default:
6714                 return false;
6715         }
6716 }
6717
6718 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6719                         unsigned int cmd, unsigned long arg)
6720 {
6721         int err = 0;
6722         void __user *argp = (void __user *)arg;
6723         struct mddev *mddev = NULL;
6724         int ro;
6725
6726         if (!md_ioctl_valid(cmd))
6727                 return -ENOTTY;
6728
6729         switch (cmd) {
6730         case RAID_VERSION:
6731         case GET_ARRAY_INFO:
6732         case GET_DISK_INFO:
6733                 break;
6734         default:
6735                 if (!capable(CAP_SYS_ADMIN))
6736                         return -EACCES;
6737         }
6738
6739         /*
6740          * Commands dealing with the RAID driver but not any
6741          * particular array:
6742          */
6743         switch (cmd) {
6744         case RAID_VERSION:
6745                 err = get_version(argp);
6746                 goto out;
6747
6748 #ifndef MODULE
6749         case RAID_AUTORUN:
6750                 err = 0;
6751                 autostart_arrays(arg);
6752                 goto out;
6753 #endif
6754         default:;
6755         }
6756
6757         /*
6758          * Commands creating/starting a new array:
6759          */
6760
6761         mddev = bdev->bd_disk->private_data;
6762
6763         if (!mddev) {
6764                 BUG();
6765                 goto out;
6766         }
6767
6768         /* Some actions do not require the mutex */
6769         switch (cmd) {
6770         case GET_ARRAY_INFO:
6771                 if (!mddev->raid_disks && !mddev->external)
6772                         err = -ENODEV;
6773                 else
6774                         err = get_array_info(mddev, argp);
6775                 goto out;
6776
6777         case GET_DISK_INFO:
6778                 if (!mddev->raid_disks && !mddev->external)
6779                         err = -ENODEV;
6780                 else
6781                         err = get_disk_info(mddev, argp);
6782                 goto out;
6783
6784         case SET_DISK_FAULTY:
6785                 err = set_disk_faulty(mddev, new_decode_dev(arg));
6786                 goto out;
6787
6788         case GET_BITMAP_FILE:
6789                 err = get_bitmap_file(mddev, argp);
6790                 goto out;
6791
6792         }
6793
6794         if (cmd == ADD_NEW_DISK)
6795                 /* need to ensure md_delayed_delete() has completed */
6796                 flush_workqueue(md_misc_wq);
6797
6798         if (cmd == HOT_REMOVE_DISK)
6799                 /* need to ensure recovery thread has run */
6800                 wait_event_interruptible_timeout(mddev->sb_wait,
6801                                                  !test_bit(MD_RECOVERY_NEEDED,
6802                                                            &mddev->flags),
6803                                                  msecs_to_jiffies(5000));
6804         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6805                 /* Need to flush page cache, and ensure no-one else opens
6806                  * and writes
6807                  */
6808                 mutex_lock(&mddev->open_mutex);
6809                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6810                         mutex_unlock(&mddev->open_mutex);
6811                         err = -EBUSY;
6812                         goto out;
6813                 }
6814                 set_bit(MD_CLOSING, &mddev->flags);
6815                 mutex_unlock(&mddev->open_mutex);
6816                 sync_blockdev(bdev);
6817         }
6818         err = mddev_lock(mddev);
6819         if (err) {
6820                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
6821                          err, cmd);
6822                 goto out;
6823         }
6824
6825         if (cmd == SET_ARRAY_INFO) {
6826                 mdu_array_info_t info;
6827                 if (!arg)
6828                         memset(&info, 0, sizeof(info));
6829                 else if (copy_from_user(&info, argp, sizeof(info))) {
6830                         err = -EFAULT;
6831                         goto unlock;
6832                 }
6833                 if (mddev->pers) {
6834                         err = update_array_info(mddev, &info);
6835                         if (err) {
6836                                 pr_warn("md: couldn't update array info. %d\n", err);
6837                                 goto unlock;
6838                         }
6839                         goto unlock;
6840                 }
6841                 if (!list_empty(&mddev->disks)) {
6842                         pr_warn("md: array %s already has disks!\n", mdname(mddev));
6843                         err = -EBUSY;
6844                         goto unlock;
6845                 }
6846                 if (mddev->raid_disks) {
6847                         pr_warn("md: array %s already initialised!\n", mdname(mddev));
6848                         err = -EBUSY;
6849                         goto unlock;
6850                 }
6851                 err = set_array_info(mddev, &info);
6852                 if (err) {
6853                         pr_warn("md: couldn't set array info. %d\n", err);
6854                         goto unlock;
6855                 }
6856                 goto unlock;
6857         }
6858
6859         /*
6860          * Commands querying/configuring an existing array:
6861          */
6862         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6863          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6864         if ((!mddev->raid_disks && !mddev->external)
6865             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6866             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6867             && cmd != GET_BITMAP_FILE) {
6868                 err = -ENODEV;
6869                 goto unlock;
6870         }
6871
6872         /*
6873          * Commands even a read-only array can execute:
6874          */
6875         switch (cmd) {
6876         case RESTART_ARRAY_RW:
6877                 err = restart_array(mddev);
6878                 goto unlock;
6879
6880         case STOP_ARRAY:
6881                 err = do_md_stop(mddev, 0, bdev);
6882                 goto unlock;
6883
6884         case STOP_ARRAY_RO:
6885                 err = md_set_readonly(mddev, bdev);
6886                 goto unlock;
6887
6888         case HOT_REMOVE_DISK:
6889                 err = hot_remove_disk(mddev, new_decode_dev(arg));
6890                 goto unlock;
6891
6892         case ADD_NEW_DISK:
6893                 /* We can support ADD_NEW_DISK on read-only arrays
6894                  * only if we are re-adding a preexisting device.
6895                  * So require mddev->pers and MD_DISK_SYNC.
6896                  */
6897                 if (mddev->pers) {
6898                         mdu_disk_info_t info;
6899                         if (copy_from_user(&info, argp, sizeof(info)))
6900                                 err = -EFAULT;
6901                         else if (!(info.state & (1<<MD_DISK_SYNC)))
6902                                 /* Need to clear read-only for this */
6903                                 break;
6904                         else
6905                                 err = add_new_disk(mddev, &info);
6906                         goto unlock;
6907                 }
6908                 break;
6909
6910         case BLKROSET:
6911                 if (get_user(ro, (int __user *)(arg))) {
6912                         err = -EFAULT;
6913                         goto unlock;
6914                 }
6915                 err = -EINVAL;
6916
6917                 /* if the bdev is going readonly, the value of mddev->ro
6918                  * does not matter; no writes are coming
6919                  */
6920                 if (ro)
6921                         goto unlock;
6922
6923                 /* are we already prepared for writes? */
6924                 if (mddev->ro != 1)
6925                         goto unlock;
6926
6927                 /* transitioning to readauto need only happen for
6928                  * arrays that call md_write_start
6929                  */
6930                 if (mddev->pers) {
6931                         err = restart_array(mddev);
6932                         if (err == 0) {
6933                                 mddev->ro = 2;
6934                                 set_disk_ro(mddev->gendisk, 0);
6935                         }
6936                 }
6937                 goto unlock;
6938         }
6939
6940         /*
6941          * The remaining ioctls are changing the state of the
6942          * superblock, so we do not allow them on read-only arrays.
6943          */
6944         if (mddev->ro && mddev->pers) {
6945                 if (mddev->ro == 2) {
6946                         mddev->ro = 0;
6947                         sysfs_notify_dirent_safe(mddev->sysfs_state);
6948                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6949                         /* mddev_unlock will wake thread */
6950                         /* If a device failed while we were read-only, we
6951                          * need to make sure the metadata is updated now.
6952                          */
6953                         if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
6954                                 mddev_unlock(mddev);
6955                                 wait_event(mddev->sb_wait,
6956                                            !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6957                                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6958                                 mddev_lock_nointr(mddev);
6959                         }
6960                 } else {
6961                         err = -EROFS;
6962                         goto unlock;
6963                 }
6964         }
6965
6966         switch (cmd) {
6967         case ADD_NEW_DISK:
6968         {
6969                 mdu_disk_info_t info;
6970                 if (copy_from_user(&info, argp, sizeof(info)))
6971                         err = -EFAULT;
6972                 else
6973                         err = add_new_disk(mddev, &info);
6974                 goto unlock;
6975         }
6976
6977         case CLUSTERED_DISK_NACK:
6978                 if (mddev_is_clustered(mddev))
6979                         md_cluster_ops->new_disk_ack(mddev, false);
6980                 else
6981                         err = -EINVAL;
6982                 goto unlock;
6983
6984         case HOT_ADD_DISK:
6985                 err = hot_add_disk(mddev, new_decode_dev(arg));
6986                 goto unlock;
6987
6988         case RUN_ARRAY:
6989                 err = do_md_run(mddev);
6990                 goto unlock;
6991
6992         case SET_BITMAP_FILE:
6993                 err = set_bitmap_file(mddev, (int)arg);
6994                 goto unlock;
6995
6996         default:
6997                 err = -EINVAL;
6998                 goto unlock;
6999         }
7000
7001 unlock:
7002         if (mddev->hold_active == UNTIL_IOCTL &&
7003             err != -EINVAL)
7004                 mddev->hold_active = 0;
7005         mddev_unlock(mddev);
7006 out:
7007         return err;
7008 }
7009 #ifdef CONFIG_COMPAT
7010 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7011                     unsigned int cmd, unsigned long arg)
7012 {
7013         switch (cmd) {
7014         case HOT_REMOVE_DISK:
7015         case HOT_ADD_DISK:
7016         case SET_DISK_FAULTY:
7017         case SET_BITMAP_FILE:
7018                 /* These take in integer arg, do not convert */
7019                 break;
7020         default:
7021                 arg = (unsigned long)compat_ptr(arg);
7022                 break;
7023         }
7024
7025         return md_ioctl(bdev, mode, cmd, arg);
7026 }
7027 #endif /* CONFIG_COMPAT */
7028
7029 static int md_open(struct block_device *bdev, fmode_t mode)
7030 {
7031         /*
7032          * Succeed if we can lock the mddev, which confirms that
7033          * it isn't being stopped right now.
7034          */
7035         struct mddev *mddev = mddev_find(bdev->bd_dev);
7036         int err;
7037
7038         if (!mddev)
7039                 return -ENODEV;
7040
7041         if (mddev->gendisk != bdev->bd_disk) {
7042                 /* we are racing with mddev_put which is discarding this
7043                  * bd_disk.
7044                  */
7045                 mddev_put(mddev);
7046                 /* Wait until bdev->bd_disk is definitely gone */
7047                 flush_workqueue(md_misc_wq);
7048                 /* Then retry the open from the top */
7049                 return -ERESTARTSYS;
7050         }
7051         BUG_ON(mddev != bdev->bd_disk->private_data);
7052
7053         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7054                 goto out;
7055
7056         if (test_bit(MD_CLOSING, &mddev->flags)) {
7057                 mutex_unlock(&mddev->open_mutex);
7058                 return -ENODEV;
7059         }
7060
7061         err = 0;
7062         atomic_inc(&mddev->openers);
7063         mutex_unlock(&mddev->open_mutex);
7064
7065         check_disk_change(bdev);
7066  out:
7067         return err;
7068 }
7069
7070 static void md_release(struct gendisk *disk, fmode_t mode)
7071 {
7072         struct mddev *mddev = disk->private_data;
7073
7074         BUG_ON(!mddev);
7075         atomic_dec(&mddev->openers);
7076         mddev_put(mddev);
7077 }
7078
7079 static int md_media_changed(struct gendisk *disk)
7080 {
7081         struct mddev *mddev = disk->private_data;
7082
7083         return mddev->changed;
7084 }
7085
7086 static int md_revalidate(struct gendisk *disk)
7087 {
7088         struct mddev *mddev = disk->private_data;
7089
7090         mddev->changed = 0;
7091         return 0;
7092 }
7093 static const struct block_device_operations md_fops =
7094 {
7095         .owner          = THIS_MODULE,
7096         .open           = md_open,
7097         .release        = md_release,
7098         .ioctl          = md_ioctl,
7099 #ifdef CONFIG_COMPAT
7100         .compat_ioctl   = md_compat_ioctl,
7101 #endif
7102         .getgeo         = md_getgeo,
7103         .media_changed  = md_media_changed,
7104         .revalidate_disk= md_revalidate,
7105 };
7106
7107 static int md_thread(void *arg)
7108 {
7109         struct md_thread *thread = arg;
7110
7111         /*
7112          * md_thread is a 'system thread'; its priority should be very
7113          * high. We avoid resource deadlocks individually in each
7114          * raid personality. (RAID5 does preallocation) We also use RR and
7115          * the very same RT priority as kswapd, thus we will never get
7116          * into a priority inversion deadlock.
7117          *
7118          * We definitely need equal or higher priority than
7119          * bdflush; otherwise bdflush will deadlock if there are too
7120          * many dirty RAID5 blocks.
7121          */
7122
7123         allow_signal(SIGKILL);
7124         while (!kthread_should_stop()) {
7125
7126                 /* We need to wait INTERRUPTIBLE so that
7127                  * we don't add to the load-average.
7128                  * That means we need to be sure no signals are
7129                  * pending
7130                  */
7131                 if (signal_pending(current))
7132                         flush_signals(current);
7133
7134                 wait_event_interruptible_timeout
7135                         (thread->wqueue,
7136                          test_bit(THREAD_WAKEUP, &thread->flags)
7137                          || kthread_should_stop(),
7138                          thread->timeout);
7139
7140                 clear_bit(THREAD_WAKEUP, &thread->flags);
7141                 if (!kthread_should_stop())
7142                         thread->run(thread);
7143         }
7144
7145         return 0;
7146 }
7147
7148 void md_wakeup_thread(struct md_thread *thread)
7149 {
7150         if (thread) {
7151                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7152                 set_bit(THREAD_WAKEUP, &thread->flags);
7153                 wake_up(&thread->wqueue);
7154         }
7155 }
7156 EXPORT_SYMBOL(md_wakeup_thread);
7157
7158 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7159                 struct mddev *mddev, const char *name)
7160 {
7161         struct md_thread *thread;
7162
7163         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7164         if (!thread)
7165                 return NULL;
7166
7167         init_waitqueue_head(&thread->wqueue);
7168
7169         thread->run = run;
7170         thread->mddev = mddev;
7171         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7172         thread->tsk = kthread_run(md_thread, thread,
7173                                   "%s_%s",
7174                                   mdname(thread->mddev),
7175                                   name);
7176         if (IS_ERR(thread->tsk)) {
7177                 kfree(thread);
7178                 return NULL;
7179         }
7180         return thread;
7181 }
7182 EXPORT_SYMBOL(md_register_thread);
7183
7184 void md_unregister_thread(struct md_thread **threadp)
7185 {
7186         struct md_thread *thread = *threadp;
7187         if (!thread)
7188                 return;
7189         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7190         /* Locking ensures that mddev_unlock does not wake_up a
7191          * non-existent thread
7192          */
7193         spin_lock(&pers_lock);
7194         *threadp = NULL;
7195         spin_unlock(&pers_lock);
7196
7197         kthread_stop(thread->tsk);
7198         kfree(thread);
7199 }
7200 EXPORT_SYMBOL(md_unregister_thread);
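/*
 * A minimal sketch of how the thread helpers above are typically used
 * together (illustrative only; see md_start_sync() further down for a real
 * caller):
 *
 *      mddev->sync_thread = md_register_thread(md_do_sync, mddev, "resync");
 *      if (!mddev->sync_thread)
 *              ... handle failure ...
 *      md_wakeup_thread(mddev->sync_thread);
 *      ...
 *      md_unregister_thread(&mddev->sync_thread);
 *
 * The run() callback passed to md_register_thread() is invoked on every pass
 * of md_thread(), i.e. whenever the thread is woken or its timeout expires.
 */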
7201
7202 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7203 {
7204         if (!rdev || test_bit(Faulty, &rdev->flags))
7205                 return;
7206
7207         if (!mddev->pers || !mddev->pers->error_handler)
7208                 return;
7209         mddev->pers->error_handler(mddev,rdev);
7210         if (mddev->degraded)
7211                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7212         sysfs_notify_dirent_safe(rdev->sysfs_state);
7213         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7214         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7215         md_wakeup_thread(mddev->thread);
7216         if (mddev->event_work.func)
7217                 queue_work(md_misc_wq, &mddev->event_work);
7218         md_new_event(mddev);
7219 }
7220 EXPORT_SYMBOL(md_error);
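/*
 * Sketch of a typical md_error() call site in a personality's I/O completion
 * path (illustrative; the exact failure test depends on the personality):
 *
 *      if (the write to rdev failed)
 *              md_error(rdev->mddev, rdev);
 *
 * The personality's ->error_handler() is expected to mark the rdev Faulty
 * (or fail the whole array) before the recovery handling above runs.
 */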
7221
7222 /* seq_file implementation /proc/mdstat */
7223
7224 static void status_unused(struct seq_file *seq)
7225 {
7226         int i = 0;
7227         struct md_rdev *rdev;
7228
7229         seq_printf(seq, "unused devices: ");
7230
7231         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7232                 char b[BDEVNAME_SIZE];
7233                 i++;
7234                 seq_printf(seq, "%s ",
7235                               bdevname(rdev->bdev,b));
7236         }
7237         if (!i)
7238                 seq_printf(seq, "<none>");
7239
7240         seq_printf(seq, "\n");
7241 }
7242
7243 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7244 {
7245         sector_t max_sectors, resync, res;
7246         unsigned long dt, db;
7247         sector_t rt;
7248         int scale;
7249         unsigned int per_milli;
7250
7251         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7252             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7253                 max_sectors = mddev->resync_max_sectors;
7254         else
7255                 max_sectors = mddev->dev_sectors;
7256
7257         resync = mddev->curr_resync;
7258         if (resync <= 3) {
7259                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7260                         /* Still cleaning up */
7261                         resync = max_sectors;
7262         } else
7263                 resync -= atomic_read(&mddev->recovery_active);
7264
7265         if (resync == 0) {
7266                 if (mddev->recovery_cp < MaxSector) {
7267                         seq_printf(seq, "\tresync=PENDING");
7268                         return 1;
7269                 }
7270                 return 0;
7271         }
7272         if (resync < 3) {
7273                 seq_printf(seq, "\tresync=DELAYED");
7274                 return 1;
7275         }
7276
7277         WARN_ON(max_sectors == 0);
7278         /* Pick 'scale' such that (resync>>scale)*1000 will fit
7279          * in a sector_t, and (max_sectors>>scale) will fit in a
7280          * u32, as those are the requirements for sector_div.
7281          * Thus 'scale' must be at least 10
7282          */
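         /* Illustrative numbers (not from the source): for an 8TiB device,
          * max_sectors is 2^34, so max_sectors/2 = 2^33 never exceeds
          * 1ULL<<(10+32) and 'scale' stays at 10; (max_sectors>>10)+1 is then
          * about 2^24, comfortably inside the u32 divisor sector_div() needs.
          */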
7283         scale = 10;
7284         if (sizeof(sector_t) > sizeof(unsigned long)) {
7285                 while ( max_sectors/2 > (1ULL<<(scale+32)))
7286                         scale++;
7287         }
7288         res = (resync>>scale)*1000;
7289         sector_div(res, (u32)((max_sectors>>scale)+1));
7290
7291         per_milli = res;
7292         {
7293                 int i, x = per_milli/50, y = 20-x;
7294                 seq_printf(seq, "[");
7295                 for (i = 0; i < x; i++)
7296                         seq_printf(seq, "=");
7297                 seq_printf(seq, ">");
7298                 for (i = 0; i < y; i++)
7299                         seq_printf(seq, ".");
7300                 seq_printf(seq, "] ");
7301         }
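         /* Roughly what the lines built here look like in /proc/mdstat
          * (illustrative output only):
          *   [=========>...........]  resync = 45.7% (123456789/270000000)
          *                            finish=12.3min speed=102400K/sec
          */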
7302         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7303                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7304                     "reshape" :
7305                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7306                      "check" :
7307                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7308                       "resync" : "recovery"))),
7309                    per_milli/10, per_milli % 10,
7310                    (unsigned long long) resync/2,
7311                    (unsigned long long) max_sectors/2);
7312
7313         /*
7314          * dt: time from mark until now
7315          * db: blocks written from mark until now
7316          * rt: remaining time
7317          *
7318          * rt is a sector_t, so could be 32bit or 64bit.
7319          * So we divide before multiply in case it is 32bit and close
7320          * to the limit.
7321          * We scale the divisor (db) by 32 to avoid losing precision
7322          * near the end of resync when the number of remaining sectors
7323          * is close to 'db'.
7324          * We then divide rt by 32 after multiplying by db to compensate.
7325          * The '+1' avoids division by zero if db is very small.
7326          */
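         /* Worked example with made-up numbers: rt = 1000000 remaining
          * sectors, db = 20000 sectors, dt = 10s.  db/32+1 = 626, so
          * sector_div(rt, 626) gives 1597; *dt = 15970; >>5 = 499 seconds,
          * printed as finish=8.3min.
          */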
7327         dt = ((jiffies - mddev->resync_mark) / HZ);
7328         if (!dt) dt++;
7329         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
7330                 - mddev->resync_mark_cnt;
7331
7332         rt = max_sectors - resync;    /* number of remaining sectors */
7333         sector_div(rt, db/32+1);
7334         rt *= dt;
7335         rt >>= 5;
7336
7337         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7338                    ((unsigned long)rt % 60)/6);
7339
7340         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7341         return 1;
7342 }
7343
7344 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7345 {
7346         struct list_head *tmp;
7347         loff_t l = *pos;
7348         struct mddev *mddev;
7349
7350         if (l >= 0x10000)
7351                 return NULL;
7352         if (!l--)
7353                 /* header */
7354                 return (void*)1;
7355
7356         spin_lock(&all_mddevs_lock);
7357         list_for_each(tmp,&all_mddevs)
7358                 if (!l--) {
7359                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7360                         mddev_get(mddev);
7361                         spin_unlock(&all_mddevs_lock);
7362                         return mddev;
7363                 }
7364         spin_unlock(&all_mddevs_lock);
7365         if (!l--)
7366                 return (void*)2;/* tail */
7367         return NULL;
7368 }
7369
7370 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7371 {
7372         struct list_head *tmp;
7373         struct mddev *next_mddev, *mddev = v;
7374
7375         ++*pos;
7376         if (v == (void*)2)
7377                 return NULL;
7378
7379         spin_lock(&all_mddevs_lock);
7380         if (v == (void*)1)
7381                 tmp = all_mddevs.next;
7382         else
7383                 tmp = mddev->all_mddevs.next;
7384         if (tmp != &all_mddevs)
7385                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7386         else {
7387                 next_mddev = (void*)2;
7388                 *pos = 0x10000;
7389         }
7390         spin_unlock(&all_mddevs_lock);
7391
7392         if (v != (void*)1)
7393                 mddev_put(mddev);
7394         return next_mddev;
7395
7396 }
7397
7398 static void md_seq_stop(struct seq_file *seq, void *v)
7399 {
7400         struct mddev *mddev = v;
7401
7402         if (mddev && v != (void*)1 && v != (void*)2)
7403                 mddev_put(mddev);
7404 }
7405
7406 static int md_seq_show(struct seq_file *seq, void *v)
7407 {
7408         struct mddev *mddev = v;
7409         sector_t sectors;
7410         struct md_rdev *rdev;
7411
7412         if (v == (void*)1) {
7413                 struct md_personality *pers;
7414                 seq_printf(seq, "Personalities : ");
7415                 spin_lock(&pers_lock);
7416                 list_for_each_entry(pers, &pers_list, list)
7417                         seq_printf(seq, "[%s] ", pers->name);
7418
7419                 spin_unlock(&pers_lock);
7420                 seq_printf(seq, "\n");
7421                 seq->poll_event = atomic_read(&md_event_count);
7422                 return 0;
7423         }
7424         if (v == (void*)2) {
7425                 status_unused(seq);
7426                 return 0;
7427         }
7428
7429         spin_lock(&mddev->lock);
7430         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7431                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7432                                                 mddev->pers ? "" : "in");
7433                 if (mddev->pers) {
7434                         if (mddev->ro==1)
7435                                 seq_printf(seq, " (read-only)");
7436                         if (mddev->ro==2)
7437                                 seq_printf(seq, " (auto-read-only)");
7438                         seq_printf(seq, " %s", mddev->pers->name);
7439                 }
7440
7441                 sectors = 0;
7442                 rcu_read_lock();
7443                 rdev_for_each_rcu(rdev, mddev) {
7444                         char b[BDEVNAME_SIZE];
7445                         seq_printf(seq, " %s[%d]",
7446                                 bdevname(rdev->bdev,b), rdev->desc_nr);
7447                         if (test_bit(WriteMostly, &rdev->flags))
7448                                 seq_printf(seq, "(W)");
7449                         if (test_bit(Journal, &rdev->flags))
7450                                 seq_printf(seq, "(J)");
7451                         if (test_bit(Faulty, &rdev->flags)) {
7452                                 seq_printf(seq, "(F)");
7453                                 continue;
7454                         }
7455                         if (rdev->raid_disk < 0)
7456                                 seq_printf(seq, "(S)"); /* spare */
7457                         if (test_bit(Replacement, &rdev->flags))
7458                                 seq_printf(seq, "(R)");
7459                         sectors += rdev->sectors;
7460                 }
7461                 rcu_read_unlock();
7462
7463                 if (!list_empty(&mddev->disks)) {
7464                         if (mddev->pers)
7465                                 seq_printf(seq, "\n      %llu blocks",
7466                                            (unsigned long long)
7467                                            mddev->array_sectors / 2);
7468                         else
7469                                 seq_printf(seq, "\n      %llu blocks",
7470                                            (unsigned long long)sectors / 2);
7471                 }
7472                 if (mddev->persistent) {
7473                         if (mddev->major_version != 0 ||
7474                             mddev->minor_version != 90) {
7475                                 seq_printf(seq," super %d.%d",
7476                                            mddev->major_version,
7477                                            mddev->minor_version);
7478                         }
7479                 } else if (mddev->external)
7480                         seq_printf(seq, " super external:%s",
7481                                    mddev->metadata_type);
7482                 else
7483                         seq_printf(seq, " super non-persistent");
7484
7485                 if (mddev->pers) {
7486                         mddev->pers->status(seq, mddev);
7487                         seq_printf(seq, "\n      ");
7488                         if (mddev->pers->sync_request) {
7489                                 if (status_resync(seq, mddev))
7490                                         seq_printf(seq, "\n      ");
7491                         }
7492                 } else
7493                         seq_printf(seq, "\n       ");
7494
7495                 bitmap_status(seq, mddev->bitmap);
7496
7497                 seq_printf(seq, "\n");
7498         }
7499         spin_unlock(&mddev->lock);
7500
7501         return 0;
7502 }
7503
7504 static const struct seq_operations md_seq_ops = {
7505         .start  = md_seq_start,
7506         .next   = md_seq_next,
7507         .stop   = md_seq_stop,
7508         .show   = md_seq_show,
7509 };
7510
7511 static int md_seq_open(struct inode *inode, struct file *file)
7512 {
7513         struct seq_file *seq;
7514         int error;
7515
7516         error = seq_open(file, &md_seq_ops);
7517         if (error)
7518                 return error;
7519
7520         seq = file->private_data;
7521         seq->poll_event = atomic_read(&md_event_count);
7522         return error;
7523 }
7524
7525 static int md_unloading;
7526 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7527 {
7528         struct seq_file *seq = filp->private_data;
7529         int mask;
7530
7531         if (md_unloading)
7532                 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7533         poll_wait(filp, &md_event_waiters, wait);
7534
7535         /* always allow read */
7536         mask = POLLIN | POLLRDNORM;
7537
7538         if (seq->poll_event != atomic_read(&md_event_count))
7539                 mask |= POLLERR | POLLPRI;
7540         return mask;
7541 }
7542
7543 static const struct file_operations md_seq_fops = {
7544         .owner          = THIS_MODULE,
7545         .open           = md_seq_open,
7546         .read           = seq_read,
7547         .llseek         = seq_lseek,
7548         .release        = seq_release_private,
7549         .poll           = mdstat_poll,
7550 };
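/*
 * Userspace monitors (mdadm --monitor, for instance) can use the poll hook
 * above roughly as follows (a sketch, not a normative interface description):
 * open /proc/mdstat, read it once, then poll() for POLLPRI|POLLERR; a wakeup
 * means md_event_count changed and the file should be re-read from offset 0.
 */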
7551
7552 int register_md_personality(struct md_personality *p)
7553 {
7554         pr_debug("md: %s personality registered for level %d\n",
7555                  p->name, p->level);
7556         spin_lock(&pers_lock);
7557         list_add_tail(&p->list, &pers_list);
7558         spin_unlock(&pers_lock);
7559         return 0;
7560 }
7561 EXPORT_SYMBOL(register_md_personality);
7562
7563 int unregister_md_personality(struct md_personality *p)
7564 {
7565         pr_debug("md: %s personality unregistered\n", p->name);
7566         spin_lock(&pers_lock);
7567         list_del_init(&p->list);
7568         spin_unlock(&pers_lock);
7569         return 0;
7570 }
7571 EXPORT_SYMBOL(unregister_md_personality);
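/*
 * Sketch of how a personality module uses the two calls above (illustrative,
 * modelled on the raid level modules):
 *
 *      static struct md_personality raid1_personality = {
 *              .name   = "raid1",
 *              .level  = 1,
 *              .owner  = THIS_MODULE,
 *              ...
 *      };
 *      register_md_personality(&raid1_personality);    from module_init
 *      unregister_md_personality(&raid1_personality);  from module_exit
 */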
7572
7573 int register_md_cluster_operations(struct md_cluster_operations *ops,
7574                                    struct module *module)
7575 {
7576         int ret = 0;
7577         spin_lock(&pers_lock);
7578         if (md_cluster_ops != NULL)
7579                 ret = -EALREADY;
7580         else {
7581                 md_cluster_ops = ops;
7582                 md_cluster_mod = module;
7583         }
7584         spin_unlock(&pers_lock);
7585         return ret;
7586 }
7587 EXPORT_SYMBOL(register_md_cluster_operations);
7588
7589 int unregister_md_cluster_operations(void)
7590 {
7591         spin_lock(&pers_lock);
7592         md_cluster_ops = NULL;
7593         spin_unlock(&pers_lock);
7594         return 0;
7595 }
7596 EXPORT_SYMBOL(unregister_md_cluster_operations);
7597
7598 int md_setup_cluster(struct mddev *mddev, int nodes)
7599 {
7600         if (!md_cluster_ops)
7601                 request_module("md-cluster");
7602         spin_lock(&pers_lock);
7603         /* ensure module won't be unloaded */
7604         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7605                 pr_warn("can't find md-cluster module or get its reference.\n");
7606                 spin_unlock(&pers_lock);
7607                 return -ENOENT;
7608         }
7609         spin_unlock(&pers_lock);
7610
7611         return md_cluster_ops->join(mddev, nodes);
7612 }
7613
7614 void md_cluster_stop(struct mddev *mddev)
7615 {
7616         if (!md_cluster_ops)
7617                 return;
7618         md_cluster_ops->leave(mddev);
7619         module_put(md_cluster_mod);
7620 }
7621
7622 static int is_mddev_idle(struct mddev *mddev, int init)
7623 {
7624         struct md_rdev *rdev;
7625         int idle;
7626         int curr_events;
7627
7628         idle = 1;
7629         rcu_read_lock();
7630         rdev_for_each_rcu(rdev, mddev) {
7631                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7632                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7633                               (int)part_stat_read(&disk->part0, sectors[1]) -
7634                               atomic_read(&disk->sync_io);
7635                 /* sync IO will cause sync_io to increase before the disk_stats
7636                  * as sync_io is counted when a request starts, and
7637                  * disk_stats is counted when it completes.
7638                  * So resync activity will cause curr_events to be smaller than
7639                  * when there was no such activity.
7640                  * non-sync IO will cause disk_stat to increase without
7641                  * increasing sync_io so curr_events will (eventually)
7642                  * be larger than it was before.  Once it becomes
7643                  * substantially larger, the test below will cause
7644                  * the array to appear non-idle, and resync will slow
7645                  * down.
7646                  * If there is a lot of outstanding resync activity when
7647                  * we set last_event to curr_events, then all that activity
7648                  * completing might cause the array to appear non-idle
7649                  * and resync will be slowed down even though there might
7650                  * not have been non-resync activity.  This will only
7651                  * happen once though.  'last_events' will soon reflect
7652                  * the state where there is little or no outstanding
7653                  * resync requests, and further resync activity will
7654                  * always make curr_events less than last_events.
7655                  *
7656                  */
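                 /* Illustrative reading of the threshold below: more than 64
                  * sectors (32KiB) of net non-resync I/O on this device since
                  * the last check is enough to declare the array non-idle.
                  */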
7657                 if (init || curr_events - rdev->last_events > 64) {
7658                         rdev->last_events = curr_events;
7659                         idle = 0;
7660                 }
7661         }
7662         rcu_read_unlock();
7663         return idle;
7664 }
7665
7666 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7667 {
7668         /* another "blocks" (512byte) blocks have been synced */
7669         atomic_sub(blocks, &mddev->recovery_active);
7670         wake_up(&mddev->recovery_wait);
7671         if (!ok) {
7672                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7673                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7674                 md_wakeup_thread(mddev->thread);
7675                 // stop recovery, signal do_sync ....
7676         }
7677 }
7678 EXPORT_SYMBOL(md_done_sync);
7679
7680 /* md_write_start(mddev, bi)
7681  * If we need to update some array metadata (e.g. 'active' flag
7682  * in superblock) before writing, schedule a superblock update
7683  * and wait for it to complete.
7684  */
7685 void md_write_start(struct mddev *mddev, struct bio *bi)
7686 {
7687         int did_change = 0;
7688         if (bio_data_dir(bi) != WRITE)
7689                 return;
7690
7691         BUG_ON(mddev->ro == 1);
7692         if (mddev->ro == 2) {
7693                 /* need to switch to read/write */
7694                 mddev->ro = 0;
7695                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7696                 md_wakeup_thread(mddev->thread);
7697                 md_wakeup_thread(mddev->sync_thread);
7698                 did_change = 1;
7699         }
7700         atomic_inc(&mddev->writes_pending);
7701         if (mddev->safemode == 1)
7702                 mddev->safemode = 0;
7703         if (mddev->in_sync) {
7704                 spin_lock(&mddev->lock);
7705                 if (mddev->in_sync) {
7706                         mddev->in_sync = 0;
7707                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7708                         set_bit(MD_CHANGE_PENDING, &mddev->flags);
7709                         md_wakeup_thread(mddev->thread);
7710                         did_change = 1;
7711                 }
7712                 spin_unlock(&mddev->lock);
7713         }
7714         if (did_change)
7715                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7716         wait_event(mddev->sb_wait,
7717                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7718 }
7719 EXPORT_SYMBOL(md_write_start);
7720
7721 void md_write_end(struct mddev *mddev)
7722 {
7723         if (atomic_dec_and_test(&mddev->writes_pending)) {
7724                 if (mddev->safemode == 2)
7725                         md_wakeup_thread(mddev->thread);
7726                 else if (mddev->safemode_delay)
7727                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7728         }
7729 }
7730 EXPORT_SYMBOL(md_write_end);
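/*
 * Sketch of the expected pairing of the two helpers above (illustrative;
 * the exact placement differs per personality):
 *
 *      md_write_start(mddev, bio);   before issuing a write; may block while
 *                                    the superblock is marked active
 *      ... submit the write to the member devices ...
 *      md_write_end(mddev);          when the write completes; may arm the
 *                                    safemode timer
 */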
7731
7732 /* md_allow_write(mddev)
7733  * Calling this ensures that the array is marked 'active' so that writes
7734  * may proceed without blocking.  It is important to call this before
7735  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7736  * Must be called with mddev_lock held.
7737  *
7738  * In the ->external case MD_CHANGE_PENDING cannot be cleared until mddev->lock
7739  * is dropped, so return -EAGAIN after notifying userspace.
7740  */
7741 int md_allow_write(struct mddev *mddev)
7742 {
7743         if (!mddev->pers)
7744                 return 0;
7745         if (mddev->ro)
7746                 return 0;
7747         if (!mddev->pers->sync_request)
7748                 return 0;
7749
7750         spin_lock(&mddev->lock);
7751         if (mddev->in_sync) {
7752                 mddev->in_sync = 0;
7753                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7754                 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7755                 if (mddev->safemode_delay &&
7756                     mddev->safemode == 0)
7757                         mddev->safemode = 1;
7758                 spin_unlock(&mddev->lock);
7759                 md_update_sb(mddev, 0);
7760                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7761         } else
7762                 spin_unlock(&mddev->lock);
7763
7764         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
7765                 return -EAGAIN;
7766         else
7767                 return 0;
7768 }
7769 EXPORT_SYMBOL_GPL(md_allow_write);
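/*
 * Typical call pattern (a sketch, assuming a caller that holds mddev_lock and
 * is about to allocate with GFP_KERNEL):
 *
 *      err = md_allow_write(mddev);
 *      if (err)                -EAGAIN: metadata update still pending
 *              return err;              in the ->external case
 *      buf = kzalloc(size, GFP_KERNEL);
 */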
7770
7771 #define SYNC_MARKS      10
7772 #define SYNC_MARK_STEP  (3*HZ)
7773 #define UPDATE_FREQUENCY (5*60*HZ)
7774 void md_do_sync(struct md_thread *thread)
7775 {
7776         struct mddev *mddev = thread->mddev;
7777         struct mddev *mddev2;
7778         unsigned int currspeed = 0,
7779                  window;
7780         sector_t max_sectors,j, io_sectors, recovery_done;
7781         unsigned long mark[SYNC_MARKS];
7782         unsigned long update_time;
7783         sector_t mark_cnt[SYNC_MARKS];
7784         int last_mark,m;
7785         struct list_head *tmp;
7786         sector_t last_check;
7787         int skipped = 0;
7788         struct md_rdev *rdev;
7789         char *desc, *action = NULL;
7790         struct blk_plug plug;
7791         int ret;
7792
7793         /* just in case the thread restarts... */
7794         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7795                 return;
7796         if (mddev->ro) {/* never try to sync a read-only array */
7797                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7798                 return;
7799         }
7800
7801         if (mddev_is_clustered(mddev)) {
7802                 ret = md_cluster_ops->resync_start(mddev);
7803                 if (ret)
7804                         goto skip;
7805
7806                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
7807                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7808                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
7809                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
7810                      && ((unsigned long long)mddev->curr_resync_completed
7811                          < (unsigned long long)mddev->resync_max_sectors))
7812                         goto skip;
7813         }
7814
7815         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7816                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7817                         desc = "data-check";
7818                         action = "check";
7819                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7820                         desc = "requested-resync";
7821                         action = "repair";
7822                 } else
7823                         desc = "resync";
7824         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7825                 desc = "reshape";
7826         else
7827                 desc = "recovery";
7828
7829         mddev->last_sync_action = action ?: desc;
7830
7831         /* we overload curr_resync somewhat here.
7832          * 0 == not engaged in resync at all
7833          * 2 == checking that there is no conflict with another sync
7834          * 1 == like 2, but have yielded to allow conflicting resync to
7835          *              commence
7836          * other == active in resync - this many blocks
7837          *
7838          * Before starting a resync we must have set curr_resync to
7839          * 2, and then checked that every "conflicting" array has curr_resync
7840          * less than ours.  When we find one that is the same or higher
7841          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7842          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7843          * This will mean we have to start checking from the beginning again.
7844          *
7845          */
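         /* Illustrative sequence (not from the source): if md0 and md1 share
          * a physical device and both start a resync, both first set
          * curr_resync to 2; the one with the lower mddev address drops to 1
          * and sleeps on resync_wait while the other proceeds, and is woken
          * to retry (back at 2) once the conflicting resync finishes.
          */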
7846
7847         do {
7848                 int mddev2_minor = -1;
7849                 mddev->curr_resync = 2;
7850
7851         try_again:
7852                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7853                         goto skip;
7854                 for_each_mddev(mddev2, tmp) {
7855                         if (mddev2 == mddev)
7856                                 continue;
7857                         if (!mddev->parallel_resync
7858                         &&  mddev2->curr_resync
7859                         &&  match_mddev_units(mddev, mddev2)) {
7860                                 DEFINE_WAIT(wq);
7861                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
7862                                         /* arbitrarily yield */
7863                                         mddev->curr_resync = 1;
7864                                         wake_up(&resync_wait);
7865                                 }
7866                                 if (mddev > mddev2 && mddev->curr_resync == 1)
7867                                         /* no need to wait here, we can wait the next
7868                                          * time 'round when curr_resync == 2
7869                                          */
7870                                         continue;
7871                                 /* We need to wait 'interruptible' so as not to
7872                                  * contribute to the load average, and not to
7873                                  * be caught by 'softlockup'
7874                                  */
7875                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7876                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7877                                     mddev2->curr_resync >= mddev->curr_resync) {
7878                                         if (mddev2_minor != mddev2->md_minor) {
7879                                                 mddev2_minor = mddev2->md_minor;
7880                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
7881                                                         desc, mdname(mddev),
7882                                                         mdname(mddev2));
7883                                         }
7884                                         mddev_put(mddev2);
7885                                         if (signal_pending(current))
7886                                                 flush_signals(current);
7887                                         schedule();
7888                                         finish_wait(&resync_wait, &wq);
7889                                         goto try_again;
7890                                 }
7891                                 finish_wait(&resync_wait, &wq);
7892                         }
7893                 }
7894         } while (mddev->curr_resync < 2);
7895
7896         j = 0;
7897         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7898                 /* resync follows the size requested by the personality,
7899                  * which defaults to physical size, but can be virtual size
7900                  */
7901                 max_sectors = mddev->resync_max_sectors;
7902                 atomic64_set(&mddev->resync_mismatches, 0);
7903                 /* we don't use the checkpoint if there's a bitmap */
7904                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7905                         j = mddev->resync_min;
7906                 else if (!mddev->bitmap)
7907                         j = mddev->recovery_cp;
7908
7909         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7910                 max_sectors = mddev->resync_max_sectors;
7911         else {
7912                 /* recovery follows the physical size of devices */
7913                 max_sectors = mddev->dev_sectors;
7914                 j = MaxSector;
7915                 rcu_read_lock();
7916                 rdev_for_each_rcu(rdev, mddev)
7917                         if (rdev->raid_disk >= 0 &&
7918                             !test_bit(Journal, &rdev->flags) &&
7919                             !test_bit(Faulty, &rdev->flags) &&
7920                             !test_bit(In_sync, &rdev->flags) &&
7921                             rdev->recovery_offset < j)
7922                                 j = rdev->recovery_offset;
7923                 rcu_read_unlock();
7924
7925                 /* If there is a bitmap, we need to make sure all
7926                  * writes that started before we added a spare
7927                  * complete before we start doing a recovery.
7928                  * Otherwise the write might complete and (via
7929                  * bitmap_endwrite) set a bit in the bitmap after the
7930                  * recovery has checked that bit and skipped that
7931                  * region.
7932                  */
7933                 if (mddev->bitmap) {
7934                         mddev->pers->quiesce(mddev, 1);
7935                         mddev->pers->quiesce(mddev, 0);
7936                 }
7937         }
7938
7939         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
7940         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
7941         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
7942                  speed_max(mddev), desc);
7943
7944         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
7945
7946         io_sectors = 0;
7947         for (m = 0; m < SYNC_MARKS; m++) {
7948                 mark[m] = jiffies;
7949                 mark_cnt[m] = io_sectors;
7950         }
7951         last_mark = 0;
7952         mddev->resync_mark = mark[last_mark];
7953         mddev->resync_mark_cnt = mark_cnt[last_mark];
7954
7955         /*
7956          * Tune reconstruction:
7957          */
7958         window = 32*(PAGE_SIZE/512);
7959         pr_debug("md: using %dk window, over a total of %lluk.\n",
7960                  window/2, (unsigned long long)max_sectors/2);
7961
7962         atomic_set(&mddev->recovery_active, 0);
7963         last_check = 0;
7964
7965         if (j>2) {
7966                 pr_debug("md: resuming %s of %s from checkpoint.\n",
7967                          desc, mdname(mddev));
7968                 mddev->curr_resync = j;
7969         } else
7970                 mddev->curr_resync = 3; /* no longer delayed */
7971         mddev->curr_resync_completed = j;
7972         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7973         md_new_event(mddev);
7974         update_time = jiffies;
7975
7976         blk_start_plug(&plug);
7977         while (j < max_sectors) {
7978                 sector_t sectors;
7979
7980                 skipped = 0;
7981
7982                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7983                     ((mddev->curr_resync > mddev->curr_resync_completed &&
7984                       (mddev->curr_resync - mddev->curr_resync_completed)
7985                       > (max_sectors >> 4)) ||
7986                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
7987                      (j - mddev->curr_resync_completed)*2
7988                      >= mddev->resync_max - mddev->curr_resync_completed ||
7989                      mddev->curr_resync_completed > mddev->resync_max
7990                             )) {
7991                         /* time to update curr_resync_completed */
7992                         wait_event(mddev->recovery_wait,
7993                                    atomic_read(&mddev->recovery_active) == 0);
7994                         mddev->curr_resync_completed = j;
7995                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
7996                             j > mddev->recovery_cp)
7997                                 mddev->recovery_cp = j;
7998                         update_time = jiffies;
7999                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
8000                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8001                 }
8002
8003                 while (j >= mddev->resync_max &&
8004                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8005                         /* As this condition is controlled by user-space,
8006                          * we can block indefinitely, so use '_interruptible'
8007                          * to avoid triggering warnings.
8008                          */
8009                         flush_signals(current); /* just in case */
8010                         wait_event_interruptible(mddev->recovery_wait,
8011                                                  mddev->resync_max > j
8012                                                  || test_bit(MD_RECOVERY_INTR,
8013                                                              &mddev->recovery));
8014                 }
8015
8016                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8017                         break;
8018
8019                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8020                 if (sectors == 0) {
8021                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8022                         break;
8023                 }
8024
8025                 if (!skipped) { /* actual IO requested */
8026                         io_sectors += sectors;
8027                         atomic_add(sectors, &mddev->recovery_active);
8028                 }
8029
8030                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8031                         break;
8032
8033                 j += sectors;
8034                 if (j > max_sectors)
8035                         /* when skipping, extra large numbers can be returned. */
8036                         j = max_sectors;
8037                 if (j > 2)
8038                         mddev->curr_resync = j;
8039                 mddev->curr_mark_cnt = io_sectors;
8040                 if (last_check == 0)
8041                         /* this is the earliest that rebuild will be
8042                          * visible in /proc/mdstat
8043                          */
8044                         md_new_event(mddev);
8045
8046                 if (last_check + window > io_sectors || j == max_sectors)
8047                         continue;
8048
8049                 last_check = io_sectors;
8050         repeat:
8051                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8052                         /* step marks */
8053                         int next = (last_mark+1) % SYNC_MARKS;
8054
8055                         mddev->resync_mark = mark[next];
8056                         mddev->resync_mark_cnt = mark_cnt[next];
8057                         mark[next] = jiffies;
8058                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8059                         last_mark = next;
8060                 }
8061
8062                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8063                         break;
8064
8065                 /*
8066                  * this loop exits only when either we are slower than
8067                  * the 'hard' speed limit, or the system was IO-idle for
8068                  * a jiffy.
8069                  * the system might be non-idle CPU-wise, but we only care
8070                  * about not overloading the IO subsystem. (things like an
8071                  * e2fsck being done on the RAID array should execute fast)
8072                  */
8073                 cond_resched();
8074
8075                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8076                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8077                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
8078
8079                 if (currspeed > speed_min(mddev)) {
8080                         if (currspeed > speed_max(mddev)) {
8081                                 msleep(500);
8082                                 goto repeat;
8083                         }
8084                         if (!is_mddev_idle(mddev, 0)) {
8085                                 /*
8086                                  * Give other IO more of a chance.
8087                                  * The faster the devices, the less we wait.
8088                                  */
8089                                 wait_event(mddev->recovery_wait,
8090                                            !atomic_read(&mddev->recovery_active));
8091                         }
8092                 }
8093         }
8094         pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8095                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8096                 ? "interrupted" : "done");
8097         /*
8098          * this also signals 'finished resyncing' to md_stop
8099          */
8100         blk_finish_plug(&plug);
8101         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8102
8103         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8104             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8105             mddev->curr_resync > 3) {
8106                 mddev->curr_resync_completed = mddev->curr_resync;
8107                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8108         }
8109         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8110
8111         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8112             mddev->curr_resync > 3) {
8113                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8114                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8115                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8116                                         pr_debug("md: checkpointing %s of %s.\n",
8117                                                  desc, mdname(mddev));
8118                                         if (test_bit(MD_RECOVERY_ERROR,
8119                                                 &mddev->recovery))
8120                                                 mddev->recovery_cp =
8121                                                         mddev->curr_resync_completed;
8122                                         else
8123                                                 mddev->recovery_cp =
8124                                                         mddev->curr_resync;
8125                                 }
8126                         } else
8127                                 mddev->recovery_cp = MaxSector;
8128                 } else {
8129                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8130                                 mddev->curr_resync = MaxSector;
8131                         rcu_read_lock();
8132                         rdev_for_each_rcu(rdev, mddev)
8133                                 if (rdev->raid_disk >= 0 &&
8134                                     mddev->delta_disks >= 0 &&
8135                                     !test_bit(Journal, &rdev->flags) &&
8136                                     !test_bit(Faulty, &rdev->flags) &&
8137                                     !test_bit(In_sync, &rdev->flags) &&
8138                                     rdev->recovery_offset < mddev->curr_resync)
8139                                         rdev->recovery_offset = mddev->curr_resync;
8140                         rcu_read_unlock();
8141                 }
8142         }
8143  skip:
8144         /* set CHANGE_PENDING here since another update may still be needed,
8145          * so that other nodes are informed. It should be harmless for normal
8146          * raid */
8147         set_mask_bits(&mddev->flags, 0,
8148                       BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
8149
8150         spin_lock(&mddev->lock);
8151         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8152                 /* We completed so min/max setting can be forgotten if used. */
8153                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8154                         mddev->resync_min = 0;
8155                 mddev->resync_max = MaxSector;
8156         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8157                 mddev->resync_min = mddev->curr_resync_completed;
8158         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8159         mddev->curr_resync = 0;
8160         spin_unlock(&mddev->lock);
8161
8162         wake_up(&resync_wait);
8163         md_wakeup_thread(mddev->thread);
8164         return;
8165 }
8166 EXPORT_SYMBOL_GPL(md_do_sync);
8167
8168 static int remove_and_add_spares(struct mddev *mddev,
8169                                  struct md_rdev *this)
8170 {
8171         struct md_rdev *rdev;
8172         int spares = 0;
8173         int removed = 0;
8174         bool remove_some = false;
8175
8176         rdev_for_each(rdev, mddev) {
8177                 if ((this == NULL || rdev == this) &&
8178                     rdev->raid_disk >= 0 &&
8179                     !test_bit(Blocked, &rdev->flags) &&
8180                     test_bit(Faulty, &rdev->flags) &&
8181                     atomic_read(&rdev->nr_pending)==0) {
8182                         /* Faulty non-Blocked devices with nr_pending == 0
8183                          * never get nr_pending incremented,
8184                          * never get Faulty cleared, and never get Blocked set.
8185                          * So we can synchronize_rcu now rather than once per device
8186                          */
8187                         remove_some = true;
8188                         set_bit(RemoveSynchronized, &rdev->flags);
8189                 }
8190         }
8191
8192         if (remove_some)
8193                 synchronize_rcu();
8194         rdev_for_each(rdev, mddev) {
8195                 if ((this == NULL || rdev == this) &&
8196                     rdev->raid_disk >= 0 &&
8197                     !test_bit(Blocked, &rdev->flags) &&
8198                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
8199                      (!test_bit(In_sync, &rdev->flags) &&
8200                       !test_bit(Journal, &rdev->flags))) &&
8201                     atomic_read(&rdev->nr_pending)==0)) {
8202                         if (mddev->pers->hot_remove_disk(
8203                                     mddev, rdev) == 0) {
8204                                 sysfs_unlink_rdev(mddev, rdev);
8205                                 rdev->raid_disk = -1;
8206                                 removed++;
8207                         }
8208                 }
8209                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
8210                         clear_bit(RemoveSynchronized, &rdev->flags);
8211         }
8212
8213         if (removed && mddev->kobj.sd)
8214                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8215
8216         if (this && removed)
8217                 goto no_add;
8218
8219         rdev_for_each(rdev, mddev) {
8220                 if (this && this != rdev)
8221                         continue;
8222                 if (test_bit(Candidate, &rdev->flags))
8223                         continue;
8224                 if (rdev->raid_disk >= 0 &&
8225                     !test_bit(In_sync, &rdev->flags) &&
8226                     !test_bit(Journal, &rdev->flags) &&
8227                     !test_bit(Faulty, &rdev->flags))
8228                         spares++;
8229                 if (rdev->raid_disk >= 0)
8230                         continue;
8231                 if (test_bit(Faulty, &rdev->flags))
8232                         continue;
8233                 if (!test_bit(Journal, &rdev->flags)) {
8234                         if (mddev->ro &&
8235                             ! (rdev->saved_raid_disk >= 0 &&
8236                                !test_bit(Bitmap_sync, &rdev->flags)))
8237                                 continue;
8238
8239                         rdev->recovery_offset = 0;
8240                 }
8241                 if (mddev->pers->
8242                     hot_add_disk(mddev, rdev) == 0) {
8243                         if (sysfs_link_rdev(mddev, rdev))
8244                                 /* failure here is OK */;
8245                         if (!test_bit(Journal, &rdev->flags))
8246                                 spares++;
8247                         md_new_event(mddev);
8248                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8249                 }
8250         }
8251 no_add:
8252         if (removed)
8253                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8254         return spares;
8255 }
8256
8257 static void md_start_sync(struct work_struct *ws)
8258 {
8259         struct mddev *mddev = container_of(ws, struct mddev, del_work);
8260
8261         mddev->sync_thread = md_register_thread(md_do_sync,
8262                                                 mddev,
8263                                                 "resync");
8264         if (!mddev->sync_thread) {
8265                 pr_warn("%s: could not start resync thread...\n",
8266                         mdname(mddev));
8267                 /* leave the spares where they are, it shouldn't hurt */
8268                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8269                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8270                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8271                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8272                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8273                 wake_up(&resync_wait);
8274                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8275                                        &mddev->recovery))
8276                         if (mddev->sysfs_action)
8277                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
8278         } else
8279                 md_wakeup_thread(mddev->sync_thread);
8280         sysfs_notify_dirent_safe(mddev->sysfs_action);
8281         md_new_event(mddev);
8282 }
8283
8284 /*
8285  * This routine is regularly called by all per-raid-array threads to
8286  * deal with generic issues like resync and super-block update.
8287  * Raid personalities that don't have a thread (linear/raid0) do not
8288  * need this as they never do any recovery or update the superblock.
8289  *
8290  * It does not do any resync itself, but rather "forks" off other threads
8291  * to do that as needed.
8292  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8293  * "->recovery" and create a thread at ->sync_thread.
8294  * When the thread finishes it sets MD_RECOVERY_DONE
8295  * and wakes up this thread, which will reap the sync thread and finish up.
8296  * This thread also removes any faulty devices (with nr_pending == 0).
8297  *
8298  * The overall approach is:
8299  *  1/ if the superblock needs updating, update it.
8300  *  2/ If a recovery thread is running, don't do anything else.
8301  *  3/ If recovery has finished, clean up, possibly marking spares active.
8302  *  4/ If there are any faulty devices, remove them.
8303  *  5/ If array is degraded, try to add spare devices
8304  *  6/ If array has spares or is not in-sync, start a resync thread.
8305  */
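/*
 * (Sketch: this is normally invoked from each personality's per-array daemon
 *  thread, e.g. raid1d() or raid5d(), once per pass of that thread's run
 *  callback.)
 */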
8306 void md_check_recovery(struct mddev *mddev)
8307 {
8308         if (mddev->suspended)
8309                 return;
8310
8311         if (mddev->bitmap)
8312                 bitmap_daemon_work(mddev);
8313
8314         if (signal_pending(current)) {
8315                 if (mddev->pers->sync_request && !mddev->external) {
8316                         pr_debug("md: %s in immediate safe mode\n",
8317                                  mdname(mddev));
8318                         mddev->safemode = 2;
8319                 }
8320                 flush_signals(current);
8321         }
8322
8323         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8324                 return;
8325         if ( ! (
8326                 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
8327                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8328                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8329                 test_bit(MD_RELOAD_SB, &mddev->flags) ||
8330                 (mddev->external == 0 && mddev->safemode == 1) ||
8331                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
8332                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
8333                 ))
8334                 return;
8335
8336         if (mddev_trylock(mddev)) {
8337                 int spares = 0;
8338
8339                 if (mddev->ro) {
8340                         struct md_rdev *rdev;
8341                         if (!mddev->external && mddev->in_sync)
8342                                 /* 'Blocked' flag not needed as failed devices
8343                                  * will be recorded if array switched to read/write.
8344                                  * Leaving it set will prevent the device
8345                                  * from being removed.
8346                                  */
8347                                 rdev_for_each(rdev, mddev)
8348                                         clear_bit(Blocked, &rdev->flags);
8349                         /* On a read-only array we can:
8350                          * - remove failed devices
8351                          * - add already-in_sync devices if the array itself
8352                          *   is in-sync.
8353                          * As we only add devices that are already in-sync,
8354                          * we can activate the spares immediately.
8355                          */
8356                         remove_and_add_spares(mddev, NULL);
8357                         /* There is no thread, but we need to call
8358                          * ->spare_active and clear saved_raid_disk
8359                          */
8360                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8361                         md_reap_sync_thread(mddev);
8362                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8363                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8364                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
8365                         goto unlock;
8366                 }
8367
8368                 if (mddev_is_clustered(mddev)) {
8369                         struct md_rdev *rdev;
8370                         /* kick the device if another node issued a
8371                          * remove disk.
8372                          */
8373                         rdev_for_each(rdev, mddev) {
8374                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
8375                                                 rdev->raid_disk < 0)
8376                                         md_kick_rdev_from_array(rdev);
8377                         }
8378
8379                         if (test_and_clear_bit(MD_RELOAD_SB, &mddev->flags))
8380                                 md_reload_sb(mddev, mddev->good_device_nr);
8381                 }
8382
8383                 if (!mddev->external) {
8384                         int did_change = 0;
8385                         spin_lock(&mddev->lock);
8386                         if (mddev->safemode &&
8387                             !atomic_read(&mddev->writes_pending) &&
8388                             !mddev->in_sync &&
8389                             mddev->recovery_cp == MaxSector) {
8390                                 mddev->in_sync = 1;
8391                                 did_change = 1;
8392                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
8393                         }
8394                         if (mddev->safemode == 1)
8395                                 mddev->safemode = 0;
8396                         spin_unlock(&mddev->lock);
8397                         if (did_change)
8398                                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8399                 }
8400
8401                 if (mddev->flags & MD_UPDATE_SB_FLAGS)
8402                         md_update_sb(mddev, 0);
8403
8404                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
8405                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
8406                         /* resync/recovery still happening */
8407                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8408                         goto unlock;
8409                 }
8410                 if (mddev->sync_thread) {
8411                         md_reap_sync_thread(mddev);
8412                         goto unlock;
8413                 }
8414                 /* Set RUNNING before clearing NEEDED to avoid
8415                  * any transients in the value of "sync_action".
8416                  */
8417                 mddev->curr_resync_completed = 0;
8418                 spin_lock(&mddev->lock);
8419                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8420                 spin_unlock(&mddev->lock);
8421                 /* Clear some bits that don't mean anything, but
8422                  * might be left set
8423                  */
8424                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
8425                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8426
8427                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8428                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
8429                         goto not_running;
8430                 /* no recovery is running.
8431                  * remove any failed drives, then
8432                  * add spares if possible.
8433                  * Spares are also removed and re-added, to allow
8434                  * the personality to fail the re-add.
8435                  */
8436
8437                 if (mddev->reshape_position != MaxSector) {
8438                         if (mddev->pers->check_reshape == NULL ||
8439                             mddev->pers->check_reshape(mddev) != 0)
8440                                 /* Cannot proceed */
8441                                 goto not_running;
8442                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8443                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8444                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
8445                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8446                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8447                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8448                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8449                 } else if (mddev->recovery_cp < MaxSector) {
8450                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8451                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8452                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
8453                         /* nothing to be done ... */
8454                         goto not_running;
8455
8456                 if (mddev->pers->sync_request) {
8457                         if (spares) {
8458                                 /* We are adding a device or devices to an array
8459                                  * which has the bitmap stored on all devices.
8460                                  * So make sure all bitmap pages get written
8461                                  */
8462                                 bitmap_write_all(mddev->bitmap);
8463                         }
8464                         INIT_WORK(&mddev->del_work, md_start_sync);
8465                         queue_work(md_misc_wq, &mddev->del_work);
8466                         goto unlock;
8467                 }
8468         not_running:
8469                 if (!mddev->sync_thread) {
8470                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8471                         wake_up(&resync_wait);
8472                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8473                                                &mddev->recovery))
8474                                 if (mddev->sysfs_action)
8475                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
8476                 }
8477         unlock:
8478                 wake_up(&mddev->sb_wait);
8479                 mddev_unlock(mddev);
8480         }
8481 }
8482 EXPORT_SYMBOL(md_check_recovery);
8483
8484 void md_reap_sync_thread(struct mddev *mddev)
8485 {
8486         struct md_rdev *rdev;
8487
8488         /* resync has finished, collect result */
8489         md_unregister_thread(&mddev->sync_thread);
8490         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8491             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8492                 /* success...*/
8493                 /* activate any spares */
8494                 if (mddev->pers->spare_active(mddev)) {
8495                         sysfs_notify(&mddev->kobj, NULL,
8496                                      "degraded");
8497                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8498                 }
8499         }
8500         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8501             mddev->pers->finish_reshape)
8502                 mddev->pers->finish_reshape(mddev);
8503
8504         /* If array is no longer degraded, then any saved_raid_disk
8505          * information must be scrapped.
8506          */
8507         if (!mddev->degraded)
8508                 rdev_for_each(rdev, mddev)
8509                         rdev->saved_raid_disk = -1;
8510
8511         md_update_sb(mddev, 1);
8512         /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
8513          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
8514          * clustered raid */
8515         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
8516                 md_cluster_ops->resync_finish(mddev);
8517         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8518         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8519         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8520         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8521         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8522         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8523         wake_up(&resync_wait);
8524         /* flag recovery needed just to double check */
8525         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8526         sysfs_notify_dirent_safe(mddev->sysfs_action);
8527         md_new_event(mddev);
8528         if (mddev->event_work.func)
8529                 queue_work(md_misc_wq, &mddev->event_work);
8530 }
8531 EXPORT_SYMBOL(md_reap_sync_thread);
8532
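      /* Wait (up to about five seconds) for @rdev to leave the Blocked /
       * BlockedBadBlocks state, then drop the pending reference taken by
       * the caller.
       */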
8533 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8534 {
8535         sysfs_notify_dirent_safe(rdev->sysfs_state);
8536         wait_event_timeout(rdev->blocked_wait,
8537                            !test_bit(Blocked, &rdev->flags) &&
8538                            !test_bit(BlockedBadBlocks, &rdev->flags),
8539                            msecs_to_jiffies(5000));
8540         rdev_dec_pending(rdev, mddev);
8541 }
8542 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8543
8544 void md_finish_reshape(struct mddev *mddev)
8545 {
8546         /* called by the personality module when reshape completes. */
8547         struct md_rdev *rdev;
8548
8549         rdev_for_each(rdev, mddev) {
8550                 if (rdev->data_offset > rdev->new_data_offset)
8551                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8552                 else
8553                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8554                 rdev->data_offset = rdev->new_data_offset;
8555         }
8556 }
8557 EXPORT_SYMBOL(md_finish_reshape);
8558
8559 /* Bad block management */
8560
8561 /* Returns 1 on success, 0 on failure */
8562 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8563                        int is_new)
8564 {
8565         struct mddev *mddev = rdev->mddev;
8566         int rv;
8567         if (is_new)
8568                 s += rdev->new_data_offset;
8569         else
8570                 s += rdev->data_offset;
8571         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
8572         if (rv == 0) {
8573                 /* Make sure they get written out promptly */
8574                 if (test_bit(ExternalBbl, &rdev->flags))
8575                         sysfs_notify(&rdev->kobj, NULL,
8576                                      "unacknowledged_bad_blocks");
8577                 sysfs_notify_dirent_safe(rdev->sysfs_state);
8578                 set_mask_bits(&mddev->flags, 0,
8579                               BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
8580                 md_wakeup_thread(rdev->mddev->thread);
8581                 return 1;
8582         } else
8583                 return 0;
8584 }
8585 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
8586
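      /* Clear bad blocks on @rdev; like rdev_set_badblocks(), @s is relative
       * to the (new) data offset.
       */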
8587 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8588                          int is_new)
8589 {
8590         int rv;
8591         if (is_new)
8592                 s += rdev->new_data_offset;
8593         else
8594                 s += rdev->data_offset;
8595         rv = badblocks_clear(&rdev->badblocks, s, sectors);
8596         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
8597                 sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
8598         return rv;
8599 }
8600 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
8601
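      /* Reboot notifier: stop writes on every array we can lock so the
       * superblocks are consistent before the system goes down.
       */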
8602 static int md_notify_reboot(struct notifier_block *this,
8603                             unsigned long code, void *x)
8604 {
8605         struct list_head *tmp;
8606         struct mddev *mddev;
8607         int need_delay = 0;
8608
8609         for_each_mddev(mddev, tmp) {
8610                 if (mddev_trylock(mddev)) {
8611                         if (mddev->pers)
8612                                 __md_stop_writes(mddev);
8613                         if (mddev->persistent)
8614                                 mddev->safemode = 2;
8615                         mddev_unlock(mddev);
8616                 }
8617                 need_delay = 1;
8618         }
8619         /*
8620          * certain more exotic SCSI devices are known to be
8621          * volatile wrt too early system reboots. While the
8622          * right place to handle this issue is the given
8623          * driver, we do want to have a safe RAID driver ...
8624          */
8625         if (need_delay)
8626                 mdelay(1000*1);
8627
8628         return NOTIFY_DONE;
8629 }
8630
8631 static struct notifier_block md_notifier = {
8632         .notifier_call  = md_notify_reboot,
8633         .next           = NULL,
8634         .priority       = INT_MAX, /* before any real devices */
8635 };
8636
8637 static void md_geninit(void)
8638 {
8639         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8640
8641         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8642 }
8643
8644 static int __init md_init(void)
8645 {
8646         int ret = -ENOMEM;
8647
8648         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8649         if (!md_wq)
8650                 goto err_wq;
8651
8652         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8653         if (!md_misc_wq)
8654                 goto err_misc_wq;
8655
8656         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8657                 goto err_md;
8658
8659         if ((ret = register_blkdev(0, "mdp")) < 0)
8660                 goto err_mdp;
8661         mdp_major = ret;
8662
8663         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8664                             md_probe, NULL, NULL);
8665         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8666                             md_probe, NULL, NULL);
8667
8668         register_reboot_notifier(&md_notifier);
8669         raid_table_header = register_sysctl_table(raid_root_table);
8670
8671         md_geninit();
8672         return 0;
8673
8674 err_mdp:
8675         unregister_blkdev(MD_MAJOR, "md");
8676 err_md:
8677         destroy_workqueue(md_misc_wq);
8678 err_misc_wq:
8679         destroy_workqueue(md_wq);
8680 err_wq:
8681         return ret;
8682 }
8683
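      /* Clustered md: fold device-role changes recorded in a superblock
       * written by another node into our own view (activation, failure,
       * removal of failed Candidate devices).
       */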
8684 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8685 {
8686         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8687         struct md_rdev *rdev2;
8688         int role, ret;
8689         char b[BDEVNAME_SIZE];
8690
8691         /* Check for change of roles in the active devices */
8692         rdev_for_each(rdev2, mddev) {
8693                 if (test_bit(Faulty, &rdev2->flags))
8694                         continue;
8695
8696                 /* Check if the roles changed */
8697                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
8698
8699                 if (test_bit(Candidate, &rdev2->flags)) {
8700                         if (role == 0xfffe) {
8701                                 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
8702                                 md_kick_rdev_from_array(rdev2);
8703                                 continue;
8704                         }
8705                         else
8706                                 clear_bit(Candidate, &rdev2->flags);
8707                 }
8708
8709                 if (role != rdev2->raid_disk) {
8710                         /* got activated */
8711                         if (rdev2->raid_disk == -1 && role != 0xffff) {
8712                                 rdev2->saved_raid_disk = role;
8713                                 ret = remove_and_add_spares(mddev, rdev2);
8714                                 pr_info("Activated spare: %s\n",
8715                                         bdevname(rdev2->bdev,b));
8716                                 /* wake up mddev->thread here, so the array can
8717                                  * perform resync with the newly activated disk */
8718                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8719                                 md_wakeup_thread(mddev->thread);
8720
8721                         }
8722                         /* device faulty
8723                          * We just want to do the minimum to mark the disk
8724                          * as faulty. The recovery is performed by the
8725                          * node that initiated the error.
8726                          */
8727                         if ((role == 0xfffe) || (role == 0xfffd)) {
8728                                 md_error(mddev, rdev2);
8729                                 clear_bit(Blocked, &rdev2->flags);
8730                         }
8731                 }
8732         }
8733
8734         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
8735                 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
8736
8737         /* Finally set the event to be up to date */
8738         mddev->events = le64_to_cpu(sb->events);
8739 }
8740
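      /* Re-read @rdev's superblock from disk; if that fails, restore the
       * previously loaded superblock page.
       */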
8741 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
8742 {
8743         int err;
8744         struct page *swapout = rdev->sb_page;
8745         struct mdp_superblock_1 *sb;
8746
8747         /* Store the sb page of the rdev in the swapout temporary
8748          * variable so it can be restored if the reload below fails
8749          */
8750         rdev->sb_page = NULL;
8751         err = alloc_disk_sb(rdev);
8752         if (err == 0) {
8753                 ClearPageUptodate(rdev->sb_page);
8754                 rdev->sb_loaded = 0;
8755                 err = super_types[mddev->major_version].
8756                         load_super(rdev, NULL, mddev->minor_version);
8757         }
8758         if (err < 0) {
8759                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
8760                                 __func__, __LINE__, rdev->desc_nr, err);
8761                 if (rdev->sb_page)
8762                         put_page(rdev->sb_page);
8763                 rdev->sb_page = swapout;
8764                 rdev->sb_loaded = 1;
8765                 return err;
8766         }
8767
8768         sb = page_address(rdev->sb_page);
8769         /* Pick up recovery_offset from the superblock when it advertises
8770          * MD_FEATURE_RECOVERY_OFFSET
8771          */
8772
8773         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
8774                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
8775
8776         /* The other node finished recovery, call spare_active to set
8777          * device In_sync and mddev->degraded
8778          */
8779         if (rdev->recovery_offset == MaxSector &&
8780             !test_bit(In_sync, &rdev->flags) &&
8781             mddev->pers->spare_active(mddev))
8782                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8783
8784         put_page(swapout);
8785         return 0;
8786 }
8787
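      /* Called when another node set MD_RELOAD_SB: re-read the superblock
       * of the rdev identified by @nr, apply any role changes, then refresh
       * every rdev so recovery_offset is up to date.
       */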
8788 void md_reload_sb(struct mddev *mddev, int nr)
8789 {
8790         struct md_rdev *rdev;
8791         int err;
8792
8793         /* Find the rdev */
8794         rdev_for_each_rcu(rdev, mddev) {
8795                 if (rdev->desc_nr == nr)
8796                         break;
8797         }
8798
8799         if (!rdev || rdev->desc_nr != nr) {
8800                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
8801                 return;
8802         }
8803
8804         err = read_rdev(mddev, rdev);
8805         if (err < 0)
8806                 return;
8807
8808         check_sb_changes(mddev, rdev);
8809
8810         /* Read all rdev's to update recovery_offset */
8811         rdev_for_each_rcu(rdev, mddev)
8812                 read_rdev(mddev, rdev);
8813 }
8814 EXPORT_SYMBOL(md_reload_sb);
8815
8816 #ifndef MODULE
8817
8818 /*
8819  * Searches all registered partitions for autorun RAID arrays
8820  * at boot time.
8821  */
8822
8823 static DEFINE_MUTEX(detected_devices_mutex);
8824 static LIST_HEAD(all_detected_devices);
8825 struct detected_devices_node {
8826         struct list_head list;
8827         dev_t dev;
8828 };
8829
8830 void md_autodetect_dev(dev_t dev)
8831 {
8832         struct detected_devices_node *node_detected_dev;
8833
8834         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
8835         if (node_detected_dev) {
8836                 node_detected_dev->dev = dev;
8837                 mutex_lock(&detected_devices_mutex);
8838                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
8839                 mutex_unlock(&detected_devices_mutex);
8840         }
8841 }
8842
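      /* Drain all_detected_devices: import each device and queue it on
       * pending_raid_disks, then let autorun_devices() assemble arrays.
       */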
8843 static void autostart_arrays(int part)
8844 {
8845         struct md_rdev *rdev;
8846         struct detected_devices_node *node_detected_dev;
8847         dev_t dev;
8848         int i_scanned, i_passed;
8849
8850         i_scanned = 0;
8851         i_passed = 0;
8852
8853         pr_info("md: Autodetecting RAID arrays.\n");
8854
8855         mutex_lock(&detected_devices_mutex);
8856         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
8857                 i_scanned++;
8858                 node_detected_dev = list_entry(all_detected_devices.next,
8859                                         struct detected_devices_node, list);
8860                 list_del(&node_detected_dev->list);
8861                 dev = node_detected_dev->dev;
8862                 kfree(node_detected_dev);
8863                 mutex_unlock(&detected_devices_mutex);
8864                 rdev = md_import_device(dev, 0, 90);
8865                 mutex_lock(&detected_devices_mutex);
8866                 if (IS_ERR(rdev))
8867                         continue;
8868
8869                 if (test_bit(Faulty, &rdev->flags))
8870                         continue;
8871
8872                 set_bit(AutoDetected, &rdev->flags);
8873                 list_add(&rdev->same_set, &pending_raid_disks);
8874                 i_passed++;
8875         }
8876         mutex_unlock(&detected_devices_mutex);
8877
8878         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
8879
8880         autorun_devices(part);
8881 }
8882
8883 #endif /* !MODULE */
8884
8885 static __exit void md_exit(void)
8886 {
8887         struct mddev *mddev;
8888         struct list_head *tmp;
8889         int delay = 1;
8890
8891         blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
8892         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
8893
8894         unregister_blkdev(MD_MAJOR,"md");
8895         unregister_blkdev(mdp_major, "mdp");
8896         unregister_reboot_notifier(&md_notifier);
8897         unregister_sysctl_table(raid_table_header);
8898
8899         /* We cannot unload the modules while some process is
8900          * waiting for us in select() or poll() - wake them up
8901          */
8902         md_unloading = 1;
8903         while (waitqueue_active(&md_event_waiters)) {
8904                 /* not safe to leave yet */
8905                 wake_up(&md_event_waiters);
8906                 msleep(delay);
8907                 delay += delay;
8908         }
8909         remove_proc_entry("mdstat", NULL);
8910
8911         for_each_mddev(mddev, tmp) {
8912                 export_array(mddev);
8913                 mddev->hold_active = 0;
8914         }
8915         destroy_workqueue(md_misc_wq);
8916         destroy_workqueue(md_wq);
8917 }
8918
8919 subsys_initcall(md_init);
8920 module_exit(md_exit)
8921
8922 static int get_ro(char *buffer, struct kernel_param *kp)
8923 {
8924         return sprintf(buffer, "%d", start_readonly);
8925 }
8926 static int set_ro(const char *val, struct kernel_param *kp)
8927 {
8928         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
8929 }
8930
8931 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
8932 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
8933 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
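      /* Illustrative only (not part of this file): with md built as md-mod,
       * these parameters are typically set at load time or via sysfs, e.g.
       *
       *   modprobe md-mod start_ro=1
       *   echo 0 > /sys/module/md_mod/parameters/start_dirty_degraded
       *   echo md_test > /sys/module/md_mod/parameters/new_array
       *
       * The module name, sysfs paths and the "md_test" array name are
       * assumptions for the example, not something this file defines.
       */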
8934
8935 MODULE_LICENSE("GPL");
8936 MODULE_DESCRIPTION("MD RAID framework");
8937 MODULE_ALIAS("md");
8938 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);