]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/md/md.c
d3cef771e4225104e7f5df5ce30fab8135e0f985
[karo-tx-linux.git] / drivers / md / md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3      Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33
34    Errors, Warnings, etc.
35    Please use:
36      pr_crit() for error conditions that risk data loss
37      pr_err() for error conditions that are unexpected, like an IO error
38          or internal inconsistency
39      pr_warn() for error conditions that could have been predicated, like
40          adding a device to an array when it has incompatible metadata
41      pr_info() for every interesting, very rare events, like an array starting
42          or stopping, or resync starting or stopping
43      pr_debug() for everything else.
44
45 */
46
47 #include <linux/kthread.h>
48 #include <linux/blkdev.h>
49 #include <linux/badblocks.h>
50 #include <linux/sysctl.h>
51 #include <linux/seq_file.h>
52 #include <linux/fs.h>
53 #include <linux/poll.h>
54 #include <linux/ctype.h>
55 #include <linux/string.h>
56 #include <linux/hdreg.h>
57 #include <linux/proc_fs.h>
58 #include <linux/random.h>
59 #include <linux/module.h>
60 #include <linux/reboot.h>
61 #include <linux/file.h>
62 #include <linux/compat.h>
63 #include <linux/delay.h>
64 #include <linux/raid/md_p.h>
65 #include <linux/raid/md_u.h>
66 #include <linux/slab.h>
67 #include <trace/events/block.h>
68 #include "md.h"
69 #include "bitmap.h"
70 #include "md-cluster.h"
71
72 #ifndef MODULE
73 static void autostart_arrays(int part);
74 #endif
75
76 /* pers_list is a list of registered personalities protected
77  * by pers_lock.
78  * pers_lock does extra service to protect accesses to
79  * mddev->thread when the mutex cannot be held.
80  */
81 static LIST_HEAD(pers_list);
82 static DEFINE_SPINLOCK(pers_lock);
83
84 struct md_cluster_operations *md_cluster_ops;
85 EXPORT_SYMBOL(md_cluster_ops);
86 struct module *md_cluster_mod;
87 EXPORT_SYMBOL(md_cluster_mod);
88
89 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
90 static struct workqueue_struct *md_wq;
91 static struct workqueue_struct *md_misc_wq;
92
93 static int remove_and_add_spares(struct mddev *mddev,
94                                  struct md_rdev *this);
95 static void mddev_detach(struct mddev *mddev);
96
97 /*
98  * Default number of read corrections we'll attempt on an rdev
99  * before ejecting it from the array. We divide the read error
100  * count by 2 for every hour elapsed between read errors.
101  */
102 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
103 /*
104  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
105  * is 1000 KB/sec, so the extra system load does not show up that much.
106  * Increase it if you want to have more _guaranteed_ speed. Note that
107  * the RAID driver will use the maximum available bandwidth if the IO
108  * subsystem is idle. There is also an 'absolute maximum' reconstruction
109  * speed limit - in case reconstruction slows down your system despite
110  * idle IO detection.
111  *
112  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
113  * or /sys/block/mdX/md/sync_speed_{min,max}
114  */
115
116 static int sysctl_speed_limit_min = 1000;
117 static int sysctl_speed_limit_max = 200000;
118 static inline int speed_min(struct mddev *mddev)
119 {
120         return mddev->sync_speed_min ?
121                 mddev->sync_speed_min : sysctl_speed_limit_min;
122 }
123
124 static inline int speed_max(struct mddev *mddev)
125 {
126         return mddev->sync_speed_max ?
127                 mddev->sync_speed_max : sysctl_speed_limit_max;
128 }
129
130 static struct ctl_table_header *raid_table_header;
131
132 static struct ctl_table raid_table[] = {
133         {
134                 .procname       = "speed_limit_min",
135                 .data           = &sysctl_speed_limit_min,
136                 .maxlen         = sizeof(int),
137                 .mode           = S_IRUGO|S_IWUSR,
138                 .proc_handler   = proc_dointvec,
139         },
140         {
141                 .procname       = "speed_limit_max",
142                 .data           = &sysctl_speed_limit_max,
143                 .maxlen         = sizeof(int),
144                 .mode           = S_IRUGO|S_IWUSR,
145                 .proc_handler   = proc_dointvec,
146         },
147         { }
148 };
149
150 static struct ctl_table raid_dir_table[] = {
151         {
152                 .procname       = "raid",
153                 .maxlen         = 0,
154                 .mode           = S_IRUGO|S_IXUGO,
155                 .child          = raid_table,
156         },
157         { }
158 };
159
160 static struct ctl_table raid_root_table[] = {
161         {
162                 .procname       = "dev",
163                 .maxlen         = 0,
164                 .mode           = 0555,
165                 .child          = raid_dir_table,
166         },
167         {  }
168 };
169
170 static const struct block_device_operations md_fops;
171
172 static int start_readonly;
173
174 /* bio_clone_mddev
175  * like bio_clone, but with a local bio set
176  */
177
178 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
179                             struct mddev *mddev)
180 {
181         struct bio *b;
182
183         if (!mddev || !mddev->bio_set)
184                 return bio_alloc(gfp_mask, nr_iovecs);
185
186         b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
187         if (!b)
188                 return NULL;
189         return b;
190 }
191 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
192
193 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
194                             struct mddev *mddev)
195 {
196         if (!mddev || !mddev->bio_set)
197                 return bio_clone(bio, gfp_mask);
198
199         return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
200 }
201 EXPORT_SYMBOL_GPL(bio_clone_mddev);
202
203 /*
204  * We have a system wide 'event count' that is incremented
205  * on any 'interesting' event, and readers of /proc/mdstat
206  * can use 'poll' or 'select' to find out when the event
207  * count increases.
208  *
209  * Events are:
210  *  start array, stop array, error, add device, remove device,
211  *  start build, activate spare
212  */
213 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
214 static atomic_t md_event_count;
215 void md_new_event(struct mddev *mddev)
216 {
217         atomic_inc(&md_event_count);
218         wake_up(&md_event_waiters);
219 }
220 EXPORT_SYMBOL_GPL(md_new_event);
221
222 /*
223  * Enables to iterate over all existing md arrays
224  * all_mddevs_lock protects this list.
225  */
226 static LIST_HEAD(all_mddevs);
227 static DEFINE_SPINLOCK(all_mddevs_lock);
228
229 /*
230  * iterates through all used mddevs in the system.
231  * We take care to grab the all_mddevs_lock whenever navigating
232  * the list, and to always hold a refcount when unlocked.
233  * Any code which breaks out of this loop while own
234  * a reference to the current mddev and must mddev_put it.
235  */
236 #define for_each_mddev(_mddev,_tmp)                                     \
237                                                                         \
238         for (({ spin_lock(&all_mddevs_lock);                            \
239                 _tmp = all_mddevs.next;                                 \
240                 _mddev = NULL;});                                       \
241              ({ if (_tmp != &all_mddevs)                                \
242                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
243                 spin_unlock(&all_mddevs_lock);                          \
244                 if (_mddev) mddev_put(_mddev);                          \
245                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
246                 _tmp != &all_mddevs;});                                 \
247              ({ spin_lock(&all_mddevs_lock);                            \
248                 _tmp = _tmp->next;})                                    \
249                 )
250
251 /* Rather than calling directly into the personality make_request function,
252  * IO requests come here first so that we can check if the device is
253  * being suspended pending a reconfiguration.
254  * We hold a refcount over the call to ->make_request.  By the time that
255  * call has finished, the bio has been linked into some internal structure
256  * and so is visible to ->quiesce(), so we don't need the refcount any more.
257  */
258 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
259 {
260         const int rw = bio_data_dir(bio);
261         struct mddev *mddev = q->queuedata;
262         unsigned int sectors;
263         int cpu;
264
265         blk_queue_split(q, &bio, q->bio_split);
266
267         if (mddev == NULL || mddev->pers == NULL) {
268                 bio_io_error(bio);
269                 return BLK_QC_T_NONE;
270         }
271         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
272                 if (bio_sectors(bio) != 0)
273                         bio->bi_error = -EROFS;
274                 bio_endio(bio);
275                 return BLK_QC_T_NONE;
276         }
277         smp_rmb(); /* Ensure implications of  'active' are visible */
278         rcu_read_lock();
279         if (mddev->suspended) {
280                 DEFINE_WAIT(__wait);
281                 for (;;) {
282                         prepare_to_wait(&mddev->sb_wait, &__wait,
283                                         TASK_UNINTERRUPTIBLE);
284                         if (!mddev->suspended)
285                                 break;
286                         rcu_read_unlock();
287                         schedule();
288                         rcu_read_lock();
289                 }
290                 finish_wait(&mddev->sb_wait, &__wait);
291         }
292         atomic_inc(&mddev->active_io);
293         rcu_read_unlock();
294
295         /*
296          * save the sectors now since our bio can
297          * go away inside make_request
298          */
299         sectors = bio_sectors(bio);
300         /* bio could be mergeable after passing to underlayer */
301         bio->bi_opf &= ~REQ_NOMERGE;
302         mddev->pers->make_request(mddev, bio);
303
304         cpu = part_stat_lock();
305         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
306         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
307         part_stat_unlock();
308
309         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
310                 wake_up(&mddev->sb_wait);
311
312         return BLK_QC_T_NONE;
313 }
314
315 /* mddev_suspend makes sure no new requests are submitted
316  * to the device, and that any requests that have been submitted
317  * are completely handled.
318  * Once mddev_detach() is called and completes, the module will be
319  * completely unused.
320  */
321 void mddev_suspend(struct mddev *mddev)
322 {
323         WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
324         if (mddev->suspended++)
325                 return;
326         synchronize_rcu();
327         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
328         mddev->pers->quiesce(mddev, 1);
329
330         del_timer_sync(&mddev->safemode_timer);
331 }
332 EXPORT_SYMBOL_GPL(mddev_suspend);
333
334 void mddev_resume(struct mddev *mddev)
335 {
336         if (--mddev->suspended)
337                 return;
338         wake_up(&mddev->sb_wait);
339         mddev->pers->quiesce(mddev, 0);
340
341         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
342         md_wakeup_thread(mddev->thread);
343         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
344 }
345 EXPORT_SYMBOL_GPL(mddev_resume);
346
347 int mddev_congested(struct mddev *mddev, int bits)
348 {
349         struct md_personality *pers = mddev->pers;
350         int ret = 0;
351
352         rcu_read_lock();
353         if (mddev->suspended)
354                 ret = 1;
355         else if (pers && pers->congested)
356                 ret = pers->congested(mddev, bits);
357         rcu_read_unlock();
358         return ret;
359 }
360 EXPORT_SYMBOL_GPL(mddev_congested);
361 static int md_congested(void *data, int bits)
362 {
363         struct mddev *mddev = data;
364         return mddev_congested(mddev, bits);
365 }
366
367 /*
368  * Generic flush handling for md
369  */
370
371 static void md_end_flush(struct bio *bio)
372 {
373         struct md_rdev *rdev = bio->bi_private;
374         struct mddev *mddev = rdev->mddev;
375
376         rdev_dec_pending(rdev, mddev);
377
378         if (atomic_dec_and_test(&mddev->flush_pending)) {
379                 /* The pre-request flush has finished */
380                 queue_work(md_wq, &mddev->flush_work);
381         }
382         bio_put(bio);
383 }
384
385 static void md_submit_flush_data(struct work_struct *ws);
386
387 static void submit_flushes(struct work_struct *ws)
388 {
389         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
390         struct md_rdev *rdev;
391
392         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
393         atomic_set(&mddev->flush_pending, 1);
394         rcu_read_lock();
395         rdev_for_each_rcu(rdev, mddev)
396                 if (rdev->raid_disk >= 0 &&
397                     !test_bit(Faulty, &rdev->flags)) {
398                         /* Take two references, one is dropped
399                          * when request finishes, one after
400                          * we reclaim rcu_read_lock
401                          */
402                         struct bio *bi;
403                         atomic_inc(&rdev->nr_pending);
404                         atomic_inc(&rdev->nr_pending);
405                         rcu_read_unlock();
406                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
407                         bi->bi_end_io = md_end_flush;
408                         bi->bi_private = rdev;
409                         bi->bi_bdev = rdev->bdev;
410                         bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
411                         atomic_inc(&mddev->flush_pending);
412                         submit_bio(bi);
413                         rcu_read_lock();
414                         rdev_dec_pending(rdev, mddev);
415                 }
416         rcu_read_unlock();
417         if (atomic_dec_and_test(&mddev->flush_pending))
418                 queue_work(md_wq, &mddev->flush_work);
419 }
420
421 static void md_submit_flush_data(struct work_struct *ws)
422 {
423         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
424         struct bio *bio = mddev->flush_bio;
425
426         if (bio->bi_iter.bi_size == 0)
427                 /* an empty barrier - all done */
428                 bio_endio(bio);
429         else {
430                 bio->bi_opf &= ~REQ_PREFLUSH;
431                 mddev->pers->make_request(mddev, bio);
432         }
433
434         mddev->flush_bio = NULL;
435         wake_up(&mddev->sb_wait);
436 }
437
438 void md_flush_request(struct mddev *mddev, struct bio *bio)
439 {
440         spin_lock_irq(&mddev->lock);
441         wait_event_lock_irq(mddev->sb_wait,
442                             !mddev->flush_bio,
443                             mddev->lock);
444         mddev->flush_bio = bio;
445         spin_unlock_irq(&mddev->lock);
446
447         INIT_WORK(&mddev->flush_work, submit_flushes);
448         queue_work(md_wq, &mddev->flush_work);
449 }
450 EXPORT_SYMBOL(md_flush_request);
451
452 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
453 {
454         struct mddev *mddev = cb->data;
455         md_wakeup_thread(mddev->thread);
456         kfree(cb);
457 }
458 EXPORT_SYMBOL(md_unplug);
459
460 static inline struct mddev *mddev_get(struct mddev *mddev)
461 {
462         atomic_inc(&mddev->active);
463         return mddev;
464 }
465
466 static void mddev_delayed_delete(struct work_struct *ws);
467
468 static void mddev_put(struct mddev *mddev)
469 {
470         struct bio_set *bs = NULL;
471
472         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
473                 return;
474         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
475             mddev->ctime == 0 && !mddev->hold_active) {
476                 /* Array is not configured at all, and not held active,
477                  * so destroy it */
478                 list_del_init(&mddev->all_mddevs);
479                 bs = mddev->bio_set;
480                 mddev->bio_set = NULL;
481                 if (mddev->gendisk) {
482                         /* We did a probe so need to clean up.  Call
483                          * queue_work inside the spinlock so that
484                          * flush_workqueue() after mddev_find will
485                          * succeed in waiting for the work to be done.
486                          */
487                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
488                         queue_work(md_misc_wq, &mddev->del_work);
489                 } else
490                         kfree(mddev);
491         }
492         spin_unlock(&all_mddevs_lock);
493         if (bs)
494                 bioset_free(bs);
495 }
496
497 static void md_safemode_timeout(unsigned long data);
498
499 void mddev_init(struct mddev *mddev)
500 {
501         mutex_init(&mddev->open_mutex);
502         mutex_init(&mddev->reconfig_mutex);
503         mutex_init(&mddev->bitmap_info.mutex);
504         INIT_LIST_HEAD(&mddev->disks);
505         INIT_LIST_HEAD(&mddev->all_mddevs);
506         setup_timer(&mddev->safemode_timer, md_safemode_timeout,
507                     (unsigned long) mddev);
508         atomic_set(&mddev->active, 1);
509         atomic_set(&mddev->openers, 0);
510         atomic_set(&mddev->active_io, 0);
511         spin_lock_init(&mddev->lock);
512         atomic_set(&mddev->flush_pending, 0);
513         init_waitqueue_head(&mddev->sb_wait);
514         init_waitqueue_head(&mddev->recovery_wait);
515         mddev->reshape_position = MaxSector;
516         mddev->reshape_backwards = 0;
517         mddev->last_sync_action = "none";
518         mddev->resync_min = 0;
519         mddev->resync_max = MaxSector;
520         mddev->level = LEVEL_NONE;
521 }
522 EXPORT_SYMBOL_GPL(mddev_init);
523
524 static struct mddev *mddev_find(dev_t unit)
525 {
526         struct mddev *mddev, *new = NULL;
527
528         if (unit && MAJOR(unit) != MD_MAJOR)
529                 unit &= ~((1<<MdpMinorShift)-1);
530
531  retry:
532         spin_lock(&all_mddevs_lock);
533
534         if (unit) {
535                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
536                         if (mddev->unit == unit) {
537                                 mddev_get(mddev);
538                                 spin_unlock(&all_mddevs_lock);
539                                 kfree(new);
540                                 return mddev;
541                         }
542
543                 if (new) {
544                         list_add(&new->all_mddevs, &all_mddevs);
545                         spin_unlock(&all_mddevs_lock);
546                         new->hold_active = UNTIL_IOCTL;
547                         return new;
548                 }
549         } else if (new) {
550                 /* find an unused unit number */
551                 static int next_minor = 512;
552                 int start = next_minor;
553                 int is_free = 0;
554                 int dev = 0;
555                 while (!is_free) {
556                         dev = MKDEV(MD_MAJOR, next_minor);
557                         next_minor++;
558                         if (next_minor > MINORMASK)
559                                 next_minor = 0;
560                         if (next_minor == start) {
561                                 /* Oh dear, all in use. */
562                                 spin_unlock(&all_mddevs_lock);
563                                 kfree(new);
564                                 return NULL;
565                         }
566
567                         is_free = 1;
568                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
569                                 if (mddev->unit == dev) {
570                                         is_free = 0;
571                                         break;
572                                 }
573                 }
574                 new->unit = dev;
575                 new->md_minor = MINOR(dev);
576                 new->hold_active = UNTIL_STOP;
577                 list_add(&new->all_mddevs, &all_mddevs);
578                 spin_unlock(&all_mddevs_lock);
579                 return new;
580         }
581         spin_unlock(&all_mddevs_lock);
582
583         new = kzalloc(sizeof(*new), GFP_KERNEL);
584         if (!new)
585                 return NULL;
586
587         new->unit = unit;
588         if (MAJOR(unit) == MD_MAJOR)
589                 new->md_minor = MINOR(unit);
590         else
591                 new->md_minor = MINOR(unit) >> MdpMinorShift;
592
593         mddev_init(new);
594
595         goto retry;
596 }
597
598 static struct attribute_group md_redundancy_group;
599
600 void mddev_unlock(struct mddev *mddev)
601 {
602         if (mddev->to_remove) {
603                 /* These cannot be removed under reconfig_mutex as
604                  * an access to the files will try to take reconfig_mutex
605                  * while holding the file unremovable, which leads to
606                  * a deadlock.
607                  * So hold set sysfs_active while the remove in happeing,
608                  * and anything else which might set ->to_remove or my
609                  * otherwise change the sysfs namespace will fail with
610                  * -EBUSY if sysfs_active is still set.
611                  * We set sysfs_active under reconfig_mutex and elsewhere
612                  * test it under the same mutex to ensure its correct value
613                  * is seen.
614                  */
615                 struct attribute_group *to_remove = mddev->to_remove;
616                 mddev->to_remove = NULL;
617                 mddev->sysfs_active = 1;
618                 mutex_unlock(&mddev->reconfig_mutex);
619
620                 if (mddev->kobj.sd) {
621                         if (to_remove != &md_redundancy_group)
622                                 sysfs_remove_group(&mddev->kobj, to_remove);
623                         if (mddev->pers == NULL ||
624                             mddev->pers->sync_request == NULL) {
625                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
626                                 if (mddev->sysfs_action)
627                                         sysfs_put(mddev->sysfs_action);
628                                 mddev->sysfs_action = NULL;
629                         }
630                 }
631                 mddev->sysfs_active = 0;
632         } else
633                 mutex_unlock(&mddev->reconfig_mutex);
634
635         /* As we've dropped the mutex we need a spinlock to
636          * make sure the thread doesn't disappear
637          */
638         spin_lock(&pers_lock);
639         md_wakeup_thread(mddev->thread);
640         spin_unlock(&pers_lock);
641 }
642 EXPORT_SYMBOL_GPL(mddev_unlock);
643
644 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
645 {
646         struct md_rdev *rdev;
647
648         rdev_for_each_rcu(rdev, mddev)
649                 if (rdev->desc_nr == nr)
650                         return rdev;
651
652         return NULL;
653 }
654 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
655
656 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
657 {
658         struct md_rdev *rdev;
659
660         rdev_for_each(rdev, mddev)
661                 if (rdev->bdev->bd_dev == dev)
662                         return rdev;
663
664         return NULL;
665 }
666
667 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
668 {
669         struct md_rdev *rdev;
670
671         rdev_for_each_rcu(rdev, mddev)
672                 if (rdev->bdev->bd_dev == dev)
673                         return rdev;
674
675         return NULL;
676 }
677
678 static struct md_personality *find_pers(int level, char *clevel)
679 {
680         struct md_personality *pers;
681         list_for_each_entry(pers, &pers_list, list) {
682                 if (level != LEVEL_NONE && pers->level == level)
683                         return pers;
684                 if (strcmp(pers->name, clevel)==0)
685                         return pers;
686         }
687         return NULL;
688 }
689
690 /* return the offset of the super block in 512byte sectors */
691 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
692 {
693         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
694         return MD_NEW_SIZE_SECTORS(num_sectors);
695 }
696
697 static int alloc_disk_sb(struct md_rdev *rdev)
698 {
699         rdev->sb_page = alloc_page(GFP_KERNEL);
700         if (!rdev->sb_page)
701                 return -ENOMEM;
702         return 0;
703 }
704
705 void md_rdev_clear(struct md_rdev *rdev)
706 {
707         if (rdev->sb_page) {
708                 put_page(rdev->sb_page);
709                 rdev->sb_loaded = 0;
710                 rdev->sb_page = NULL;
711                 rdev->sb_start = 0;
712                 rdev->sectors = 0;
713         }
714         if (rdev->bb_page) {
715                 put_page(rdev->bb_page);
716                 rdev->bb_page = NULL;
717         }
718         badblocks_exit(&rdev->badblocks);
719 }
720 EXPORT_SYMBOL_GPL(md_rdev_clear);
721
722 static void super_written(struct bio *bio)
723 {
724         struct md_rdev *rdev = bio->bi_private;
725         struct mddev *mddev = rdev->mddev;
726
727         if (bio->bi_error) {
728                 pr_err("md: super_written gets error=%d\n", bio->bi_error);
729                 md_error(mddev, rdev);
730         }
731
732         if (atomic_dec_and_test(&mddev->pending_writes))
733                 wake_up(&mddev->sb_wait);
734         rdev_dec_pending(rdev, mddev);
735         bio_put(bio);
736 }
737
738 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
739                    sector_t sector, int size, struct page *page)
740 {
741         /* write first size bytes of page to sector of rdev
742          * Increment mddev->pending_writes before returning
743          * and decrement it on completion, waking up sb_wait
744          * if zero is reached.
745          * If an error occurred, call md_error
746          */
747         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
748
749         atomic_inc(&rdev->nr_pending);
750
751         bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
752         bio->bi_iter.bi_sector = sector;
753         bio_add_page(bio, page, size, 0);
754         bio->bi_private = rdev;
755         bio->bi_end_io = super_written;
756         bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
757
758         atomic_inc(&mddev->pending_writes);
759         submit_bio(bio);
760 }
761
762 void md_super_wait(struct mddev *mddev)
763 {
764         /* wait for all superblock writes that were scheduled to complete */
765         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
766 }
767
768 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
769                  struct page *page, int op, int op_flags, bool metadata_op)
770 {
771         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
772         int ret;
773
774         bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
775                 rdev->meta_bdev : rdev->bdev;
776         bio_set_op_attrs(bio, op, op_flags);
777         if (metadata_op)
778                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
779         else if (rdev->mddev->reshape_position != MaxSector &&
780                  (rdev->mddev->reshape_backwards ==
781                   (sector >= rdev->mddev->reshape_position)))
782                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
783         else
784                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
785         bio_add_page(bio, page, size, 0);
786
787         submit_bio_wait(bio);
788
789         ret = !bio->bi_error;
790         bio_put(bio);
791         return ret;
792 }
793 EXPORT_SYMBOL_GPL(sync_page_io);
794
795 static int read_disk_sb(struct md_rdev *rdev, int size)
796 {
797         char b[BDEVNAME_SIZE];
798
799         if (rdev->sb_loaded)
800                 return 0;
801
802         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
803                 goto fail;
804         rdev->sb_loaded = 1;
805         return 0;
806
807 fail:
808         pr_err("md: disabled device %s, could not read superblock.\n",
809                bdevname(rdev->bdev,b));
810         return -EINVAL;
811 }
812
813 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
814 {
815         return  sb1->set_uuid0 == sb2->set_uuid0 &&
816                 sb1->set_uuid1 == sb2->set_uuid1 &&
817                 sb1->set_uuid2 == sb2->set_uuid2 &&
818                 sb1->set_uuid3 == sb2->set_uuid3;
819 }
820
821 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
822 {
823         int ret;
824         mdp_super_t *tmp1, *tmp2;
825
826         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
827         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
828
829         if (!tmp1 || !tmp2) {
830                 ret = 0;
831                 goto abort;
832         }
833
834         *tmp1 = *sb1;
835         *tmp2 = *sb2;
836
837         /*
838          * nr_disks is not constant
839          */
840         tmp1->nr_disks = 0;
841         tmp2->nr_disks = 0;
842
843         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
844 abort:
845         kfree(tmp1);
846         kfree(tmp2);
847         return ret;
848 }
849
850 static u32 md_csum_fold(u32 csum)
851 {
852         csum = (csum & 0xffff) + (csum >> 16);
853         return (csum & 0xffff) + (csum >> 16);
854 }
855
856 static unsigned int calc_sb_csum(mdp_super_t *sb)
857 {
858         u64 newcsum = 0;
859         u32 *sb32 = (u32*)sb;
860         int i;
861         unsigned int disk_csum, csum;
862
863         disk_csum = sb->sb_csum;
864         sb->sb_csum = 0;
865
866         for (i = 0; i < MD_SB_BYTES/4 ; i++)
867                 newcsum += sb32[i];
868         csum = (newcsum & 0xffffffff) + (newcsum>>32);
869
870 #ifdef CONFIG_ALPHA
871         /* This used to use csum_partial, which was wrong for several
872          * reasons including that different results are returned on
873          * different architectures.  It isn't critical that we get exactly
874          * the same return value as before (we always csum_fold before
875          * testing, and that removes any differences).  However as we
876          * know that csum_partial always returned a 16bit value on
877          * alphas, do a fold to maximise conformity to previous behaviour.
878          */
879         sb->sb_csum = md_csum_fold(disk_csum);
880 #else
881         sb->sb_csum = disk_csum;
882 #endif
883         return csum;
884 }
885
886 /*
887  * Handle superblock details.
888  * We want to be able to handle multiple superblock formats
889  * so we have a common interface to them all, and an array of
890  * different handlers.
891  * We rely on user-space to write the initial superblock, and support
892  * reading and updating of superblocks.
893  * Interface methods are:
894  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
895  *      loads and validates a superblock on dev.
896  *      if refdev != NULL, compare superblocks on both devices
897  *    Return:
898  *      0 - dev has a superblock that is compatible with refdev
899  *      1 - dev has a superblock that is compatible and newer than refdev
900  *          so dev should be used as the refdev in future
901  *     -EINVAL superblock incompatible or invalid
902  *     -othererror e.g. -EIO
903  *
904  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
905  *      Verify that dev is acceptable into mddev.
906  *       The first time, mddev->raid_disks will be 0, and data from
907  *       dev should be merged in.  Subsequent calls check that dev
908  *       is new enough.  Return 0 or -EINVAL
909  *
910  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
911  *     Update the superblock for rdev with data in mddev
912  *     This does not write to disc.
913  *
914  */
915
916 struct super_type  {
917         char                *name;
918         struct module       *owner;
919         int                 (*load_super)(struct md_rdev *rdev,
920                                           struct md_rdev *refdev,
921                                           int minor_version);
922         int                 (*validate_super)(struct mddev *mddev,
923                                               struct md_rdev *rdev);
924         void                (*sync_super)(struct mddev *mddev,
925                                           struct md_rdev *rdev);
926         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
927                                                 sector_t num_sectors);
928         int                 (*allow_new_offset)(struct md_rdev *rdev,
929                                                 unsigned long long new_offset);
930 };
931
932 /*
933  * Check that the given mddev has no bitmap.
934  *
935  * This function is called from the run method of all personalities that do not
936  * support bitmaps. It prints an error message and returns non-zero if mddev
937  * has a bitmap. Otherwise, it returns 0.
938  *
939  */
940 int md_check_no_bitmap(struct mddev *mddev)
941 {
942         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
943                 return 0;
944         pr_warn("%s: bitmaps are not supported for %s\n",
945                 mdname(mddev), mddev->pers->name);
946         return 1;
947 }
948 EXPORT_SYMBOL(md_check_no_bitmap);
949
950 /*
951  * load_super for 0.90.0
952  */
953 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
954 {
955         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
956         mdp_super_t *sb;
957         int ret;
958
959         /*
960          * Calculate the position of the superblock (512byte sectors),
961          * it's at the end of the disk.
962          *
963          * It also happens to be a multiple of 4Kb.
964          */
965         rdev->sb_start = calc_dev_sboffset(rdev);
966
967         ret = read_disk_sb(rdev, MD_SB_BYTES);
968         if (ret)
969                 return ret;
970
971         ret = -EINVAL;
972
973         bdevname(rdev->bdev, b);
974         sb = page_address(rdev->sb_page);
975
976         if (sb->md_magic != MD_SB_MAGIC) {
977                 pr_warn("md: invalid raid superblock magic on %s\n", b);
978                 goto abort;
979         }
980
981         if (sb->major_version != 0 ||
982             sb->minor_version < 90 ||
983             sb->minor_version > 91) {
984                 pr_warn("Bad version number %d.%d on %s\n",
985                         sb->major_version, sb->minor_version, b);
986                 goto abort;
987         }
988
989         if (sb->raid_disks <= 0)
990                 goto abort;
991
992         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
993                 pr_warn("md: invalid superblock checksum on %s\n", b);
994                 goto abort;
995         }
996
997         rdev->preferred_minor = sb->md_minor;
998         rdev->data_offset = 0;
999         rdev->new_data_offset = 0;
1000         rdev->sb_size = MD_SB_BYTES;
1001         rdev->badblocks.shift = -1;
1002
1003         if (sb->level == LEVEL_MULTIPATH)
1004                 rdev->desc_nr = -1;
1005         else
1006                 rdev->desc_nr = sb->this_disk.number;
1007
1008         if (!refdev) {
1009                 ret = 1;
1010         } else {
1011                 __u64 ev1, ev2;
1012                 mdp_super_t *refsb = page_address(refdev->sb_page);
1013                 if (!uuid_equal(refsb, sb)) {
1014                         pr_warn("md: %s has different UUID to %s\n",
1015                                 b, bdevname(refdev->bdev,b2));
1016                         goto abort;
1017                 }
1018                 if (!sb_equal(refsb, sb)) {
1019                         pr_warn("md: %s has same UUID but different superblock to %s\n",
1020                                 b, bdevname(refdev->bdev, b2));
1021                         goto abort;
1022                 }
1023                 ev1 = md_event(sb);
1024                 ev2 = md_event(refsb);
1025                 if (ev1 > ev2)
1026                         ret = 1;
1027                 else
1028                         ret = 0;
1029         }
1030         rdev->sectors = rdev->sb_start;
1031         /* Limit to 4TB as metadata cannot record more than that.
1032          * (not needed for Linear and RAID0 as metadata doesn't
1033          * record this size)
1034          */
1035         if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
1036             sb->level >= 1)
1037                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1038
1039         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1040                 /* "this cannot possibly happen" ... */
1041                 ret = -EINVAL;
1042
1043  abort:
1044         return ret;
1045 }
1046
1047 /*
1048  * validate_super for 0.90.0
1049  */
1050 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1051 {
1052         mdp_disk_t *desc;
1053         mdp_super_t *sb = page_address(rdev->sb_page);
1054         __u64 ev1 = md_event(sb);
1055
1056         rdev->raid_disk = -1;
1057         clear_bit(Faulty, &rdev->flags);
1058         clear_bit(In_sync, &rdev->flags);
1059         clear_bit(Bitmap_sync, &rdev->flags);
1060         clear_bit(WriteMostly, &rdev->flags);
1061
1062         if (mddev->raid_disks == 0) {
1063                 mddev->major_version = 0;
1064                 mddev->minor_version = sb->minor_version;
1065                 mddev->patch_version = sb->patch_version;
1066                 mddev->external = 0;
1067                 mddev->chunk_sectors = sb->chunk_size >> 9;
1068                 mddev->ctime = sb->ctime;
1069                 mddev->utime = sb->utime;
1070                 mddev->level = sb->level;
1071                 mddev->clevel[0] = 0;
1072                 mddev->layout = sb->layout;
1073                 mddev->raid_disks = sb->raid_disks;
1074                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1075                 mddev->events = ev1;
1076                 mddev->bitmap_info.offset = 0;
1077                 mddev->bitmap_info.space = 0;
1078                 /* bitmap can use 60 K after the 4K superblocks */
1079                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1080                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1081                 mddev->reshape_backwards = 0;
1082
1083                 if (mddev->minor_version >= 91) {
1084                         mddev->reshape_position = sb->reshape_position;
1085                         mddev->delta_disks = sb->delta_disks;
1086                         mddev->new_level = sb->new_level;
1087                         mddev->new_layout = sb->new_layout;
1088                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1089                         if (mddev->delta_disks < 0)
1090                                 mddev->reshape_backwards = 1;
1091                 } else {
1092                         mddev->reshape_position = MaxSector;
1093                         mddev->delta_disks = 0;
1094                         mddev->new_level = mddev->level;
1095                         mddev->new_layout = mddev->layout;
1096                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1097                 }
1098
1099                 if (sb->state & (1<<MD_SB_CLEAN))
1100                         mddev->recovery_cp = MaxSector;
1101                 else {
1102                         if (sb->events_hi == sb->cp_events_hi &&
1103                                 sb->events_lo == sb->cp_events_lo) {
1104                                 mddev->recovery_cp = sb->recovery_cp;
1105                         } else
1106                                 mddev->recovery_cp = 0;
1107                 }
1108
1109                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1110                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1111                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1112                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1113
1114                 mddev->max_disks = MD_SB_DISKS;
1115
1116                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1117                     mddev->bitmap_info.file == NULL) {
1118                         mddev->bitmap_info.offset =
1119                                 mddev->bitmap_info.default_offset;
1120                         mddev->bitmap_info.space =
1121                                 mddev->bitmap_info.default_space;
1122                 }
1123
1124         } else if (mddev->pers == NULL) {
1125                 /* Insist on good event counter while assembling, except
1126                  * for spares (which don't need an event count) */
1127                 ++ev1;
1128                 if (sb->disks[rdev->desc_nr].state & (
1129                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1130                         if (ev1 < mddev->events)
1131                                 return -EINVAL;
1132         } else if (mddev->bitmap) {
1133                 /* if adding to array with a bitmap, then we can accept an
1134                  * older device ... but not too old.
1135                  */
1136                 if (ev1 < mddev->bitmap->events_cleared)
1137                         return 0;
1138                 if (ev1 < mddev->events)
1139                         set_bit(Bitmap_sync, &rdev->flags);
1140         } else {
1141                 if (ev1 < mddev->events)
1142                         /* just a hot-add of a new device, leave raid_disk at -1 */
1143                         return 0;
1144         }
1145
1146         if (mddev->level != LEVEL_MULTIPATH) {
1147                 desc = sb->disks + rdev->desc_nr;
1148
1149                 if (desc->state & (1<<MD_DISK_FAULTY))
1150                         set_bit(Faulty, &rdev->flags);
1151                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1152                             desc->raid_disk < mddev->raid_disks */) {
1153                         set_bit(In_sync, &rdev->flags);
1154                         rdev->raid_disk = desc->raid_disk;
1155                         rdev->saved_raid_disk = desc->raid_disk;
1156                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1157                         /* active but not in sync implies recovery up to
1158                          * reshape position.  We don't know exactly where
1159                          * that is, so set to zero for now */
1160                         if (mddev->minor_version >= 91) {
1161                                 rdev->recovery_offset = 0;
1162                                 rdev->raid_disk = desc->raid_disk;
1163                         }
1164                 }
1165                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1166                         set_bit(WriteMostly, &rdev->flags);
1167         } else /* MULTIPATH are always insync */
1168                 set_bit(In_sync, &rdev->flags);
1169         return 0;
1170 }
1171
1172 /*
1173  * sync_super for 0.90.0
1174  */
1175 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1176 {
1177         mdp_super_t *sb;
1178         struct md_rdev *rdev2;
1179         int next_spare = mddev->raid_disks;
1180
1181         /* make rdev->sb match mddev data..
1182          *
1183          * 1/ zero out disks
1184          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1185          * 3/ any empty disks < next_spare become removed
1186          *
1187          * disks[0] gets initialised to REMOVED because
1188          * we cannot be sure from other fields if it has
1189          * been initialised or not.
1190          */
1191         int i;
1192         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1193
1194         rdev->sb_size = MD_SB_BYTES;
1195
1196         sb = page_address(rdev->sb_page);
1197
1198         memset(sb, 0, sizeof(*sb));
1199
1200         sb->md_magic = MD_SB_MAGIC;
1201         sb->major_version = mddev->major_version;
1202         sb->patch_version = mddev->patch_version;
1203         sb->gvalid_words  = 0; /* ignored */
1204         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1205         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1206         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1207         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1208
1209         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1210         sb->level = mddev->level;
1211         sb->size = mddev->dev_sectors / 2;
1212         sb->raid_disks = mddev->raid_disks;
1213         sb->md_minor = mddev->md_minor;
1214         sb->not_persistent = 0;
1215         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1216         sb->state = 0;
1217         sb->events_hi = (mddev->events>>32);
1218         sb->events_lo = (u32)mddev->events;
1219
1220         if (mddev->reshape_position == MaxSector)
1221                 sb->minor_version = 90;
1222         else {
1223                 sb->minor_version = 91;
1224                 sb->reshape_position = mddev->reshape_position;
1225                 sb->new_level = mddev->new_level;
1226                 sb->delta_disks = mddev->delta_disks;
1227                 sb->new_layout = mddev->new_layout;
1228                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1229         }
1230         mddev->minor_version = sb->minor_version;
1231         if (mddev->in_sync)
1232         {
1233                 sb->recovery_cp = mddev->recovery_cp;
1234                 sb->cp_events_hi = (mddev->events>>32);
1235                 sb->cp_events_lo = (u32)mddev->events;
1236                 if (mddev->recovery_cp == MaxSector)
1237                         sb->state = (1<< MD_SB_CLEAN);
1238         } else
1239                 sb->recovery_cp = 0;
1240
1241         sb->layout = mddev->layout;
1242         sb->chunk_size = mddev->chunk_sectors << 9;
1243
1244         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1245                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1246
1247         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1248         rdev_for_each(rdev2, mddev) {
1249                 mdp_disk_t *d;
1250                 int desc_nr;
1251                 int is_active = test_bit(In_sync, &rdev2->flags);
1252
1253                 if (rdev2->raid_disk >= 0 &&
1254                     sb->minor_version >= 91)
1255                         /* we have nowhere to store the recovery_offset,
1256                          * but if it is not below the reshape_position,
1257                          * we can piggy-back on that.
1258                          */
1259                         is_active = 1;
1260                 if (rdev2->raid_disk < 0 ||
1261                     test_bit(Faulty, &rdev2->flags))
1262                         is_active = 0;
1263                 if (is_active)
1264                         desc_nr = rdev2->raid_disk;
1265                 else
1266                         desc_nr = next_spare++;
1267                 rdev2->desc_nr = desc_nr;
1268                 d = &sb->disks[rdev2->desc_nr];
1269                 nr_disks++;
1270                 d->number = rdev2->desc_nr;
1271                 d->major = MAJOR(rdev2->bdev->bd_dev);
1272                 d->minor = MINOR(rdev2->bdev->bd_dev);
1273                 if (is_active)
1274                         d->raid_disk = rdev2->raid_disk;
1275                 else
1276                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1277                 if (test_bit(Faulty, &rdev2->flags))
1278                         d->state = (1<<MD_DISK_FAULTY);
1279                 else if (is_active) {
1280                         d->state = (1<<MD_DISK_ACTIVE);
1281                         if (test_bit(In_sync, &rdev2->flags))
1282                                 d->state |= (1<<MD_DISK_SYNC);
1283                         active++;
1284                         working++;
1285                 } else {
1286                         d->state = 0;
1287                         spare++;
1288                         working++;
1289                 }
1290                 if (test_bit(WriteMostly, &rdev2->flags))
1291                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1292         }
1293         /* now set the "removed" and "faulty" bits on any missing devices */
1294         for (i=0 ; i < mddev->raid_disks ; i++) {
1295                 mdp_disk_t *d = &sb->disks[i];
1296                 if (d->state == 0 && d->number == 0) {
1297                         d->number = i;
1298                         d->raid_disk = i;
1299                         d->state = (1<<MD_DISK_REMOVED);
1300                         d->state |= (1<<MD_DISK_FAULTY);
1301                         failed++;
1302                 }
1303         }
1304         sb->nr_disks = nr_disks;
1305         sb->active_disks = active;
1306         sb->working_disks = working;
1307         sb->failed_disks = failed;
1308         sb->spare_disks = spare;
1309
1310         sb->this_disk = sb->disks[rdev->desc_nr];
1311         sb->sb_csum = calc_sb_csum(sb);
1312 }
1313
1314 /*
1315  * rdev_size_change for 0.90.0
1316  */
1317 static unsigned long long
1318 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1319 {
1320         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1321                 return 0; /* component must fit device */
1322         if (rdev->mddev->bitmap_info.offset)
1323                 return 0; /* can't move bitmap */
1324         rdev->sb_start = calc_dev_sboffset(rdev);
1325         if (!num_sectors || num_sectors > rdev->sb_start)
1326                 num_sectors = rdev->sb_start;
1327         /* Limit to 4TB as metadata cannot record more than that.
1328          * 4TB == 2^32 KB, or 2*2^32 sectors.
1329          */
1330         if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
1331             rdev->mddev->level >= 1)
1332                 num_sectors = (sector_t)(2ULL << 32) - 2;
1333         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1334                        rdev->sb_page);
1335         md_super_wait(rdev->mddev);
1336         return num_sectors;
1337 }
1338
1339 static int
1340 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1341 {
1342         /* non-zero offset changes not possible with v0.90 */
1343         return new_offset == 0;
1344 }
1345
1346 /*
1347  * version 1 superblock
1348  */
1349
1350 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1351 {
1352         __le32 disk_csum;
1353         u32 csum;
1354         unsigned long long newcsum;
1355         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1356         __le32 *isuper = (__le32*)sb;
1357
1358         disk_csum = sb->sb_csum;
1359         sb->sb_csum = 0;
1360         newcsum = 0;
1361         for (; size >= 4; size -= 4)
1362                 newcsum += le32_to_cpu(*isuper++);
1363
1364         if (size == 2)
1365                 newcsum += le16_to_cpu(*(__le16*) isuper);
1366
1367         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1368         sb->sb_csum = disk_csum;
1369         return cpu_to_le32(csum);
1370 }
1371
1372 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1373 {
1374         struct mdp_superblock_1 *sb;
1375         int ret;
1376         sector_t sb_start;
1377         sector_t sectors;
1378         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1379         int bmask;
1380
1381         /*
1382          * Calculate the position of the superblock in 512byte sectors.
1383          * It is always aligned to a 4K boundary and
1384          * depeding on minor_version, it can be:
1385          * 0: At least 8K, but less than 12K, from end of device
1386          * 1: At start of device
1387          * 2: 4K from start of device.
1388          */
1389         switch(minor_version) {
1390         case 0:
1391                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1392                 sb_start -= 8*2;
1393                 sb_start &= ~(sector_t)(4*2-1);
1394                 break;
1395         case 1:
1396                 sb_start = 0;
1397                 break;
1398         case 2:
1399                 sb_start = 8;
1400                 break;
1401         default:
1402                 return -EINVAL;
1403         }
1404         rdev->sb_start = sb_start;
1405
1406         /* superblock is rarely larger than 1K, but it can be larger,
1407          * and it is safe to read 4k, so we do that
1408          */
1409         ret = read_disk_sb(rdev, 4096);
1410         if (ret) return ret;
1411
1412         sb = page_address(rdev->sb_page);
1413
1414         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1415             sb->major_version != cpu_to_le32(1) ||
1416             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1417             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1418             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1419                 return -EINVAL;
1420
1421         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1422                 pr_warn("md: invalid superblock checksum on %s\n",
1423                         bdevname(rdev->bdev,b));
1424                 return -EINVAL;
1425         }
1426         if (le64_to_cpu(sb->data_size) < 10) {
1427                 pr_warn("md: data_size too small on %s\n",
1428                         bdevname(rdev->bdev,b));
1429                 return -EINVAL;
1430         }
1431         if (sb->pad0 ||
1432             sb->pad3[0] ||
1433             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1434                 /* Some padding is non-zero, might be a new feature */
1435                 return -EINVAL;
1436
1437         rdev->preferred_minor = 0xffff;
1438         rdev->data_offset = le64_to_cpu(sb->data_offset);
1439         rdev->new_data_offset = rdev->data_offset;
1440         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1441             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1442                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1443         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1444
1445         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1446         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1447         if (rdev->sb_size & bmask)
1448                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1449
1450         if (minor_version
1451             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1452                 return -EINVAL;
1453         if (minor_version
1454             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1455                 return -EINVAL;
1456
1457         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1458                 rdev->desc_nr = -1;
1459         else
1460                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1461
1462         if (!rdev->bb_page) {
1463                 rdev->bb_page = alloc_page(GFP_KERNEL);
1464                 if (!rdev->bb_page)
1465                         return -ENOMEM;
1466         }
1467         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1468             rdev->badblocks.count == 0) {
1469                 /* need to load the bad block list.
1470                  * Currently we limit it to one page.
1471                  */
1472                 s32 offset;
1473                 sector_t bb_sector;
1474                 u64 *bbp;
1475                 int i;
1476                 int sectors = le16_to_cpu(sb->bblog_size);
1477                 if (sectors > (PAGE_SIZE / 512))
1478                         return -EINVAL;
1479                 offset = le32_to_cpu(sb->bblog_offset);
1480                 if (offset == 0)
1481                         return -EINVAL;
1482                 bb_sector = (long long)offset;
1483                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1484                                   rdev->bb_page, REQ_OP_READ, 0, true))
1485                         return -EIO;
1486                 bbp = (u64 *)page_address(rdev->bb_page);
1487                 rdev->badblocks.shift = sb->bblog_shift;
1488                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1489                         u64 bb = le64_to_cpu(*bbp);
1490                         int count = bb & (0x3ff);
1491                         u64 sector = bb >> 10;
1492                         sector <<= sb->bblog_shift;
1493                         count <<= sb->bblog_shift;
1494                         if (bb + 1 == 0)
1495                                 break;
1496                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1497                                 return -EINVAL;
1498                 }
1499         } else if (sb->bblog_offset != 0)
1500                 rdev->badblocks.shift = 0;
1501
1502         if (!refdev) {
1503                 ret = 1;
1504         } else {
1505                 __u64 ev1, ev2;
1506                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1507
1508                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1509                     sb->level != refsb->level ||
1510                     sb->layout != refsb->layout ||
1511                     sb->chunksize != refsb->chunksize) {
1512                         pr_warn("md: %s has strangely different superblock to %s\n",
1513                                 bdevname(rdev->bdev,b),
1514                                 bdevname(refdev->bdev,b2));
1515                         return -EINVAL;
1516                 }
1517                 ev1 = le64_to_cpu(sb->events);
1518                 ev2 = le64_to_cpu(refsb->events);
1519
1520                 if (ev1 > ev2)
1521                         ret = 1;
1522                 else
1523                         ret = 0;
1524         }
1525         if (minor_version) {
1526                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1527                 sectors -= rdev->data_offset;
1528         } else
1529                 sectors = rdev->sb_start;
1530         if (sectors < le64_to_cpu(sb->data_size))
1531                 return -EINVAL;
1532         rdev->sectors = le64_to_cpu(sb->data_size);
1533         return ret;
1534 }
1535
1536 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1537 {
1538         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1539         __u64 ev1 = le64_to_cpu(sb->events);
1540
1541         rdev->raid_disk = -1;
1542         clear_bit(Faulty, &rdev->flags);
1543         clear_bit(In_sync, &rdev->flags);
1544         clear_bit(Bitmap_sync, &rdev->flags);
1545         clear_bit(WriteMostly, &rdev->flags);
1546
1547         if (mddev->raid_disks == 0) {
1548                 mddev->major_version = 1;
1549                 mddev->patch_version = 0;
1550                 mddev->external = 0;
1551                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1552                 mddev->ctime = le64_to_cpu(sb->ctime);
1553                 mddev->utime = le64_to_cpu(sb->utime);
1554                 mddev->level = le32_to_cpu(sb->level);
1555                 mddev->clevel[0] = 0;
1556                 mddev->layout = le32_to_cpu(sb->layout);
1557                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1558                 mddev->dev_sectors = le64_to_cpu(sb->size);
1559                 mddev->events = ev1;
1560                 mddev->bitmap_info.offset = 0;
1561                 mddev->bitmap_info.space = 0;
1562                 /* Default location for bitmap is 1K after superblock
1563                  * using 3K - total of 4K
1564                  */
1565                 mddev->bitmap_info.default_offset = 1024 >> 9;
1566                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1567                 mddev->reshape_backwards = 0;
1568
1569                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1570                 memcpy(mddev->uuid, sb->set_uuid, 16);
1571
1572                 mddev->max_disks =  (4096-256)/2;
1573
1574                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1575                     mddev->bitmap_info.file == NULL) {
1576                         mddev->bitmap_info.offset =
1577                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1578                         /* Metadata doesn't record how much space is available.
1579                          * For 1.0, we assume we can use up to the superblock
1580                          * if before, else to 4K beyond superblock.
1581                          * For others, assume no change is possible.
1582                          */
1583                         if (mddev->minor_version > 0)
1584                                 mddev->bitmap_info.space = 0;
1585                         else if (mddev->bitmap_info.offset > 0)
1586                                 mddev->bitmap_info.space =
1587                                         8 - mddev->bitmap_info.offset;
1588                         else
1589                                 mddev->bitmap_info.space =
1590                                         -mddev->bitmap_info.offset;
1591                 }
1592
1593                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1594                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1595                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1596                         mddev->new_level = le32_to_cpu(sb->new_level);
1597                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1598                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1599                         if (mddev->delta_disks < 0 ||
1600                             (mddev->delta_disks == 0 &&
1601                              (le32_to_cpu(sb->feature_map)
1602                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1603                                 mddev->reshape_backwards = 1;
1604                 } else {
1605                         mddev->reshape_position = MaxSector;
1606                         mddev->delta_disks = 0;
1607                         mddev->new_level = mddev->level;
1608                         mddev->new_layout = mddev->layout;
1609                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1610                 }
1611
1612                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1613                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1614         } else if (mddev->pers == NULL) {
1615                 /* Insist of good event counter while assembling, except for
1616                  * spares (which don't need an event count) */
1617                 ++ev1;
1618                 if (rdev->desc_nr >= 0 &&
1619                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1620                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1621                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1622                         if (ev1 < mddev->events)
1623                                 return -EINVAL;
1624         } else if (mddev->bitmap) {
1625                 /* If adding to array with a bitmap, then we can accept an
1626                  * older device, but not too old.
1627                  */
1628                 if (ev1 < mddev->bitmap->events_cleared)
1629                         return 0;
1630                 if (ev1 < mddev->events)
1631                         set_bit(Bitmap_sync, &rdev->flags);
1632         } else {
1633                 if (ev1 < mddev->events)
1634                         /* just a hot-add of a new device, leave raid_disk at -1 */
1635                         return 0;
1636         }
1637         if (mddev->level != LEVEL_MULTIPATH) {
1638                 int role;
1639                 if (rdev->desc_nr < 0 ||
1640                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1641                         role = MD_DISK_ROLE_SPARE;
1642                         rdev->desc_nr = -1;
1643                 } else
1644                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1645                 switch(role) {
1646                 case MD_DISK_ROLE_SPARE: /* spare */
1647                         break;
1648                 case MD_DISK_ROLE_FAULTY: /* faulty */
1649                         set_bit(Faulty, &rdev->flags);
1650                         break;
1651                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1652                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1653                                 /* journal device without journal feature */
1654                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1655                                 return -EINVAL;
1656                         }
1657                         set_bit(Journal, &rdev->flags);
1658                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1659                         rdev->raid_disk = 0;
1660                         break;
1661                 default:
1662                         rdev->saved_raid_disk = role;
1663                         if ((le32_to_cpu(sb->feature_map) &
1664                              MD_FEATURE_RECOVERY_OFFSET)) {
1665                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1666                                 if (!(le32_to_cpu(sb->feature_map) &
1667                                       MD_FEATURE_RECOVERY_BITMAP))
1668                                         rdev->saved_raid_disk = -1;
1669                         } else
1670                                 set_bit(In_sync, &rdev->flags);
1671                         rdev->raid_disk = role;
1672                         break;
1673                 }
1674                 if (sb->devflags & WriteMostly1)
1675                         set_bit(WriteMostly, &rdev->flags);
1676                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1677                         set_bit(Replacement, &rdev->flags);
1678         } else /* MULTIPATH are always insync */
1679                 set_bit(In_sync, &rdev->flags);
1680
1681         return 0;
1682 }
1683
1684 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1685 {
1686         struct mdp_superblock_1 *sb;
1687         struct md_rdev *rdev2;
1688         int max_dev, i;
1689         /* make rdev->sb match mddev and rdev data. */
1690
1691         sb = page_address(rdev->sb_page);
1692
1693         sb->feature_map = 0;
1694         sb->pad0 = 0;
1695         sb->recovery_offset = cpu_to_le64(0);
1696         memset(sb->pad3, 0, sizeof(sb->pad3));
1697
1698         sb->utime = cpu_to_le64((__u64)mddev->utime);
1699         sb->events = cpu_to_le64(mddev->events);
1700         if (mddev->in_sync)
1701                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1702         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1703                 sb->resync_offset = cpu_to_le64(MaxSector);
1704         else
1705                 sb->resync_offset = cpu_to_le64(0);
1706
1707         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1708
1709         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1710         sb->size = cpu_to_le64(mddev->dev_sectors);
1711         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1712         sb->level = cpu_to_le32(mddev->level);
1713         sb->layout = cpu_to_le32(mddev->layout);
1714
1715         if (test_bit(WriteMostly, &rdev->flags))
1716                 sb->devflags |= WriteMostly1;
1717         else
1718                 sb->devflags &= ~WriteMostly1;
1719         sb->data_offset = cpu_to_le64(rdev->data_offset);
1720         sb->data_size = cpu_to_le64(rdev->sectors);
1721
1722         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1723                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1724                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1725         }
1726
1727         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1728             !test_bit(In_sync, &rdev->flags)) {
1729                 sb->feature_map |=
1730                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1731                 sb->recovery_offset =
1732                         cpu_to_le64(rdev->recovery_offset);
1733                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1734                         sb->feature_map |=
1735                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1736         }
1737         /* Note: recovery_offset and journal_tail share space  */
1738         if (test_bit(Journal, &rdev->flags))
1739                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1740         if (test_bit(Replacement, &rdev->flags))
1741                 sb->feature_map |=
1742                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1743
1744         if (mddev->reshape_position != MaxSector) {
1745                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1746                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1747                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1748                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1749                 sb->new_level = cpu_to_le32(mddev->new_level);
1750                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1751                 if (mddev->delta_disks == 0 &&
1752                     mddev->reshape_backwards)
1753                         sb->feature_map
1754                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1755                 if (rdev->new_data_offset != rdev->data_offset) {
1756                         sb->feature_map
1757                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1758                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1759                                                              - rdev->data_offset));
1760                 }
1761         }
1762
1763         if (mddev_is_clustered(mddev))
1764                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
1765
1766         if (rdev->badblocks.count == 0)
1767                 /* Nothing to do for bad blocks*/ ;
1768         else if (sb->bblog_offset == 0)
1769                 /* Cannot record bad blocks on this device */
1770                 md_error(mddev, rdev);
1771         else {
1772                 struct badblocks *bb = &rdev->badblocks;
1773                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1774                 u64 *p = bb->page;
1775                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1776                 if (bb->changed) {
1777                         unsigned seq;
1778
1779 retry:
1780                         seq = read_seqbegin(&bb->lock);
1781
1782                         memset(bbp, 0xff, PAGE_SIZE);
1783
1784                         for (i = 0 ; i < bb->count ; i++) {
1785                                 u64 internal_bb = p[i];
1786                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1787                                                 | BB_LEN(internal_bb));
1788                                 bbp[i] = cpu_to_le64(store_bb);
1789                         }
1790                         bb->changed = 0;
1791                         if (read_seqretry(&bb->lock, seq))
1792                                 goto retry;
1793
1794                         bb->sector = (rdev->sb_start +
1795                                       (int)le32_to_cpu(sb->bblog_offset));
1796                         bb->size = le16_to_cpu(sb->bblog_size);
1797                 }
1798         }
1799
1800         max_dev = 0;
1801         rdev_for_each(rdev2, mddev)
1802                 if (rdev2->desc_nr+1 > max_dev)
1803                         max_dev = rdev2->desc_nr+1;
1804
1805         if (max_dev > le32_to_cpu(sb->max_dev)) {
1806                 int bmask;
1807                 sb->max_dev = cpu_to_le32(max_dev);
1808                 rdev->sb_size = max_dev * 2 + 256;
1809                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1810                 if (rdev->sb_size & bmask)
1811                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1812         } else
1813                 max_dev = le32_to_cpu(sb->max_dev);
1814
1815         for (i=0; i<max_dev;i++)
1816                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1817
1818         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
1819                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
1820
1821         rdev_for_each(rdev2, mddev) {
1822                 i = rdev2->desc_nr;
1823                 if (test_bit(Faulty, &rdev2->flags))
1824                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1825                 else if (test_bit(In_sync, &rdev2->flags))
1826                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1827                 else if (test_bit(Journal, &rdev2->flags))
1828                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
1829                 else if (rdev2->raid_disk >= 0)
1830                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1831                 else
1832                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
1833         }
1834
1835         sb->sb_csum = calc_sb_1_csum(sb);
1836 }
1837
1838 static unsigned long long
1839 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1840 {
1841         struct mdp_superblock_1 *sb;
1842         sector_t max_sectors;
1843         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1844                 return 0; /* component must fit device */
1845         if (rdev->data_offset != rdev->new_data_offset)
1846                 return 0; /* too confusing */
1847         if (rdev->sb_start < rdev->data_offset) {
1848                 /* minor versions 1 and 2; superblock before data */
1849                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1850                 max_sectors -= rdev->data_offset;
1851                 if (!num_sectors || num_sectors > max_sectors)
1852                         num_sectors = max_sectors;
1853         } else if (rdev->mddev->bitmap_info.offset) {
1854                 /* minor version 0 with bitmap we can't move */
1855                 return 0;
1856         } else {
1857                 /* minor version 0; superblock after data */
1858                 sector_t sb_start;
1859                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1860                 sb_start &= ~(sector_t)(4*2 - 1);
1861                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1862                 if (!num_sectors || num_sectors > max_sectors)
1863                         num_sectors = max_sectors;
1864                 rdev->sb_start = sb_start;
1865         }
1866         sb = page_address(rdev->sb_page);
1867         sb->data_size = cpu_to_le64(num_sectors);
1868         sb->super_offset = rdev->sb_start;
1869         sb->sb_csum = calc_sb_1_csum(sb);
1870         md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1871                        rdev->sb_page);
1872         md_super_wait(rdev->mddev);
1873         return num_sectors;
1874
1875 }
1876
1877 static int
1878 super_1_allow_new_offset(struct md_rdev *rdev,
1879                          unsigned long long new_offset)
1880 {
1881         /* All necessary checks on new >= old have been done */
1882         struct bitmap *bitmap;
1883         if (new_offset >= rdev->data_offset)
1884                 return 1;
1885
1886         /* with 1.0 metadata, there is no metadata to tread on
1887          * so we can always move back */
1888         if (rdev->mddev->minor_version == 0)
1889                 return 1;
1890
1891         /* otherwise we must be sure not to step on
1892          * any metadata, so stay:
1893          * 36K beyond start of superblock
1894          * beyond end of badblocks
1895          * beyond write-intent bitmap
1896          */
1897         if (rdev->sb_start + (32+4)*2 > new_offset)
1898                 return 0;
1899         bitmap = rdev->mddev->bitmap;
1900         if (bitmap && !rdev->mddev->bitmap_info.file &&
1901             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1902             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1903                 return 0;
1904         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1905                 return 0;
1906
1907         return 1;
1908 }
1909
1910 static struct super_type super_types[] = {
1911         [0] = {
1912                 .name   = "0.90.0",
1913                 .owner  = THIS_MODULE,
1914                 .load_super         = super_90_load,
1915                 .validate_super     = super_90_validate,
1916                 .sync_super         = super_90_sync,
1917                 .rdev_size_change   = super_90_rdev_size_change,
1918                 .allow_new_offset   = super_90_allow_new_offset,
1919         },
1920         [1] = {
1921                 .name   = "md-1",
1922                 .owner  = THIS_MODULE,
1923                 .load_super         = super_1_load,
1924                 .validate_super     = super_1_validate,
1925                 .sync_super         = super_1_sync,
1926                 .rdev_size_change   = super_1_rdev_size_change,
1927                 .allow_new_offset   = super_1_allow_new_offset,
1928         },
1929 };
1930
1931 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1932 {
1933         if (mddev->sync_super) {
1934                 mddev->sync_super(mddev, rdev);
1935                 return;
1936         }
1937
1938         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1939
1940         super_types[mddev->major_version].sync_super(mddev, rdev);
1941 }
1942
1943 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1944 {
1945         struct md_rdev *rdev, *rdev2;
1946
1947         rcu_read_lock();
1948         rdev_for_each_rcu(rdev, mddev1) {
1949                 if (test_bit(Faulty, &rdev->flags) ||
1950                     test_bit(Journal, &rdev->flags) ||
1951                     rdev->raid_disk == -1)
1952                         continue;
1953                 rdev_for_each_rcu(rdev2, mddev2) {
1954                         if (test_bit(Faulty, &rdev2->flags) ||
1955                             test_bit(Journal, &rdev2->flags) ||
1956                             rdev2->raid_disk == -1)
1957                                 continue;
1958                         if (rdev->bdev->bd_contains ==
1959                             rdev2->bdev->bd_contains) {
1960                                 rcu_read_unlock();
1961                                 return 1;
1962                         }
1963                 }
1964         }
1965         rcu_read_unlock();
1966         return 0;
1967 }
1968
1969 static LIST_HEAD(pending_raid_disks);
1970
1971 /*
1972  * Try to register data integrity profile for an mddev
1973  *
1974  * This is called when an array is started and after a disk has been kicked
1975  * from the array. It only succeeds if all working and active component devices
1976  * are integrity capable with matching profiles.
1977  */
1978 int md_integrity_register(struct mddev *mddev)
1979 {
1980         struct md_rdev *rdev, *reference = NULL;
1981
1982         if (list_empty(&mddev->disks))
1983                 return 0; /* nothing to do */
1984         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1985                 return 0; /* shouldn't register, or already is */
1986         rdev_for_each(rdev, mddev) {
1987                 /* skip spares and non-functional disks */
1988                 if (test_bit(Faulty, &rdev->flags))
1989                         continue;
1990                 if (rdev->raid_disk < 0)
1991                         continue;
1992                 if (!reference) {
1993                         /* Use the first rdev as the reference */
1994                         reference = rdev;
1995                         continue;
1996                 }
1997                 /* does this rdev's profile match the reference profile? */
1998                 if (blk_integrity_compare(reference->bdev->bd_disk,
1999                                 rdev->bdev->bd_disk) < 0)
2000                         return -EINVAL;
2001         }
2002         if (!reference || !bdev_get_integrity(reference->bdev))
2003                 return 0;
2004         /*
2005          * All component devices are integrity capable and have matching
2006          * profiles, register the common profile for the md device.
2007          */
2008         blk_integrity_register(mddev->gendisk,
2009                                bdev_get_integrity(reference->bdev));
2010
2011         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2012         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2013                 pr_err("md: failed to create integrity pool for %s\n",
2014                        mdname(mddev));
2015                 return -EINVAL;
2016         }
2017         return 0;
2018 }
2019 EXPORT_SYMBOL(md_integrity_register);
2020
2021 /*
2022  * Attempt to add an rdev, but only if it is consistent with the current
2023  * integrity profile
2024  */
2025 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2026 {
2027         struct blk_integrity *bi_rdev;
2028         struct blk_integrity *bi_mddev;
2029         char name[BDEVNAME_SIZE];
2030
2031         if (!mddev->gendisk)
2032                 return 0;
2033
2034         bi_rdev = bdev_get_integrity(rdev->bdev);
2035         bi_mddev = blk_get_integrity(mddev->gendisk);
2036
2037         if (!bi_mddev) /* nothing to do */
2038                 return 0;
2039
2040         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2041                 pr_err("%s: incompatible integrity profile for %s\n",
2042                        mdname(mddev), bdevname(rdev->bdev, name));
2043                 return -ENXIO;
2044         }
2045
2046         return 0;
2047 }
2048 EXPORT_SYMBOL(md_integrity_add_rdev);
2049
2050 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2051 {
2052         char b[BDEVNAME_SIZE];
2053         struct kobject *ko;
2054         int err;
2055
2056         /* prevent duplicates */
2057         if (find_rdev(mddev, rdev->bdev->bd_dev))
2058                 return -EEXIST;
2059
2060         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2061         if (!test_bit(Journal, &rdev->flags) &&
2062             rdev->sectors &&
2063             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2064                 if (mddev->pers) {
2065                         /* Cannot change size, so fail
2066                          * If mddev->level <= 0, then we don't care
2067                          * about aligning sizes (e.g. linear)
2068                          */
2069                         if (mddev->level > 0)
2070                                 return -ENOSPC;
2071                 } else
2072                         mddev->dev_sectors = rdev->sectors;
2073         }
2074
2075         /* Verify rdev->desc_nr is unique.
2076          * If it is -1, assign a free number, else
2077          * check number is not in use
2078          */
2079         rcu_read_lock();
2080         if (rdev->desc_nr < 0) {
2081                 int choice = 0;
2082                 if (mddev->pers)
2083                         choice = mddev->raid_disks;
2084                 while (md_find_rdev_nr_rcu(mddev, choice))
2085                         choice++;
2086                 rdev->desc_nr = choice;
2087         } else {
2088                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2089                         rcu_read_unlock();
2090                         return -EBUSY;
2091                 }
2092         }
2093         rcu_read_unlock();
2094         if (!test_bit(Journal, &rdev->flags) &&
2095             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2096                 pr_warn("md: %s: array is limited to %d devices\n",
2097                         mdname(mddev), mddev->max_disks);
2098                 return -EBUSY;
2099         }
2100         bdevname(rdev->bdev,b);
2101         strreplace(b, '/', '!');
2102
2103         rdev->mddev = mddev;
2104         pr_debug("md: bind<%s>\n", b);
2105
2106         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2107                 goto fail;
2108
2109         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2110         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2111                 /* failure here is OK */;
2112         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2113
2114         list_add_rcu(&rdev->same_set, &mddev->disks);
2115         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2116
2117         /* May as well allow recovery to be retried once */
2118         mddev->recovery_disabled++;
2119
2120         return 0;
2121
2122  fail:
2123         pr_warn("md: failed to register dev-%s for %s\n",
2124                 b, mdname(mddev));
2125         return err;
2126 }
2127
2128 static void md_delayed_delete(struct work_struct *ws)
2129 {
2130         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2131         kobject_del(&rdev->kobj);
2132         kobject_put(&rdev->kobj);
2133 }
2134
2135 static void unbind_rdev_from_array(struct md_rdev *rdev)
2136 {
2137         char b[BDEVNAME_SIZE];
2138
2139         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2140         list_del_rcu(&rdev->same_set);
2141         pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2142         rdev->mddev = NULL;
2143         sysfs_remove_link(&rdev->kobj, "block");
2144         sysfs_put(rdev->sysfs_state);
2145         rdev->sysfs_state = NULL;
2146         rdev->badblocks.count = 0;
2147         /* We need to delay this, otherwise we can deadlock when
2148          * writing to 'remove' to "dev/state".  We also need
2149          * to delay it due to rcu usage.
2150          */
2151         synchronize_rcu();
2152         INIT_WORK(&rdev->del_work, md_delayed_delete);
2153         kobject_get(&rdev->kobj);
2154         queue_work(md_misc_wq, &rdev->del_work);
2155 }
2156
2157 /*
2158  * prevent the device from being mounted, repartitioned or
2159  * otherwise reused by a RAID array (or any other kernel
2160  * subsystem), by bd_claiming the device.
2161  */
2162 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2163 {
2164         int err = 0;
2165         struct block_device *bdev;
2166         char b[BDEVNAME_SIZE];
2167
2168         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2169                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2170         if (IS_ERR(bdev)) {
2171                 pr_warn("md: could not open %s.\n", __bdevname(dev, b));
2172                 return PTR_ERR(bdev);
2173         }
2174         rdev->bdev = bdev;
2175         return err;
2176 }
2177
2178 static void unlock_rdev(struct md_rdev *rdev)
2179 {
2180         struct block_device *bdev = rdev->bdev;
2181         rdev->bdev = NULL;
2182         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2183 }
2184
2185 void md_autodetect_dev(dev_t dev);
2186
2187 static void export_rdev(struct md_rdev *rdev)
2188 {
2189         char b[BDEVNAME_SIZE];
2190
2191         pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2192         md_rdev_clear(rdev);
2193 #ifndef MODULE
2194         if (test_bit(AutoDetected, &rdev->flags))
2195                 md_autodetect_dev(rdev->bdev->bd_dev);
2196 #endif
2197         unlock_rdev(rdev);
2198         kobject_put(&rdev->kobj);
2199 }
2200
2201 void md_kick_rdev_from_array(struct md_rdev *rdev)
2202 {
2203         unbind_rdev_from_array(rdev);
2204         export_rdev(rdev);
2205 }
2206 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2207
2208 static void export_array(struct mddev *mddev)
2209 {
2210         struct md_rdev *rdev;
2211
2212         while (!list_empty(&mddev->disks)) {
2213                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2214                                         same_set);
2215                 md_kick_rdev_from_array(rdev);
2216         }
2217         mddev->raid_disks = 0;
2218         mddev->major_version = 0;
2219 }
2220
2221 static void sync_sbs(struct mddev *mddev, int nospares)
2222 {
2223         /* Update each superblock (in-memory image), but
2224          * if we are allowed to, skip spares which already
2225          * have the right event counter, or have one earlier
2226          * (which would mean they aren't being marked as dirty
2227          * with the rest of the array)
2228          */
2229         struct md_rdev *rdev;
2230         rdev_for_each(rdev, mddev) {
2231                 if (rdev->sb_events == mddev->events ||
2232                     (nospares &&
2233                      rdev->raid_disk < 0 &&
2234                      rdev->sb_events+1 == mddev->events)) {
2235                         /* Don't update this superblock */
2236                         rdev->sb_loaded = 2;
2237                 } else {
2238                         sync_super(mddev, rdev);
2239                         rdev->sb_loaded = 1;
2240                 }
2241         }
2242 }
2243
2244 static bool does_sb_need_changing(struct mddev *mddev)
2245 {
2246         struct md_rdev *rdev;
2247         struct mdp_superblock_1 *sb;
2248         int role;
2249
2250         /* Find a good rdev */
2251         rdev_for_each(rdev, mddev)
2252                 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2253                         break;
2254
2255         /* No good device found. */
2256         if (!rdev)
2257                 return false;
2258
2259         sb = page_address(rdev->sb_page);
2260         /* Check if a device has become faulty or a spare become active */
2261         rdev_for_each(rdev, mddev) {
2262                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2263                 /* Device activated? */
2264                 if (role == 0xffff && rdev->raid_disk >=0 &&
2265                     !test_bit(Faulty, &rdev->flags))
2266                         return true;
2267                 /* Device turned faulty? */
2268                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2269                         return true;
2270         }
2271
2272         /* Check if any mddev parameters have changed */
2273         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2274             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2275             (mddev->layout != le64_to_cpu(sb->layout)) ||
2276             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2277             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2278                 return true;
2279
2280         return false;
2281 }
2282
2283 void md_update_sb(struct mddev *mddev, int force_change)
2284 {
2285         struct md_rdev *rdev;
2286         int sync_req;
2287         int nospares = 0;
2288         int any_badblocks_changed = 0;
2289         int ret = -1;
2290
2291         if (mddev->ro) {
2292                 if (force_change)
2293                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2294                 return;
2295         }
2296
2297 repeat:
2298         if (mddev_is_clustered(mddev)) {
2299                 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2300                         force_change = 1;
2301                 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2302                         nospares = 1;
2303                 ret = md_cluster_ops->metadata_update_start(mddev);
2304                 /* Has someone else has updated the sb */
2305                 if (!does_sb_need_changing(mddev)) {
2306                         if (ret == 0)
2307                                 md_cluster_ops->metadata_update_cancel(mddev);
2308                         bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
2309                                                          BIT(MD_CHANGE_DEVS) |
2310                                                          BIT(MD_CHANGE_CLEAN));
2311                         return;
2312                 }
2313         }
2314
2315         /* First make sure individual recovery_offsets are correct */
2316         rdev_for_each(rdev, mddev) {
2317                 if (rdev->raid_disk >= 0 &&
2318                     mddev->delta_disks >= 0 &&
2319                     !test_bit(Journal, &rdev->flags) &&
2320                     !test_bit(In_sync, &rdev->flags) &&
2321                     mddev->curr_resync_completed > rdev->recovery_offset)
2322                                 rdev->recovery_offset = mddev->curr_resync_completed;
2323
2324         }
2325         if (!mddev->persistent) {
2326                 clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
2327                 clear_bit(MD_CHANGE_DEVS, &mddev->flags);
2328                 if (!mddev->external) {
2329                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2330                         rdev_for_each(rdev, mddev) {
2331                                 if (rdev->badblocks.changed) {
2332                                         rdev->badblocks.changed = 0;
2333                                         ack_all_badblocks(&rdev->badblocks);
2334                                         md_error(mddev, rdev);
2335                                 }
2336                                 clear_bit(Blocked, &rdev->flags);
2337                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2338                                 wake_up(&rdev->blocked_wait);
2339                         }
2340                 }
2341                 wake_up(&mddev->sb_wait);
2342                 return;
2343         }
2344
2345         spin_lock(&mddev->lock);
2346
2347         mddev->utime = ktime_get_real_seconds();
2348
2349         if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
2350                 force_change = 1;
2351         if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
2352                 /* just a clean<-> dirty transition, possibly leave spares alone,
2353                  * though if events isn't the right even/odd, we will have to do
2354                  * spares after all
2355                  */
2356                 nospares = 1;
2357         if (force_change)
2358                 nospares = 0;
2359         if (mddev->degraded)
2360                 /* If the array is degraded, then skipping spares is both
2361                  * dangerous and fairly pointless.
2362                  * Dangerous because a device that was removed from the array
2363                  * might have a event_count that still looks up-to-date,
2364                  * so it can be re-added without a resync.
2365                  * Pointless because if there are any spares to skip,
2366                  * then a recovery will happen and soon that array won't
2367                  * be degraded any more and the spare can go back to sleep then.
2368                  */
2369                 nospares = 0;
2370
2371         sync_req = mddev->in_sync;
2372
2373         /* If this is just a dirty<->clean transition, and the array is clean
2374          * and 'events' is odd, we can roll back to the previous clean state */
2375         if (nospares
2376             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2377             && mddev->can_decrease_events
2378             && mddev->events != 1) {
2379                 mddev->events--;
2380                 mddev->can_decrease_events = 0;
2381         } else {
2382                 /* otherwise we have to go forward and ... */
2383                 mddev->events ++;
2384                 mddev->can_decrease_events = nospares;
2385         }
2386
2387         /*
2388          * This 64-bit counter should never wrap.
2389          * Either we are in around ~1 trillion A.C., assuming
2390          * 1 reboot per second, or we have a bug...
2391          */
2392         WARN_ON(mddev->events == 0);
2393
2394         rdev_for_each(rdev, mddev) {
2395                 if (rdev->badblocks.changed)
2396                         any_badblocks_changed++;
2397                 if (test_bit(Faulty, &rdev->flags))
2398                         set_bit(FaultRecorded, &rdev->flags);
2399         }
2400
2401         sync_sbs(mddev, nospares);
2402         spin_unlock(&mddev->lock);
2403
2404         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2405                  mdname(mddev), mddev->in_sync);
2406
2407         if (mddev->queue)
2408                 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2409         bitmap_update_sb(mddev->bitmap);
2410         rdev_for_each(rdev, mddev) {
2411                 char b[BDEVNAME_SIZE];
2412
2413                 if (rdev->sb_loaded != 1)
2414                         continue; /* no noise on spare devices */
2415
2416                 if (!test_bit(Faulty, &rdev->flags)) {
2417                         md_super_write(mddev,rdev,
2418                                        rdev->sb_start, rdev->sb_size,
2419                                        rdev->sb_page);
2420                         pr_debug("md: (write) %s's sb offset: %llu\n",
2421                                  bdevname(rdev->bdev, b),
2422                                  (unsigned long long)rdev->sb_start);
2423                         rdev->sb_events = mddev->events;
2424                         if (rdev->badblocks.size) {
2425                                 md_super_write(mddev, rdev,
2426                                                rdev->badblocks.sector,
2427                                                rdev->badblocks.size << 9,
2428                                                rdev->bb_page);
2429                                 rdev->badblocks.size = 0;
2430                         }
2431
2432                 } else
2433                         pr_debug("md: %s (skipping faulty)\n",
2434                                  bdevname(rdev->bdev, b));
2435
2436                 if (mddev->level == LEVEL_MULTIPATH)
2437                         /* only need to write one superblock... */
2438                         break;
2439         }
2440         md_super_wait(mddev);
2441         /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2442
2443         if (mddev_is_clustered(mddev) && ret == 0)
2444                 md_cluster_ops->metadata_update_finish(mddev);
2445
2446         if (mddev->in_sync != sync_req ||
2447             !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
2448                                BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
2449                 /* have to write it out again */
2450                 goto repeat;
2451         wake_up(&mddev->sb_wait);
2452         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2453                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2454
2455         rdev_for_each(rdev, mddev) {
2456                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2457                         clear_bit(Blocked, &rdev->flags);
2458
2459                 if (any_badblocks_changed)
2460                         ack_all_badblocks(&rdev->badblocks);
2461                 clear_bit(BlockedBadBlocks, &rdev->flags);
2462                 wake_up(&rdev->blocked_wait);
2463         }
2464 }
2465 EXPORT_SYMBOL(md_update_sb);
2466
2467 static int add_bound_rdev(struct md_rdev *rdev)
2468 {
2469         struct mddev *mddev = rdev->mddev;
2470         int err = 0;
2471         bool add_journal = test_bit(Journal, &rdev->flags);
2472
2473         if (!mddev->pers->hot_remove_disk || add_journal) {
2474                 /* If there is hot_add_disk but no hot_remove_disk
2475                  * then added disks for geometry changes,
2476                  * and should be added immediately.
2477                  */
2478                 super_types[mddev->major_version].
2479                         validate_super(mddev, rdev);
2480                 if (add_journal)
2481                         mddev_suspend(mddev);
2482                 err = mddev->pers->hot_add_disk(mddev, rdev);
2483                 if (add_journal)
2484                         mddev_resume(mddev);
2485                 if (err) {
2486                         md_kick_rdev_from_array(rdev);
2487                         return err;
2488                 }
2489         }
2490         sysfs_notify_dirent_safe(rdev->sysfs_state);
2491
2492         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2493         if (mddev->degraded)
2494                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2495         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2496         md_new_event(mddev);
2497         md_wakeup_thread(mddev->thread);
2498         return 0;
2499 }
2500
2501 /* words written to sysfs files may, or may not, be \n terminated.
2502  * We want to accept with case. For this we use cmd_match.
2503  */
2504 static int cmd_match(const char *cmd, const char *str)
2505 {
2506         /* See if cmd, written into a sysfs file, matches
2507          * str.  They must either be the same, or cmd can
2508          * have a trailing newline
2509          */
2510         while (*cmd && *str && *cmd == *str) {
2511                 cmd++;
2512                 str++;
2513         }
2514         if (*cmd == '\n')
2515                 cmd++;
2516         if (*str || *cmd)
2517                 return 0;
2518         return 1;
2519 }
2520
2521 struct rdev_sysfs_entry {
2522         struct attribute attr;
2523         ssize_t (*show)(struct md_rdev *, char *);
2524         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2525 };
2526
2527 static ssize_t
2528 state_show(struct md_rdev *rdev, char *page)
2529 {
2530         char *sep = ",";
2531         size_t len = 0;
2532         unsigned long flags = ACCESS_ONCE(rdev->flags);
2533
2534         if (test_bit(Faulty, &flags) ||
2535             (!test_bit(ExternalBbl, &flags) &&
2536             rdev->badblocks.unacked_exist))
2537                 len += sprintf(page+len, "faulty%s", sep);
2538         if (test_bit(In_sync, &flags))
2539                 len += sprintf(page+len, "in_sync%s", sep);
2540         if (test_bit(Journal, &flags))
2541                 len += sprintf(page+len, "journal%s", sep);
2542         if (test_bit(WriteMostly, &flags))
2543                 len += sprintf(page+len, "write_mostly%s", sep);
2544         if (test_bit(Blocked, &flags) ||
2545             (rdev->badblocks.unacked_exist
2546              && !test_bit(Faulty, &flags)))
2547                 len += sprintf(page+len, "blocked%s", sep);
2548         if (!test_bit(Faulty, &flags) &&
2549             !test_bit(Journal, &flags) &&
2550             !test_bit(In_sync, &flags))
2551                 len += sprintf(page+len, "spare%s", sep);
2552         if (test_bit(WriteErrorSeen, &flags))
2553                 len += sprintf(page+len, "write_error%s", sep);
2554         if (test_bit(WantReplacement, &flags))
2555                 len += sprintf(page+len, "want_replacement%s", sep);
2556         if (test_bit(Replacement, &flags))
2557                 len += sprintf(page+len, "replacement%s", sep);
2558         if (test_bit(ExternalBbl, &flags))
2559                 len += sprintf(page+len, "external_bbl%s", sep);
2560
2561         if (len)
2562                 len -= strlen(sep);
2563
2564         return len+sprintf(page+len, "\n");
2565 }
2566
2567 static ssize_t
2568 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2569 {
2570         /* can write
2571          *  faulty  - simulates an error
2572          *  remove  - disconnects the device
2573          *  writemostly - sets write_mostly
2574          *  -writemostly - clears write_mostly
2575          *  blocked - sets the Blocked flags
2576          *  -blocked - clears the Blocked and possibly simulates an error
2577          *  insync - sets Insync providing device isn't active
2578          *  -insync - clear Insync for a device with a slot assigned,
2579          *            so that it gets rebuilt based on bitmap
2580          *  write_error - sets WriteErrorSeen
2581          *  -write_error - clears WriteErrorSeen
2582          */
2583         int err = -EINVAL;
2584         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2585                 md_error(rdev->mddev, rdev);
2586                 if (test_bit(Faulty, &rdev->flags))
2587                         err = 0;
2588                 else
2589                         err = -EBUSY;
2590         } else if (cmd_match(buf, "remove")) {
2591                 if (rdev->mddev->pers) {
2592                         clear_bit(Blocked, &rdev->flags);
2593                         remove_and_add_spares(rdev->mddev, rdev);
2594                 }
2595                 if (rdev->raid_disk >= 0)
2596                         err = -EBUSY;
2597                 else {
2598                         struct mddev *mddev = rdev->mddev;
2599                         err = 0;
2600                         if (mddev_is_clustered(mddev))
2601                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2602
2603                         if (err == 0) {
2604                                 md_kick_rdev_from_array(rdev);
2605                                 if (mddev->pers) {
2606                                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
2607                                         md_wakeup_thread(mddev->thread);
2608                                 }
2609                                 md_new_event(mddev);
2610                         }
2611                 }
2612         } else if (cmd_match(buf, "writemostly")) {
2613                 set_bit(WriteMostly, &rdev->flags);
2614                 err = 0;
2615         } else if (cmd_match(buf, "-writemostly")) {
2616                 clear_bit(WriteMostly, &rdev->flags);
2617                 err = 0;
2618         } else if (cmd_match(buf, "blocked")) {
2619                 set_bit(Blocked, &rdev->flags);
2620                 err = 0;
2621         } else if (cmd_match(buf, "-blocked")) {
2622                 if (!test_bit(Faulty, &rdev->flags) &&
2623                     !test_bit(ExternalBbl, &rdev->flags) &&
2624                     rdev->badblocks.unacked_exist) {
2625                         /* metadata handler doesn't understand badblocks,
2626                          * so we need to fail the device
2627                          */
2628                         md_error(rdev->mddev, rdev);
2629                 }
2630                 clear_bit(Blocked, &rdev->flags);
2631                 clear_bit(BlockedBadBlocks, &rdev->flags);
2632                 wake_up(&rdev->blocked_wait);
2633                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2634                 md_wakeup_thread(rdev->mddev->thread);
2635
2636                 err = 0;
2637         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2638                 set_bit(In_sync, &rdev->flags);
2639                 err = 0;
2640         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2641                    !test_bit(Journal, &rdev->flags)) {
2642                 if (rdev->mddev->pers == NULL) {
2643                         clear_bit(In_sync, &rdev->flags);
2644                         rdev->saved_raid_disk = rdev->raid_disk;
2645                         rdev->raid_disk = -1;
2646                         err = 0;
2647                 }
2648         } else if (cmd_match(buf, "write_error")) {
2649                 set_bit(WriteErrorSeen, &rdev->flags);
2650                 err = 0;
2651         } else if (cmd_match(buf, "-write_error")) {
2652                 clear_bit(WriteErrorSeen, &rdev->flags);
2653                 err = 0;
2654         } else if (cmd_match(buf, "want_replacement")) {
2655                 /* Any non-spare device that is not a replacement can
2656                  * become want_replacement at any time, but we then need to
2657                  * check if recovery is needed.
2658                  */
2659                 if (rdev->raid_disk >= 0 &&
2660                     !test_bit(Journal, &rdev->flags) &&
2661                     !test_bit(Replacement, &rdev->flags))
2662                         set_bit(WantReplacement, &rdev->flags);
2663                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2664                 md_wakeup_thread(rdev->mddev->thread);
2665                 err = 0;
2666         } else if (cmd_match(buf, "-want_replacement")) {
2667                 /* Clearing 'want_replacement' is always allowed.
2668                  * Once replacements starts it is too late though.
2669                  */
2670                 err = 0;
2671                 clear_bit(WantReplacement, &rdev->flags);
2672         } else if (cmd_match(buf, "replacement")) {
2673                 /* Can only set a device as a replacement when array has not
2674                  * yet been started.  Once running, replacement is automatic
2675                  * from spares, or by assigning 'slot'.
2676                  */
2677                 if (rdev->mddev->pers)
2678                         err = -EBUSY;
2679                 else {
2680                         set_bit(Replacement, &rdev->flags);
2681                         err = 0;
2682                 }
2683         } else if (cmd_match(buf, "-replacement")) {
2684                 /* Similarly, can only clear Replacement before start */
2685                 if (rdev->mddev->pers)
2686                         err = -EBUSY;
2687                 else {
2688                         clear_bit(Replacement, &rdev->flags);
2689                         err = 0;
2690                 }
2691         } else if (cmd_match(buf, "re-add")) {
2692                 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2693                         /* clear_bit is performed _after_ all the devices
2694                          * have their local Faulty bit cleared. If any writes
2695                          * happen in the meantime in the local node, they
2696                          * will land in the local bitmap, which will be synced
2697                          * by this node eventually
2698                          */
2699                         if (!mddev_is_clustered(rdev->mddev) ||
2700                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2701                                 clear_bit(Faulty, &rdev->flags);
2702                                 err = add_bound_rdev(rdev);
2703                         }
2704                 } else
2705                         err = -EBUSY;
2706         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
2707                 set_bit(ExternalBbl, &rdev->flags);
2708                 rdev->badblocks.shift = 0;
2709                 err = 0;
2710         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
2711                 clear_bit(ExternalBbl, &rdev->flags);
2712                 err = 0;
2713         }
2714         if (!err)
2715                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2716         return err ? err : len;
2717 }
2718 static struct rdev_sysfs_entry rdev_state =
2719 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2720
2721 static ssize_t
2722 errors_show(struct md_rdev *rdev, char *page)
2723 {
2724         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2725 }
2726
2727 static ssize_t
2728 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2729 {
2730         unsigned int n;
2731         int rv;
2732
2733         rv = kstrtouint(buf, 10, &n);
2734         if (rv < 0)
2735                 return rv;
2736         atomic_set(&rdev->corrected_errors, n);
2737         return len;
2738 }
2739 static struct rdev_sysfs_entry rdev_errors =
2740 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2741
2742 static ssize_t
2743 slot_show(struct md_rdev *rdev, char *page)
2744 {
2745         if (test_bit(Journal, &rdev->flags))
2746                 return sprintf(page, "journal\n");
2747         else if (rdev->raid_disk < 0)
2748                 return sprintf(page, "none\n");
2749         else
2750                 return sprintf(page, "%d\n", rdev->raid_disk);
2751 }
2752
2753 static ssize_t
2754 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2755 {
2756         int slot;
2757         int err;
2758
2759         if (test_bit(Journal, &rdev->flags))
2760                 return -EBUSY;
2761         if (strncmp(buf, "none", 4)==0)
2762                 slot = -1;
2763         else {
2764                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
2765                 if (err < 0)
2766                         return err;
2767         }
2768         if (rdev->mddev->pers && slot == -1) {
2769                 /* Setting 'slot' on an active array requires also
2770                  * updating the 'rd%d' link, and communicating
2771                  * with the personality with ->hot_*_disk.
2772                  * For now we only support removing
2773                  * failed/spare devices.  This normally happens automatically,
2774                  * but not when the metadata is externally managed.
2775                  */
2776                 if (rdev->raid_disk == -1)
2777                         return -EEXIST;
2778                 /* personality does all needed checks */
2779                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2780                         return -EINVAL;
2781                 clear_bit(Blocked, &rdev->flags);
2782                 remove_and_add_spares(rdev->mddev, rdev);
2783                 if (rdev->raid_disk >= 0)
2784                         return -EBUSY;
2785                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2786                 md_wakeup_thread(rdev->mddev->thread);
2787         } else if (rdev->mddev->pers) {
2788                 /* Activating a spare .. or possibly reactivating
2789                  * if we ever get bitmaps working here.
2790                  */
2791                 int err;
2792
2793                 if (rdev->raid_disk != -1)
2794                         return -EBUSY;
2795
2796                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2797                         return -EBUSY;
2798
2799                 if (rdev->mddev->pers->hot_add_disk == NULL)
2800                         return -EINVAL;
2801
2802                 if (slot >= rdev->mddev->raid_disks &&
2803                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2804                         return -ENOSPC;
2805
2806                 rdev->raid_disk = slot;
2807                 if (test_bit(In_sync, &rdev->flags))
2808                         rdev->saved_raid_disk = slot;
2809                 else
2810                         rdev->saved_raid_disk = -1;
2811                 clear_bit(In_sync, &rdev->flags);
2812                 clear_bit(Bitmap_sync, &rdev->flags);
2813                 err = rdev->mddev->pers->
2814                         hot_add_disk(rdev->mddev, rdev);
2815                 if (err) {
2816                         rdev->raid_disk = -1;
2817                         return err;
2818                 } else
2819                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2820                 if (sysfs_link_rdev(rdev->mddev, rdev))
2821                         /* failure here is OK */;
2822                 /* don't wakeup anyone, leave that to userspace. */
2823         } else {
2824                 if (slot >= rdev->mddev->raid_disks &&
2825                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2826                         return -ENOSPC;
2827                 rdev->raid_disk = slot;
2828                 /* assume it is working */
2829                 clear_bit(Faulty, &rdev->flags);
2830                 clear_bit(WriteMostly, &rdev->flags);
2831                 set_bit(In_sync, &rdev->flags);
2832                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2833         }
2834         return len;
2835 }
2836
2837 static struct rdev_sysfs_entry rdev_slot =
2838 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2839
2840 static ssize_t
2841 offset_show(struct md_rdev *rdev, char *page)
2842 {
2843         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2844 }
2845
2846 static ssize_t
2847 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2848 {
2849         unsigned long long offset;
2850         if (kstrtoull(buf, 10, &offset) < 0)
2851                 return -EINVAL;
2852         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2853                 return -EBUSY;
2854         if (rdev->sectors && rdev->mddev->external)
2855                 /* Must set offset before size, so overlap checks
2856                  * can be sane */
2857                 return -EBUSY;
2858         rdev->data_offset = offset;
2859         rdev->new_data_offset = offset;
2860         return len;
2861 }
2862
2863 static struct rdev_sysfs_entry rdev_offset =
2864 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2865
2866 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2867 {
2868         return sprintf(page, "%llu\n",
2869                        (unsigned long long)rdev->new_data_offset);
2870 }
2871
2872 static ssize_t new_offset_store(struct md_rdev *rdev,
2873                                 const char *buf, size_t len)
2874 {
2875         unsigned long long new_offset;
2876         struct mddev *mddev = rdev->mddev;
2877
2878         if (kstrtoull(buf, 10, &new_offset) < 0)
2879                 return -EINVAL;
2880
2881         if (mddev->sync_thread ||
2882             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2883                 return -EBUSY;
2884         if (new_offset == rdev->data_offset)
2885                 /* reset is always permitted */
2886                 ;
2887         else if (new_offset > rdev->data_offset) {
2888                 /* must not push array size beyond rdev_sectors */
2889                 if (new_offset - rdev->data_offset
2890                     + mddev->dev_sectors > rdev->sectors)
2891                                 return -E2BIG;
2892         }
2893         /* Metadata worries about other space details. */
2894
2895         /* decreasing the offset is inconsistent with a backwards
2896          * reshape.
2897          */
2898         if (new_offset < rdev->data_offset &&
2899             mddev->reshape_backwards)
2900                 return -EINVAL;
2901         /* Increasing offset is inconsistent with forwards
2902          * reshape.  reshape_direction should be set to
2903          * 'backwards' first.
2904          */
2905         if (new_offset > rdev->data_offset &&
2906             !mddev->reshape_backwards)
2907                 return -EINVAL;
2908
2909         if (mddev->pers && mddev->persistent &&
2910             !super_types[mddev->major_version]
2911             .allow_new_offset(rdev, new_offset))
2912                 return -E2BIG;
2913         rdev->new_data_offset = new_offset;
2914         if (new_offset > rdev->data_offset)
2915                 mddev->reshape_backwards = 1;
2916         else if (new_offset < rdev->data_offset)
2917                 mddev->reshape_backwards = 0;
2918
2919         return len;
2920 }
2921 static struct rdev_sysfs_entry rdev_new_offset =
2922 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2923
2924 static ssize_t
2925 rdev_size_show(struct md_rdev *rdev, char *page)
2926 {
2927         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2928 }
2929
2930 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2931 {
2932         /* check if two start/length pairs overlap */
2933         if (s1+l1 <= s2)
2934                 return 0;
2935         if (s2+l2 <= s1)
2936                 return 0;
2937         return 1;
2938 }
2939
2940 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2941 {
2942         unsigned long long blocks;
2943         sector_t new;
2944
2945         if (kstrtoull(buf, 10, &blocks) < 0)
2946                 return -EINVAL;
2947
2948         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2949                 return -EINVAL; /* sector conversion overflow */
2950
2951         new = blocks * 2;
2952         if (new != blocks * 2)
2953                 return -EINVAL; /* unsigned long long to sector_t overflow */
2954
2955         *sectors = new;
2956         return 0;
2957 }
2958
2959 static ssize_t
2960 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2961 {
2962         struct mddev *my_mddev = rdev->mddev;
2963         sector_t oldsectors = rdev->sectors;
2964         sector_t sectors;
2965
2966         if (test_bit(Journal, &rdev->flags))
2967                 return -EBUSY;
2968         if (strict_blocks_to_sectors(buf, &sectors) < 0)
2969                 return -EINVAL;
2970         if (rdev->data_offset != rdev->new_data_offset)
2971                 return -EINVAL; /* too confusing */
2972         if (my_mddev->pers && rdev->raid_disk >= 0) {
2973                 if (my_mddev->persistent) {
2974                         sectors = super_types[my_mddev->major_version].
2975                                 rdev_size_change(rdev, sectors);
2976                         if (!sectors)
2977                                 return -EBUSY;
2978                 } else if (!sectors)
2979                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
2980                                 rdev->data_offset;
2981                 if (!my_mddev->pers->resize)
2982                         /* Cannot change size for RAID0 or Linear etc */
2983                         return -EINVAL;
2984         }
2985         if (sectors < my_mddev->dev_sectors)
2986                 return -EINVAL; /* component must fit device */
2987
2988         rdev->sectors = sectors;
2989         if (sectors > oldsectors && my_mddev->external) {
2990                 /* Need to check that all other rdevs with the same
2991                  * ->bdev do not overlap.  'rcu' is sufficient to walk
2992                  * the rdev lists safely.
2993                  * This check does not provide a hard guarantee, it
2994                  * just helps avoid dangerous mistakes.
2995                  */
2996                 struct mddev *mddev;
2997                 int overlap = 0;
2998                 struct list_head *tmp;
2999
3000                 rcu_read_lock();
3001                 for_each_mddev(mddev, tmp) {
3002                         struct md_rdev *rdev2;
3003
3004                         rdev_for_each(rdev2, mddev)
3005                                 if (rdev->bdev == rdev2->bdev &&
3006                                     rdev != rdev2 &&
3007                                     overlaps(rdev->data_offset, rdev->sectors,
3008                                              rdev2->data_offset,
3009                                              rdev2->sectors)) {
3010                                         overlap = 1;
3011                                         break;
3012                                 }
3013                         if (overlap) {
3014                                 mddev_put(mddev);
3015                                 break;
3016                         }
3017                 }
3018                 rcu_read_unlock();
3019                 if (overlap) {
3020                         /* Someone else could have slipped in a size
3021                          * change here, but doing so is just silly.
3022                          * We put oldsectors back because we *know* it is
3023                          * safe, and trust userspace not to race with
3024                          * itself
3025                          */
3026                         rdev->sectors = oldsectors;
3027                         return -EBUSY;
3028                 }
3029         }
3030         return len;
3031 }
3032
3033 static struct rdev_sysfs_entry rdev_size =
3034 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3035
3036 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3037 {
3038         unsigned long long recovery_start = rdev->recovery_offset;
3039
3040         if (test_bit(In_sync, &rdev->flags) ||
3041             recovery_start == MaxSector)
3042                 return sprintf(page, "none\n");
3043
3044         return sprintf(page, "%llu\n", recovery_start);
3045 }
3046
3047 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3048 {
3049         unsigned long long recovery_start;
3050
3051         if (cmd_match(buf, "none"))
3052                 recovery_start = MaxSector;
3053         else if (kstrtoull(buf, 10, &recovery_start))
3054                 return -EINVAL;
3055
3056         if (rdev->mddev->pers &&
3057             rdev->raid_disk >= 0)
3058                 return -EBUSY;
3059
3060         rdev->recovery_offset = recovery_start;
3061         if (recovery_start == MaxSector)
3062                 set_bit(In_sync, &rdev->flags);
3063         else
3064                 clear_bit(In_sync, &rdev->flags);
3065         return len;
3066 }
3067
3068 static struct rdev_sysfs_entry rdev_recovery_start =
3069 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3070
3071 /* sysfs access to bad-blocks list.
3072  * We present two files.
3073  * 'bad-blocks' lists sector numbers and lengths of ranges that
3074  *    are recorded as bad.  The list is truncated to fit within
3075  *    the one-page limit of sysfs.
3076  *    Writing "sector length" to this file adds an acknowledged
3077  *    bad block list.
3078  * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3079  *    been acknowledged.  Writing to this file adds bad blocks
3080  *    without acknowledging them.  This is largely for testing.
3081  */
3082 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3083 {
3084         return badblocks_show(&rdev->badblocks, page, 0);
3085 }
3086 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3087 {
3088         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3089         /* Maybe that ack was all we needed */
3090         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3091                 wake_up(&rdev->blocked_wait);
3092         return rv;
3093 }
3094 static struct rdev_sysfs_entry rdev_bad_blocks =
3095 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3096
3097 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3098 {
3099         return badblocks_show(&rdev->badblocks, page, 1);
3100 }
3101 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3102 {
3103         return badblocks_store(&rdev->badblocks, page, len, 1);
3104 }
3105 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3106 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3107
3108 static struct attribute *rdev_default_attrs[] = {
3109         &rdev_state.attr,
3110         &rdev_errors.attr,
3111         &rdev_slot.attr,
3112         &rdev_offset.attr,
3113         &rdev_new_offset.attr,
3114         &rdev_size.attr,
3115         &rdev_recovery_start.attr,
3116         &rdev_bad_blocks.attr,
3117         &rdev_unack_bad_blocks.attr,
3118         NULL,
3119 };
3120 static ssize_t
3121 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3122 {
3123         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3124         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3125
3126         if (!entry->show)
3127                 return -EIO;
3128         if (!rdev->mddev)
3129                 return -EBUSY;
3130         return entry->show(rdev, page);
3131 }
3132
3133 static ssize_t
3134 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3135               const char *page, size_t length)
3136 {
3137         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3138         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3139         ssize_t rv;
3140         struct mddev *mddev = rdev->mddev;
3141
3142         if (!entry->store)
3143                 return -EIO;
3144         if (!capable(CAP_SYS_ADMIN))
3145                 return -EACCES;
3146         rv = mddev ? mddev_lock(mddev): -EBUSY;
3147         if (!rv) {
3148                 if (rdev->mddev == NULL)
3149                         rv = -EBUSY;
3150                 else
3151                         rv = entry->store(rdev, page, length);
3152                 mddev_unlock(mddev);
3153         }
3154         return rv;
3155 }
3156
3157 static void rdev_free(struct kobject *ko)
3158 {
3159         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3160         kfree(rdev);
3161 }
3162 static const struct sysfs_ops rdev_sysfs_ops = {
3163         .show           = rdev_attr_show,
3164         .store          = rdev_attr_store,
3165 };
3166 static struct kobj_type rdev_ktype = {
3167         .release        = rdev_free,
3168         .sysfs_ops      = &rdev_sysfs_ops,
3169         .default_attrs  = rdev_default_attrs,
3170 };
3171
3172 int md_rdev_init(struct md_rdev *rdev)
3173 {
3174         rdev->desc_nr = -1;
3175         rdev->saved_raid_disk = -1;
3176         rdev->raid_disk = -1;
3177         rdev->flags = 0;
3178         rdev->data_offset = 0;
3179         rdev->new_data_offset = 0;
3180         rdev->sb_events = 0;
3181         rdev->last_read_error = 0;
3182         rdev->sb_loaded = 0;
3183         rdev->bb_page = NULL;
3184         atomic_set(&rdev->nr_pending, 0);
3185         atomic_set(&rdev->read_errors, 0);
3186         atomic_set(&rdev->corrected_errors, 0);
3187
3188         INIT_LIST_HEAD(&rdev->same_set);
3189         init_waitqueue_head(&rdev->blocked_wait);
3190
3191         /* Add space to store bad block list.
3192          * This reserves the space even on arrays where it cannot
3193          * be used - I wonder if that matters
3194          */
3195         return badblocks_init(&rdev->badblocks, 0);
3196 }
3197 EXPORT_SYMBOL_GPL(md_rdev_init);
3198 /*
3199  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3200  *
3201  * mark the device faulty if:
3202  *
3203  *   - the device is nonexistent (zero size)
3204  *   - the device has no valid superblock
3205  *
3206  * a faulty rdev _never_ has rdev->sb set.
3207  */
3208 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3209 {
3210         char b[BDEVNAME_SIZE];
3211         int err;
3212         struct md_rdev *rdev;
3213         sector_t size;
3214
3215         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3216         if (!rdev)
3217                 return ERR_PTR(-ENOMEM);
3218
3219         err = md_rdev_init(rdev);
3220         if (err)
3221                 goto abort_free;
3222         err = alloc_disk_sb(rdev);
3223         if (err)
3224                 goto abort_free;
3225
3226         err = lock_rdev(rdev, newdev, super_format == -2);
3227         if (err)
3228                 goto abort_free;
3229
3230         kobject_init(&rdev->kobj, &rdev_ktype);
3231
3232         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3233         if (!size) {
3234                 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3235                         bdevname(rdev->bdev,b));
3236                 err = -EINVAL;
3237                 goto abort_free;
3238         }
3239
3240         if (super_format >= 0) {
3241                 err = super_types[super_format].
3242                         load_super(rdev, NULL, super_minor);
3243                 if (err == -EINVAL) {
3244                         pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3245                                 bdevname(rdev->bdev,b),
3246                                 super_format, super_minor);
3247                         goto abort_free;
3248                 }
3249                 if (err < 0) {
3250                         pr_warn("md: could not read %s's sb, not importing!\n",
3251                                 bdevname(rdev->bdev,b));
3252                         goto abort_free;
3253                 }
3254         }
3255
3256         return rdev;
3257
3258 abort_free:
3259         if (rdev->bdev)
3260                 unlock_rdev(rdev);
3261         md_rdev_clear(rdev);
3262         kfree(rdev);
3263         return ERR_PTR(err);
3264 }
3265
3266 /*
3267  * Check a full RAID array for plausibility
3268  */
3269
3270 static void analyze_sbs(struct mddev *mddev)
3271 {
3272         int i;
3273         struct md_rdev *rdev, *freshest, *tmp;
3274         char b[BDEVNAME_SIZE];
3275
3276         freshest = NULL;
3277         rdev_for_each_safe(rdev, tmp, mddev)
3278                 switch (super_types[mddev->major_version].
3279                         load_super(rdev, freshest, mddev->minor_version)) {
3280                 case 1:
3281                         freshest = rdev;
3282                         break;
3283                 case 0:
3284                         break;
3285                 default:
3286                         pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3287                                 bdevname(rdev->bdev,b));
3288                         md_kick_rdev_from_array(rdev);
3289                 }
3290
3291         super_types[mddev->major_version].
3292                 validate_super(mddev, freshest);
3293
3294         i = 0;
3295         rdev_for_each_safe(rdev, tmp, mddev) {
3296                 if (mddev->max_disks &&
3297                     (rdev->desc_nr >= mddev->max_disks ||
3298                      i > mddev->max_disks)) {
3299                         pr_warn("md: %s: %s: only %d devices permitted\n",
3300                                 mdname(mddev), bdevname(rdev->bdev, b),
3301                                 mddev->max_disks);
3302                         md_kick_rdev_from_array(rdev);
3303                         continue;
3304                 }
3305                 if (rdev != freshest) {
3306                         if (super_types[mddev->major_version].
3307                             validate_super(mddev, rdev)) {
3308                                 pr_warn("md: kicking non-fresh %s from array!\n",
3309                                         bdevname(rdev->bdev,b));
3310                                 md_kick_rdev_from_array(rdev);
3311                                 continue;
3312                         }
3313                 }
3314                 if (mddev->level == LEVEL_MULTIPATH) {
3315                         rdev->desc_nr = i++;
3316                         rdev->raid_disk = rdev->desc_nr;
3317                         set_bit(In_sync, &rdev->flags);
3318                 } else if (rdev->raid_disk >=
3319                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3320                            !test_bit(Journal, &rdev->flags)) {
3321                         rdev->raid_disk = -1;
3322                         clear_bit(In_sync, &rdev->flags);
3323                 }
3324         }
3325 }
3326
3327 /* Read a fixed-point number.
3328  * Numbers in sysfs attributes should be in "standard" units where
3329  * possible, so time should be in seconds.
3330  * However we internally use a a much smaller unit such as
3331  * milliseconds or jiffies.
3332  * This function takes a decimal number with a possible fractional
3333  * component, and produces an integer which is the result of
3334  * multiplying that number by 10^'scale'.
3335  * all without any floating-point arithmetic.
3336  */
3337 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3338 {
3339         unsigned long result = 0;
3340         long decimals = -1;
3341         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3342                 if (*cp == '.')
3343                         decimals = 0;
3344                 else if (decimals < scale) {
3345                         unsigned int value;
3346                         value = *cp - '0';
3347                         result = result * 10 + value;
3348                         if (decimals >= 0)
3349                                 decimals++;
3350                 }
3351                 cp++;
3352         }
3353         if (*cp == '\n')
3354                 cp++;
3355         if (*cp)
3356                 return -EINVAL;
3357         if (decimals < 0)
3358                 decimals = 0;
3359         while (decimals < scale) {
3360                 result *= 10;
3361                 decimals ++;
3362         }
3363         *res = result;
3364         return 0;
3365 }
3366
3367 static ssize_t
3368 safe_delay_show(struct mddev *mddev, char *page)
3369 {
3370         int msec = (mddev->safemode_delay*1000)/HZ;
3371         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3372 }
3373 static ssize_t
3374 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3375 {
3376         unsigned long msec;
3377
3378         if (mddev_is_clustered(mddev)) {
3379                 pr_warn("md: Safemode is disabled for clustered mode\n");
3380                 return -EINVAL;
3381         }
3382
3383         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3384                 return -EINVAL;
3385         if (msec == 0)
3386                 mddev->safemode_delay = 0;
3387         else {
3388                 unsigned long old_delay = mddev->safemode_delay;
3389                 unsigned long new_delay = (msec*HZ)/1000;
3390
3391                 if (new_delay == 0)
3392                         new_delay = 1;
3393                 mddev->safemode_delay = new_delay;
3394                 if (new_delay < old_delay || old_delay == 0)
3395                         mod_timer(&mddev->safemode_timer, jiffies+1);
3396         }
3397         return len;
3398 }
3399 static struct md_sysfs_entry md_safe_delay =
3400 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
3401
3402 static ssize_t
3403 level_show(struct mddev *mddev, char *page)
3404 {
3405         struct md_personality *p;
3406         int ret;
3407         spin_lock(&mddev->lock);
3408         p = mddev->pers;
3409         if (p)
3410                 ret = sprintf(page, "%s\n", p->name);
3411         else if (mddev->clevel[0])
3412                 ret = sprintf(page, "%s\n", mddev->clevel);
3413         else if (mddev->level != LEVEL_NONE)
3414                 ret = sprintf(page, "%d\n", mddev->level);
3415         else
3416                 ret = 0;
3417         spin_unlock(&mddev->lock);
3418         return ret;
3419 }
3420
3421 static ssize_t
3422 level_store(struct mddev *mddev, const char *buf, size_t len)
3423 {
3424         char clevel[16];
3425         ssize_t rv;
3426         size_t slen = len;
3427         struct md_personality *pers, *oldpers;
3428         long level;
3429         void *priv, *oldpriv;
3430         struct md_rdev *rdev;
3431
3432         if (slen == 0 || slen >= sizeof(clevel))
3433                 return -EINVAL;
3434
3435         rv = mddev_lock(mddev);
3436         if (rv)
3437                 return rv;
3438
3439         if (mddev->pers == NULL) {
3440                 strncpy(mddev->clevel, buf, slen);
3441                 if (mddev->clevel[slen-1] == '\n')
3442                         slen--;
3443                 mddev->clevel[slen] = 0;
3444                 mddev->level = LEVEL_NONE;
3445                 rv = len;
3446                 goto out_unlock;
3447         }
3448         rv = -EROFS;
3449         if (mddev->ro)
3450                 goto out_unlock;
3451
3452         /* request to change the personality.  Need to ensure:
3453          *  - array is not engaged in resync/recovery/reshape
3454          *  - old personality can be suspended
3455          *  - new personality will access other array.
3456          */
3457
3458         rv = -EBUSY;
3459         if (mddev->sync_thread ||
3460             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3461             mddev->reshape_position != MaxSector ||
3462             mddev->sysfs_active)
3463                 goto out_unlock;
3464
3465         rv = -EINVAL;
3466         if (!mddev->pers->quiesce) {
3467                 pr_warn("md: %s: %s does not support online personality change\n",
3468                         mdname(mddev), mddev->pers->name);
3469                 goto out_unlock;
3470         }
3471
3472         /* Now find the new personality */
3473         strncpy(clevel, buf, slen);
3474         if (clevel[slen-1] == '\n')
3475                 slen--;
3476         clevel[slen] = 0;
3477         if (kstrtol(clevel, 10, &level))
3478                 level = LEVEL_NONE;
3479
3480         if (request_module("md-%s", clevel) != 0)
3481                 request_module("md-level-%s", clevel);
3482         spin_lock(&pers_lock);
3483         pers = find_pers(level, clevel);
3484         if (!pers || !try_module_get(pers->owner)) {
3485                 spin_unlock(&pers_lock);
3486                 pr_warn("md: personality %s not loaded\n", clevel);
3487                 rv = -EINVAL;
3488                 goto out_unlock;
3489         }
3490         spin_unlock(&pers_lock);
3491
3492         if (pers == mddev->pers) {
3493                 /* Nothing to do! */
3494                 module_put(pers->owner);
3495                 rv = len;
3496                 goto out_unlock;
3497         }
3498         if (!pers->takeover) {
3499                 module_put(pers->owner);
3500                 pr_warn("md: %s: %s does not support personality takeover\n",
3501                         mdname(mddev), clevel);
3502                 rv = -EINVAL;
3503                 goto out_unlock;
3504         }
3505
3506         rdev_for_each(rdev, mddev)
3507                 rdev->new_raid_disk = rdev->raid_disk;
3508
3509         /* ->takeover must set new_* and/or delta_disks
3510          * if it succeeds, and may set them when it fails.
3511          */
3512         priv = pers->takeover(mddev);
3513         if (IS_ERR(priv)) {
3514                 mddev->new_level = mddev->level;
3515                 mddev->new_layout = mddev->layout;
3516                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3517                 mddev->raid_disks -= mddev->delta_disks;
3518                 mddev->delta_disks = 0;
3519                 mddev->reshape_backwards = 0;
3520                 module_put(pers->owner);
3521                 pr_warn("md: %s: %s would not accept array\n",
3522                         mdname(mddev), clevel);
3523                 rv = PTR_ERR(priv);
3524                 goto out_unlock;
3525         }
3526
3527         /* Looks like we have a winner */
3528         mddev_suspend(mddev);
3529         mddev_detach(mddev);
3530
3531         spin_lock(&mddev->lock);
3532         oldpers = mddev->pers;
3533         oldpriv = mddev->private;
3534         mddev->pers = pers;
3535         mddev->private = priv;
3536         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3537         mddev->level = mddev->new_level;
3538         mddev->layout = mddev->new_layout;
3539         mddev->chunk_sectors = mddev->new_chunk_sectors;
3540         mddev->delta_disks = 0;
3541         mddev->reshape_backwards = 0;
3542         mddev->degraded = 0;
3543         spin_unlock(&mddev->lock);
3544
3545         if (oldpers->sync_request == NULL &&
3546             mddev->external) {
3547                 /* We are converting from a no-redundancy array
3548                  * to a redundancy array and metadata is managed
3549                  * externally so we need to be sure that writes
3550                  * won't block due to a need to transition
3551                  *      clean->dirty
3552                  * until external management is started.
3553                  */
3554                 mddev->in_sync = 0;
3555                 mddev->safemode_delay = 0;
3556                 mddev->safemode = 0;
3557         }
3558
3559         oldpers->free(mddev, oldpriv);
3560
3561         if (oldpers->sync_request == NULL &&
3562             pers->sync_request != NULL) {
3563                 /* need to add the md_redundancy_group */
3564                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3565                         pr_warn("md: cannot register extra attributes for %s\n",
3566                                 mdname(mddev));
3567                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3568         }
3569         if (oldpers->sync_request != NULL &&
3570             pers->sync_request == NULL) {
3571                 /* need to remove the md_redundancy_group */
3572                 if (mddev->to_remove == NULL)
3573                         mddev->to_remove = &md_redundancy_group;
3574         }
3575
3576         module_put(oldpers->owner);
3577
3578         rdev_for_each(rdev, mddev) {
3579                 if (rdev->raid_disk < 0)
3580                         continue;
3581                 if (rdev->new_raid_disk >= mddev->raid_disks)
3582                         rdev->new_raid_disk = -1;
3583                 if (rdev->new_raid_disk == rdev->raid_disk)
3584                         continue;
3585                 sysfs_unlink_rdev(mddev, rdev);
3586         }
3587         rdev_for_each(rdev, mddev) {
3588                 if (rdev->raid_disk < 0)
3589                         continue;
3590                 if (rdev->new_raid_disk == rdev->raid_disk)
3591                         continue;
3592                 rdev->raid_disk = rdev->new_raid_disk;
3593                 if (rdev->raid_disk < 0)
3594                         clear_bit(In_sync, &rdev->flags);
3595                 else {
3596                         if (sysfs_link_rdev(mddev, rdev))
3597                                 pr_warn("md: cannot register rd%d for %s after level change\n",
3598                                         rdev->raid_disk, mdname(mddev));
3599                 }
3600         }
3601
3602         if (pers->sync_request == NULL) {
3603                 /* this is now an array without redundancy, so
3604                  * it must always be in_sync
3605                  */
3606                 mddev->in_sync = 1;
3607                 del_timer_sync(&mddev->safemode_timer);
3608         }
3609         blk_set_stacking_limits(&mddev->queue->limits);
3610         pers->run(mddev);
3611         set_bit(MD_CHANGE_DEVS, &mddev->flags);
3612         mddev_resume(mddev);
3613         if (!mddev->thread)
3614                 md_update_sb(mddev, 1);
3615         sysfs_notify(&mddev->kobj, NULL, "level");
3616         md_new_event(mddev);
3617         rv = len;
3618 out_unlock:
3619         mddev_unlock(mddev);
3620         return rv;
3621 }
3622
3623 static struct md_sysfs_entry md_level =
3624 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
3625
3626 static ssize_t
3627 layout_show(struct mddev *mddev, char *page)
3628 {
3629         /* just a number, not meaningful for all levels */
3630         if (mddev->reshape_position != MaxSector &&
3631             mddev->layout != mddev->new_layout)
3632                 return sprintf(page, "%d (%d)\n",
3633                                mddev->new_layout, mddev->layout);
3634         return sprintf(page, "%d\n", mddev->layout);
3635 }
3636
3637 static ssize_t
3638 layout_store(struct mddev *mddev, const char *buf, size_t len)
3639 {
3640         unsigned int n;
3641         int err;
3642
3643         err = kstrtouint(buf, 10, &n);
3644         if (err < 0)
3645                 return err;
3646         err = mddev_lock(mddev);
3647         if (err)
3648                 return err;
3649
3650         if (mddev->pers) {
3651                 if (mddev->pers->check_reshape == NULL)
3652                         err = -EBUSY;
3653                 else if (mddev->ro)
3654                         err = -EROFS;
3655                 else {
3656                         mddev->new_layout = n;
3657                         err = mddev->pers->check_reshape(mddev);
3658                         if (err)
3659                                 mddev->new_layout = mddev->layout;
3660                 }
3661         } else {
3662                 mddev->new_layout = n;
3663                 if (mddev->reshape_position == MaxSector)
3664                         mddev->layout = n;
3665         }
3666         mddev_unlock(mddev);
3667         return err ?: len;
3668 }
3669 static struct md_sysfs_entry md_layout =
3670 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3671
3672 static ssize_t
3673 raid_disks_show(struct mddev *mddev, char *page)
3674 {
3675         if (mddev->raid_disks == 0)
3676                 return 0;
3677         if (mddev->reshape_position != MaxSector &&
3678             mddev->delta_disks != 0)
3679                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3680                                mddev->raid_disks - mddev->delta_disks);
3681         return sprintf(page, "%d\n", mddev->raid_disks);
3682 }
3683
3684 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3685
3686 static ssize_t
3687 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3688 {
3689         unsigned int n;
3690         int err;
3691
3692         err = kstrtouint(buf, 10, &n);
3693         if (err < 0)
3694                 return err;
3695
3696         err = mddev_lock(mddev);
3697         if (err)
3698                 return err;
3699         if (mddev->pers)
3700                 err = update_raid_disks(mddev, n);
3701         else if (mddev->reshape_position != MaxSector) {
3702                 struct md_rdev *rdev;
3703                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3704
3705                 err = -EINVAL;
3706                 rdev_for_each(rdev, mddev) {
3707                         if (olddisks < n &&
3708                             rdev->data_offset < rdev->new_data_offset)
3709                                 goto out_unlock;
3710                         if (olddisks > n &&
3711                             rdev->data_offset > rdev->new_data_offset)
3712                                 goto out_unlock;
3713                 }
3714                 err = 0;
3715                 mddev->delta_disks = n - olddisks;
3716                 mddev->raid_disks = n;
3717                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3718         } else
3719                 mddev->raid_disks = n;
3720 out_unlock:
3721         mddev_unlock(mddev);
3722         return err ? err : len;
3723 }
3724 static struct md_sysfs_entry md_raid_disks =
3725 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3726
3727 static ssize_t
3728 chunk_size_show(struct mddev *mddev, char *page)
3729 {
3730         if (mddev->reshape_position != MaxSector &&
3731             mddev->chunk_sectors != mddev->new_chunk_sectors)
3732                 return sprintf(page, "%d (%d)\n",
3733                                mddev->new_chunk_sectors << 9,
3734                                mddev->chunk_sectors << 9);
3735         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3736 }
3737
3738 static ssize_t
3739 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3740 {
3741         unsigned long n;
3742         int err;
3743
3744         err = kstrtoul(buf, 10, &n);
3745         if (err < 0)
3746                 return err;
3747
3748         err = mddev_lock(mddev);
3749         if (err)
3750                 return err;
3751         if (mddev->pers) {
3752                 if (mddev->pers->check_reshape == NULL)
3753                         err = -EBUSY;
3754                 else if (mddev->ro)
3755                         err = -EROFS;
3756                 else {
3757                         mddev->new_chunk_sectors = n >> 9;
3758                         err = mddev->pers->check_reshape(mddev);
3759                         if (err)
3760                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3761                 }
3762         } else {
3763                 mddev->new_chunk_sectors = n >> 9;
3764                 if (mddev->reshape_position == MaxSector)
3765                         mddev->chunk_sectors = n >> 9;
3766         }
3767         mddev_unlock(mddev);
3768         return err ?: len;
3769 }
3770 static struct md_sysfs_entry md_chunk_size =
3771 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
3772
3773 static ssize_t
3774 resync_start_show(struct mddev *mddev, char *page)
3775 {
3776         if (mddev->recovery_cp == MaxSector)
3777                 return sprintf(page, "none\n");
3778         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3779 }
3780
3781 static ssize_t
3782 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3783 {
3784         unsigned long long n;
3785         int err;
3786
3787         if (cmd_match(buf, "none"))
3788                 n = MaxSector;
3789         else {
3790                 err = kstrtoull(buf, 10, &n);
3791                 if (err < 0)
3792                         return err;
3793                 if (n != (sector_t)n)
3794                         return -EINVAL;
3795         }
3796
3797         err = mddev_lock(mddev);
3798         if (err)
3799                 return err;
3800         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3801                 err = -EBUSY;
3802
3803         if (!err) {
3804                 mddev->recovery_cp = n;
3805                 if (mddev->pers)
3806                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3807         }
3808         mddev_unlock(mddev);
3809         return err ?: len;
3810 }
3811 static struct md_sysfs_entry md_resync_start =
3812 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3813                 resync_start_show, resync_start_store);
3814
3815 /*
3816  * The array state can be:
3817  *
3818  * clear
3819  *     No devices, no size, no level
3820  *     Equivalent to STOP_ARRAY ioctl
3821  * inactive
3822  *     May have some settings, but array is not active
3823  *        all IO results in error
3824  *     When written, doesn't tear down array, but just stops it
3825  * suspended (not supported yet)
3826  *     All IO requests will block. The array can be reconfigured.
3827  *     Writing this, if accepted, will block until array is quiescent
3828  * readonly
3829  *     no resync can happen.  no superblocks get written.
3830  *     write requests fail
3831  * read-auto
3832  *     like readonly, but behaves like 'clean' on a write request.
3833  *
3834  * clean - no pending writes, but otherwise active.
3835  *     When written to inactive array, starts without resync
3836  *     If a write request arrives then
3837  *       if metadata is known, mark 'dirty' and switch to 'active'.
3838  *       if not known, block and switch to write-pending
3839  *     If written to an active array that has pending writes, then fails.
3840  * active
3841  *     fully active: IO and resync can be happening.
3842  *     When written to inactive array, starts with resync
3843  *
3844  * write-pending
3845  *     clean, but writes are blocked waiting for 'active' to be written.
3846  *
3847  * active-idle
3848  *     like active, but no writes have been seen for a while (100msec).
3849  *
3850  */
3851 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3852                    write_pending, active_idle, bad_word};
3853 static char *array_states[] = {
3854         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3855         "write-pending", "active-idle", NULL };
3856
3857 static int match_word(const char *word, char **list)
3858 {
3859         int n;
3860         for (n=0; list[n]; n++)
3861                 if (cmd_match(word, list[n]))
3862                         break;
3863         return n;
3864 }
3865
3866 static ssize_t
3867 array_state_show(struct mddev *mddev, char *page)
3868 {
3869         enum array_state st = inactive;
3870
3871         if (mddev->pers)
3872                 switch(mddev->ro) {
3873                 case 1:
3874                         st = readonly;
3875                         break;
3876                 case 2:
3877                         st = read_auto;
3878                         break;
3879                 case 0:
3880                         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
3881                                 st = write_pending;
3882                         else if (mddev->in_sync)
3883                                 st = clean;
3884                         else if (mddev->safemode)
3885                                 st = active_idle;
3886                         else
3887                                 st = active;
3888                 }
3889         else {
3890                 if (list_empty(&mddev->disks) &&
3891                     mddev->raid_disks == 0 &&
3892                     mddev->dev_sectors == 0)
3893                         st = clear;
3894                 else
3895                         st = inactive;
3896         }
3897         return sprintf(page, "%s\n", array_states[st]);
3898 }
3899
3900 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3901 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3902 static int do_md_run(struct mddev *mddev);
3903 static int restart_array(struct mddev *mddev);
3904
3905 static ssize_t
3906 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3907 {
3908         int err;
3909         enum array_state st = match_word(buf, array_states);
3910
3911         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3912                 /* don't take reconfig_mutex when toggling between
3913                  * clean and active
3914                  */
3915                 spin_lock(&mddev->lock);
3916                 if (st == active) {
3917                         restart_array(mddev);
3918                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
3919                         md_wakeup_thread(mddev->thread);
3920                         wake_up(&mddev->sb_wait);
3921                         err = 0;
3922                 } else /* st == clean */ {
3923                         restart_array(mddev);
3924                         if (atomic_read(&mddev->writes_pending) == 0) {
3925                                 if (mddev->in_sync == 0) {
3926                                         mddev->in_sync = 1;
3927                                         if (mddev->safemode == 1)
3928                                                 mddev->safemode = 0;
3929                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3930                                 }
3931                                 err = 0;
3932                         } else
3933                                 err = -EBUSY;
3934                 }
3935                 if (!err)
3936                         sysfs_notify_dirent_safe(mddev->sysfs_state);
3937                 spin_unlock(&mddev->lock);
3938                 return err ?: len;
3939         }
3940         err = mddev_lock(mddev);
3941         if (err)
3942                 return err;
3943         err = -EINVAL;
3944         switch(st) {
3945         case bad_word:
3946                 break;
3947         case clear:
3948                 /* stopping an active array */
3949                 err = do_md_stop(mddev, 0, NULL);
3950                 break;
3951         case inactive:
3952                 /* stopping an active array */
3953                 if (mddev->pers)
3954                         err = do_md_stop(mddev, 2, NULL);
3955                 else
3956                         err = 0; /* already inactive */
3957                 break;
3958         case suspended:
3959                 break; /* not supported yet */
3960         case readonly:
3961                 if (mddev->pers)
3962                         err = md_set_readonly(mddev, NULL);
3963                 else {
3964                         mddev->ro = 1;
3965                         set_disk_ro(mddev->gendisk, 1);
3966                         err = do_md_run(mddev);
3967                 }
3968                 break;
3969         case read_auto:
3970                 if (mddev->pers) {
3971                         if (mddev->ro == 0)
3972                                 err = md_set_readonly(mddev, NULL);
3973                         else if (mddev->ro == 1)
3974                                 err = restart_array(mddev);
3975                         if (err == 0) {
3976                                 mddev->ro = 2;
3977                                 set_disk_ro(mddev->gendisk, 0);
3978                         }
3979                 } else {
3980                         mddev->ro = 2;
3981                         err = do_md_run(mddev);
3982                 }
3983                 break;
3984         case clean:
3985                 if (mddev->pers) {
3986                         err = restart_array(mddev);
3987                         if (err)
3988                                 break;
3989                         spin_lock(&mddev->lock);
3990                         if (atomic_read(&mddev->writes_pending) == 0) {
3991                                 if (mddev->in_sync == 0) {
3992                                         mddev->in_sync = 1;
3993                                         if (mddev->safemode == 1)
3994                                                 mddev->safemode = 0;
3995                                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
3996                                 }
3997                                 err = 0;
3998                         } else
3999                                 err = -EBUSY;
4000                         spin_unlock(&mddev->lock);
4001                 } else
4002                         err = -EINVAL;
4003                 break;
4004         case active:
4005                 if (mddev->pers) {
4006                         err = restart_array(mddev);
4007                         if (err)
4008                                 break;
4009                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
4010                         wake_up(&mddev->sb_wait);
4011                         err = 0;
4012                 } else {
4013                         mddev->ro = 0;
4014                         set_disk_ro(mddev->gendisk, 0);
4015                         err = do_md_run(mddev);
4016                 }
4017                 break;
4018         case write_pending:
4019         case active_idle:
4020                 /* these cannot be set */
4021                 break;
4022         }
4023
4024         if (!err) {
4025                 if (mddev->hold_active == UNTIL_IOCTL)
4026                         mddev->hold_active = 0;
4027                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4028         }
4029         mddev_unlock(mddev);
4030         return err ?: len;
4031 }
4032 static struct md_sysfs_entry md_array_state =
4033 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4034
4035 static ssize_t
4036 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4037         return sprintf(page, "%d\n",
4038                        atomic_read(&mddev->max_corr_read_errors));
4039 }
4040
4041 static ssize_t
4042 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4043 {
4044         unsigned int n;
4045         int rv;
4046
4047         rv = kstrtouint(buf, 10, &n);
4048         if (rv < 0)
4049                 return rv;
4050         atomic_set(&mddev->max_corr_read_errors, n);
4051         return len;
4052 }
4053
4054 static struct md_sysfs_entry max_corr_read_errors =
4055 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4056         max_corrected_read_errors_store);
4057
4058 static ssize_t
4059 null_show(struct mddev *mddev, char *page)
4060 {
4061         return -EINVAL;
4062 }
4063
4064 static ssize_t
4065 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4066 {
4067         /* buf must be %d:%d\n? giving major and minor numbers */
4068         /* The new device is added to the array.
4069          * If the array has a persistent superblock, we read the
4070          * superblock to initialise info and check validity.
4071          * Otherwise, only checking done is that in bind_rdev_to_array,
4072          * which mainly checks size.
4073          */
4074         char *e;
4075         int major = simple_strtoul(buf, &e, 10);
4076         int minor;
4077         dev_t dev;
4078         struct md_rdev *rdev;
4079         int err;
4080
4081         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4082                 return -EINVAL;
4083         minor = simple_strtoul(e+1, &e, 10);
4084         if (*e && *e != '\n')
4085                 return -EINVAL;
4086         dev = MKDEV(major, minor);
4087         if (major != MAJOR(dev) ||
4088             minor != MINOR(dev))
4089                 return -EOVERFLOW;
4090
4091         flush_workqueue(md_misc_wq);
4092
4093         err = mddev_lock(mddev);
4094         if (err)
4095                 return err;
4096         if (mddev->persistent) {
4097                 rdev = md_import_device(dev, mddev->major_version,
4098                                         mddev->minor_version);
4099                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4100                         struct md_rdev *rdev0
4101                                 = list_entry(mddev->disks.next,
4102                                              struct md_rdev, same_set);
4103                         err = super_types[mddev->major_version]
4104                                 .load_super(rdev, rdev0, mddev->minor_version);
4105                         if (err < 0)
4106                                 goto out;
4107                 }
4108         } else if (mddev->external)
4109                 rdev = md_import_device(dev, -2, -1);
4110         else
4111                 rdev = md_import_device(dev, -1, -1);
4112
4113         if (IS_ERR(rdev)) {
4114                 mddev_unlock(mddev);
4115                 return PTR_ERR(rdev);
4116         }
4117         err = bind_rdev_to_array(rdev, mddev);
4118  out:
4119         if (err)
4120                 export_rdev(rdev);
4121         mddev_unlock(mddev);
4122         return err ? err : len;
4123 }
4124
4125 static struct md_sysfs_entry md_new_device =
4126 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
4127
4128 static ssize_t
4129 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4130 {
4131         char *end;
4132         unsigned long chunk, end_chunk;
4133         int err;
4134
4135         err = mddev_lock(mddev);
4136         if (err)
4137                 return err;
4138         if (!mddev->bitmap)
4139                 goto out;
4140         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4141         while (*buf) {
4142                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4143                 if (buf == end) break;
4144                 if (*end == '-') { /* range */
4145                         buf = end + 1;
4146                         end_chunk = simple_strtoul(buf, &end, 0);
4147                         if (buf == end) break;
4148                 }
4149                 if (*end && !isspace(*end)) break;
4150                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4151                 buf = skip_spaces(end);
4152         }
4153         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4154 out:
4155         mddev_unlock(mddev);
4156         return len;
4157 }
4158
4159 static struct md_sysfs_entry md_bitmap =
4160 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
4161
4162 static ssize_t
4163 size_show(struct mddev *mddev, char *page)
4164 {
4165         return sprintf(page, "%llu\n",
4166                 (unsigned long long)mddev->dev_sectors / 2);
4167 }
4168
4169 static int update_size(struct mddev *mddev, sector_t num_sectors);
4170
4171 static ssize_t
4172 size_store(struct mddev *mddev, const char *buf, size_t len)
4173 {
4174         /* If array is inactive, we can reduce the component size, but
4175          * not increase it (except from 0).
4176          * If array is active, we can try an on-line resize
4177          */
4178         sector_t sectors;
4179         int err = strict_blocks_to_sectors(buf, &sectors);
4180
4181         if (err < 0)
4182                 return err;
4183         err = mddev_lock(mddev);
4184         if (err)
4185                 return err;
4186         if (mddev->pers) {
4187                 err = update_size(mddev, sectors);
4188                 if (err == 0)
4189                         md_update_sb(mddev, 1);
4190         } else {
4191                 if (mddev->dev_sectors == 0 ||
4192                     mddev->dev_sectors > sectors)
4193                         mddev->dev_sectors = sectors;
4194                 else
4195                         err = -ENOSPC;
4196         }
4197         mddev_unlock(mddev);
4198         return err ? err : len;
4199 }
4200
4201 static struct md_sysfs_entry md_size =
4202 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4203
4204 /* Metadata version.
4205  * This is one of
4206  *   'none' for arrays with no metadata (good luck...)
4207  *   'external' for arrays with externally managed metadata,
4208  * or N.M for internally known formats
4209  */
4210 static ssize_t
4211 metadata_show(struct mddev *mddev, char *page)
4212 {
4213         if (mddev->persistent)
4214                 return sprintf(page, "%d.%d\n",
4215                                mddev->major_version, mddev->minor_version);
4216         else if (mddev->external)
4217                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4218         else
4219                 return sprintf(page, "none\n");
4220 }
4221
4222 static ssize_t
4223 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4224 {
4225         int major, minor;
4226         char *e;
4227         int err;
4228         /* Changing the details of 'external' metadata is
4229          * always permitted.  Otherwise there must be
4230          * no devices attached to the array.
4231          */
4232
4233         err = mddev_lock(mddev);
4234         if (err)
4235                 return err;
4236         err = -EBUSY;
4237         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4238                 ;
4239         else if (!list_empty(&mddev->disks))
4240                 goto out_unlock;
4241
4242         err = 0;
4243         if (cmd_match(buf, "none")) {
4244                 mddev->persistent = 0;
4245                 mddev->external = 0;
4246                 mddev->major_version = 0;
4247                 mddev->minor_version = 90;
4248                 goto out_unlock;
4249         }
4250         if (strncmp(buf, "external:", 9) == 0) {
4251                 size_t namelen = len-9;
4252                 if (namelen >= sizeof(mddev->metadata_type))
4253                         namelen = sizeof(mddev->metadata_type)-1;
4254                 strncpy(mddev->metadata_type, buf+9, namelen);
4255                 mddev->metadata_type[namelen] = 0;
4256                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4257                         mddev->metadata_type[--namelen] = 0;
4258                 mddev->persistent = 0;
4259                 mddev->external = 1;
4260                 mddev->major_version = 0;
4261                 mddev->minor_version = 90;
4262                 goto out_unlock;
4263         }
4264         major = simple_strtoul(buf, &e, 10);
4265         err = -EINVAL;
4266         if (e==buf || *e != '.')
4267                 goto out_unlock;
4268         buf = e+1;
4269         minor = simple_strtoul(buf, &e, 10);
4270         if (e==buf || (*e && *e != '\n') )
4271                 goto out_unlock;
4272         err = -ENOENT;
4273         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4274                 goto out_unlock;
4275         mddev->major_version = major;
4276         mddev->minor_version = minor;
4277         mddev->persistent = 1;
4278         mddev->external = 0;
4279         err = 0;
4280 out_unlock:
4281         mddev_unlock(mddev);
4282         return err ?: len;
4283 }
4284
4285 static struct md_sysfs_entry md_metadata =
4286 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4287
4288 static ssize_t
4289 action_show(struct mddev *mddev, char *page)
4290 {
4291         char *type = "idle";
4292         unsigned long recovery = mddev->recovery;
4293         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4294                 type = "frozen";
4295         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4296             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4297                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4298                         type = "reshape";
4299                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4300                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4301                                 type = "resync";
4302                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4303                                 type = "check";
4304                         else
4305                                 type = "repair";
4306                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4307                         type = "recover";
4308                 else if (mddev->reshape_position != MaxSector)
4309                         type = "reshape";
4310         }
4311         return sprintf(page, "%s\n", type);
4312 }
4313
4314 static ssize_t
4315 action_store(struct mddev *mddev, const char *page, size_t len)
4316 {
4317         if (!mddev->pers || !mddev->pers->sync_request)
4318                 return -EINVAL;
4319
4320
4321         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4322                 if (cmd_match(page, "frozen"))
4323                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4324                 else
4325                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4326                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4327                     mddev_lock(mddev) == 0) {
4328                         flush_workqueue(md_misc_wq);
4329                         if (mddev->sync_thread) {
4330                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4331                                 md_reap_sync_thread(mddev);
4332                         }
4333                         mddev_unlock(mddev);
4334                 }
4335         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4336                 return -EBUSY;
4337         else if (cmd_match(page, "resync"))
4338                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4339         else if (cmd_match(page, "recover")) {
4340                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4341                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4342         } else if (cmd_match(page, "reshape")) {
4343                 int err;
4344                 if (mddev->pers->start_reshape == NULL)
4345                         return -EINVAL;
4346                 err = mddev_lock(mddev);
4347                 if (!err) {
4348                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4349                                 err =  -EBUSY;
4350                         else {
4351                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4352                                 err = mddev->pers->start_reshape(mddev);
4353                         }
4354                         mddev_unlock(mddev);
4355                 }
4356                 if (err)
4357                         return err;
4358                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4359         } else {
4360                 if (cmd_match(page, "check"))
4361                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4362                 else if (!cmd_match(page, "repair"))
4363                         return -EINVAL;
4364                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4365                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4366                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4367         }
4368         if (mddev->ro == 2) {
4369                 /* A write to sync_action is enough to justify
4370                  * canceling read-auto mode
4371                  */
4372                 mddev->ro = 0;
4373                 md_wakeup_thread(mddev->sync_thread);
4374         }
4375         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4376         md_wakeup_thread(mddev->thread);
4377         sysfs_notify_dirent_safe(mddev->sysfs_action);
4378         return len;
4379 }
4380
4381 static struct md_sysfs_entry md_scan_mode =
4382 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4383
4384 static ssize_t
4385 last_sync_action_show(struct mddev *mddev, char *page)
4386 {
4387         return sprintf(page, "%s\n", mddev->last_sync_action);
4388 }
4389
4390 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4391
4392 static ssize_t
4393 mismatch_cnt_show(struct mddev *mddev, char *page)
4394 {
4395         return sprintf(page, "%llu\n",
4396                        (unsigned long long)
4397                        atomic64_read(&mddev->resync_mismatches));
4398 }
4399
4400 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4401
4402 static ssize_t
4403 sync_min_show(struct mddev *mddev, char *page)
4404 {
4405         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4406                        mddev->sync_speed_min ? "local": "system");
4407 }
4408
4409 static ssize_t
4410 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4411 {
4412         unsigned int min;
4413         int rv;
4414
4415         if (strncmp(buf, "system", 6)==0) {
4416                 min = 0;
4417         } else {
4418                 rv = kstrtouint(buf, 10, &min);
4419                 if (rv < 0)
4420                         return rv;
4421                 if (min == 0)
4422                         return -EINVAL;
4423         }
4424         mddev->sync_speed_min = min;
4425         return len;
4426 }
4427
4428 static struct md_sysfs_entry md_sync_min =
4429 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4430
4431 static ssize_t
4432 sync_max_show(struct mddev *mddev, char *page)
4433 {
4434         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4435                        mddev->sync_speed_max ? "local": "system");
4436 }
4437
4438 static ssize_t
4439 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4440 {
4441         unsigned int max;
4442         int rv;
4443
4444         if (strncmp(buf, "system", 6)==0) {
4445                 max = 0;
4446         } else {
4447                 rv = kstrtouint(buf, 10, &max);
4448                 if (rv < 0)
4449                         return rv;
4450                 if (max == 0)
4451                         return -EINVAL;
4452         }
4453         mddev->sync_speed_max = max;
4454         return len;
4455 }
4456
4457 static struct md_sysfs_entry md_sync_max =
4458 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
4459
4460 static ssize_t
4461 degraded_show(struct mddev *mddev, char *page)
4462 {
4463         return sprintf(page, "%d\n", mddev->degraded);
4464 }
4465 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4466
4467 static ssize_t
4468 sync_force_parallel_show(struct mddev *mddev, char *page)
4469 {
4470         return sprintf(page, "%d\n", mddev->parallel_resync);
4471 }
4472
4473 static ssize_t
4474 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4475 {
4476         long n;
4477
4478         if (kstrtol(buf, 10, &n))
4479                 return -EINVAL;
4480
4481         if (n != 0 && n != 1)
4482                 return -EINVAL;
4483
4484         mddev->parallel_resync = n;
4485
4486         if (mddev->sync_thread)
4487                 wake_up(&resync_wait);
4488
4489         return len;
4490 }
4491
4492 /* force parallel resync, even with shared block devices */
4493 static struct md_sysfs_entry md_sync_force_parallel =
4494 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4495        sync_force_parallel_show, sync_force_parallel_store);
4496
4497 static ssize_t
4498 sync_speed_show(struct mddev *mddev, char *page)
4499 {
4500         unsigned long resync, dt, db;
4501         if (mddev->curr_resync == 0)
4502                 return sprintf(page, "none\n");
4503         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4504         dt = (jiffies - mddev->resync_mark) / HZ;
4505         if (!dt) dt++;
4506         db = resync - mddev->resync_mark_cnt;
4507         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4508 }
4509
4510 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4511
4512 static ssize_t
4513 sync_completed_show(struct mddev *mddev, char *page)
4514 {
4515         unsigned long long max_sectors, resync;
4516
4517         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4518                 return sprintf(page, "none\n");
4519
4520         if (mddev->curr_resync == 1 ||
4521             mddev->curr_resync == 2)
4522                 return sprintf(page, "delayed\n");
4523
4524         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4525             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4526                 max_sectors = mddev->resync_max_sectors;
4527         else
4528                 max_sectors = mddev->dev_sectors;
4529
4530         resync = mddev->curr_resync_completed;
4531         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4532 }
4533
4534 static struct md_sysfs_entry md_sync_completed =
4535         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4536
4537 static ssize_t
4538 min_sync_show(struct mddev *mddev, char *page)
4539 {
4540         return sprintf(page, "%llu\n",
4541                        (unsigned long long)mddev->resync_min);
4542 }
4543 static ssize_t
4544 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4545 {
4546         unsigned long long min;
4547         int err;
4548
4549         if (kstrtoull(buf, 10, &min))
4550                 return -EINVAL;
4551
4552         spin_lock(&mddev->lock);
4553         err = -EINVAL;
4554         if (min > mddev->resync_max)
4555                 goto out_unlock;
4556
4557         err = -EBUSY;
4558         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4559                 goto out_unlock;
4560
4561         /* Round down to multiple of 4K for safety */
4562         mddev->resync_min = round_down(min, 8);
4563         err = 0;
4564
4565 out_unlock:
4566         spin_unlock(&mddev->lock);
4567         return err ?: len;
4568 }
4569
4570 static struct md_sysfs_entry md_min_sync =
4571 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4572
4573 static ssize_t
4574 max_sync_show(struct mddev *mddev, char *page)
4575 {
4576         if (mddev->resync_max == MaxSector)
4577                 return sprintf(page, "max\n");
4578         else
4579                 return sprintf(page, "%llu\n",
4580                                (unsigned long long)mddev->resync_max);
4581 }
4582 static ssize_t
4583 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4584 {
4585         int err;
4586         spin_lock(&mddev->lock);
4587         if (strncmp(buf, "max", 3) == 0)
4588                 mddev->resync_max = MaxSector;
4589         else {
4590                 unsigned long long max;
4591                 int chunk;
4592
4593                 err = -EINVAL;
4594                 if (kstrtoull(buf, 10, &max))
4595                         goto out_unlock;
4596                 if (max < mddev->resync_min)
4597                         goto out_unlock;
4598
4599                 err = -EBUSY;
4600                 if (max < mddev->resync_max &&
4601                     mddev->ro == 0 &&
4602                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4603                         goto out_unlock;
4604
4605                 /* Must be a multiple of chunk_size */
4606                 chunk = mddev->chunk_sectors;
4607                 if (chunk) {
4608                         sector_t temp = max;
4609
4610                         err = -EINVAL;
4611                         if (sector_div(temp, chunk))
4612                                 goto out_unlock;
4613                 }
4614                 mddev->resync_max = max;
4615         }
4616         wake_up(&mddev->recovery_wait);
4617         err = 0;
4618 out_unlock:
4619         spin_unlock(&mddev->lock);
4620         return err ?: len;
4621 }
4622
4623 static struct md_sysfs_entry md_max_sync =
4624 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
4625
4626 static ssize_t
4627 suspend_lo_show(struct mddev *mddev, char *page)
4628 {
4629         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4630 }
4631
4632 static ssize_t
4633 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4634 {
4635         unsigned long long old, new;
4636         int err;
4637
4638         err = kstrtoull(buf, 10, &new);
4639         if (err < 0)
4640                 return err;
4641         if (new != (sector_t)new)
4642                 return -EINVAL;
4643
4644         err = mddev_lock(mddev);
4645         if (err)
4646                 return err;
4647         err = -EINVAL;
4648         if (mddev->pers == NULL ||
4649             mddev->pers->quiesce == NULL)
4650                 goto unlock;
4651         old = mddev->suspend_lo;
4652         mddev->suspend_lo = new;
4653         if (new >= old)
4654                 /* Shrinking suspended region */
4655                 mddev->pers->quiesce(mddev, 2);
4656         else {
4657                 /* Expanding suspended region - need to wait */
4658                 mddev->pers->quiesce(mddev, 1);
4659                 mddev->pers->quiesce(mddev, 0);
4660         }
4661         err = 0;
4662 unlock:
4663         mddev_unlock(mddev);
4664         return err ?: len;
4665 }
4666 static struct md_sysfs_entry md_suspend_lo =
4667 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4668
4669 static ssize_t
4670 suspend_hi_show(struct mddev *mddev, char *page)
4671 {
4672         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4673 }
4674
4675 static ssize_t
4676 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4677 {
4678         unsigned long long old, new;
4679         int err;
4680
4681         err = kstrtoull(buf, 10, &new);
4682         if (err < 0)
4683                 return err;
4684         if (new != (sector_t)new)
4685                 return -EINVAL;
4686
4687         err = mddev_lock(mddev);
4688         if (err)
4689                 return err;
4690         err = -EINVAL;
4691         if (mddev->pers == NULL ||
4692             mddev->pers->quiesce == NULL)
4693                 goto unlock;
4694         old = mddev->suspend_hi;
4695         mddev->suspend_hi = new;
4696         if (new <= old)
4697                 /* Shrinking suspended region */
4698                 mddev->pers->quiesce(mddev, 2);
4699         else {
4700                 /* Expanding suspended region - need to wait */
4701                 mddev->pers->quiesce(mddev, 1);
4702                 mddev->pers->quiesce(mddev, 0);
4703         }
4704         err = 0;
4705 unlock:
4706         mddev_unlock(mddev);
4707         return err ?: len;
4708 }
4709 static struct md_sysfs_entry md_suspend_hi =
4710 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
4711
4712 static ssize_t
4713 reshape_position_show(struct mddev *mddev, char *page)
4714 {
4715         if (mddev->reshape_position != MaxSector)
4716                 return sprintf(page, "%llu\n",
4717                                (unsigned long long)mddev->reshape_position);
4718         strcpy(page, "none\n");
4719         return 5;
4720 }
4721
4722 static ssize_t
4723 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4724 {
4725         struct md_rdev *rdev;
4726         unsigned long long new;
4727         int err;
4728
4729         err = kstrtoull(buf, 10, &new);
4730         if (err < 0)
4731                 return err;
4732         if (new != (sector_t)new)
4733                 return -EINVAL;
4734         err = mddev_lock(mddev);
4735         if (err)
4736                 return err;
4737         err = -EBUSY;
4738         if (mddev->pers)
4739                 goto unlock;
4740         mddev->reshape_position = new;
4741         mddev->delta_disks = 0;
4742         mddev->reshape_backwards = 0;
4743         mddev->new_level = mddev->level;
4744         mddev->new_layout = mddev->layout;
4745         mddev->new_chunk_sectors = mddev->chunk_sectors;
4746         rdev_for_each(rdev, mddev)
4747                 rdev->new_data_offset = rdev->data_offset;
4748         err = 0;
4749 unlock:
4750         mddev_unlock(mddev);
4751         return err ?: len;
4752 }
4753
4754 static struct md_sysfs_entry md_reshape_position =
4755 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4756        reshape_position_store);
4757
4758 static ssize_t
4759 reshape_direction_show(struct mddev *mddev, char *page)
4760 {
4761         return sprintf(page, "%s\n",
4762                        mddev->reshape_backwards ? "backwards" : "forwards");
4763 }
4764
4765 static ssize_t
4766 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4767 {
4768         int backwards = 0;
4769         int err;
4770
4771         if (cmd_match(buf, "forwards"))
4772                 backwards = 0;
4773         else if (cmd_match(buf, "backwards"))
4774                 backwards = 1;
4775         else
4776                 return -EINVAL;
4777         if (mddev->reshape_backwards == backwards)
4778                 return len;
4779
4780         err = mddev_lock(mddev);
4781         if (err)
4782                 return err;
4783         /* check if we are allowed to change */
4784         if (mddev->delta_disks)
4785                 err = -EBUSY;
4786         else if (mddev->persistent &&
4787             mddev->major_version == 0)
4788                 err =  -EINVAL;
4789         else
4790                 mddev->reshape_backwards = backwards;
4791         mddev_unlock(mddev);
4792         return err ?: len;
4793 }
4794
4795 static struct md_sysfs_entry md_reshape_direction =
4796 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4797        reshape_direction_store);
4798
4799 static ssize_t
4800 array_size_show(struct mddev *mddev, char *page)
4801 {
4802         if (mddev->external_size)
4803                 return sprintf(page, "%llu\n",
4804                                (unsigned long long)mddev->array_sectors/2);
4805         else
4806                 return sprintf(page, "default\n");
4807 }
4808
4809 static ssize_t
4810 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4811 {
4812         sector_t sectors;
4813         int err;
4814
4815         err = mddev_lock(mddev);
4816         if (err)
4817                 return err;
4818
4819         /* cluster raid doesn't support change array_sectors */
4820         if (mddev_is_clustered(mddev))
4821                 return -EINVAL;
4822
4823         if (strncmp(buf, "default", 7) == 0) {
4824                 if (mddev->pers)
4825                         sectors = mddev->pers->size(mddev, 0, 0);
4826                 else
4827                         sectors = mddev->array_sectors;
4828
4829                 mddev->external_size = 0;
4830         } else {
4831                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4832                         err = -EINVAL;
4833                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4834                         err = -E2BIG;
4835                 else
4836                         mddev->external_size = 1;
4837         }
4838
4839         if (!err) {
4840                 mddev->array_sectors = sectors;
4841                 if (mddev->pers) {
4842                         set_capacity(mddev->gendisk, mddev->array_sectors);
4843                         revalidate_disk(mddev->gendisk);
4844                 }
4845         }
4846         mddev_unlock(mddev);
4847         return err ?: len;
4848 }
4849
4850 static struct md_sysfs_entry md_array_size =
4851 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4852        array_size_store);
4853
4854 static struct attribute *md_default_attrs[] = {
4855         &md_level.attr,
4856         &md_layout.attr,
4857         &md_raid_disks.attr,
4858         &md_chunk_size.attr,
4859         &md_size.attr,
4860         &md_resync_start.attr,
4861         &md_metadata.attr,
4862         &md_new_device.attr,
4863         &md_safe_delay.attr,
4864         &md_array_state.attr,
4865         &md_reshape_position.attr,
4866         &md_reshape_direction.attr,
4867         &md_array_size.attr,
4868         &max_corr_read_errors.attr,
4869         NULL,
4870 };
4871
4872 static struct attribute *md_redundancy_attrs[] = {
4873         &md_scan_mode.attr,
4874         &md_last_scan_mode.attr,
4875         &md_mismatches.attr,
4876         &md_sync_min.attr,
4877         &md_sync_max.attr,
4878         &md_sync_speed.attr,
4879         &md_sync_force_parallel.attr,
4880         &md_sync_completed.attr,
4881         &md_min_sync.attr,
4882         &md_max_sync.attr,
4883         &md_suspend_lo.attr,
4884         &md_suspend_hi.attr,
4885         &md_bitmap.attr,
4886         &md_degraded.attr,
4887         NULL,
4888 };
4889 static struct attribute_group md_redundancy_group = {
4890         .name = NULL,
4891         .attrs = md_redundancy_attrs,
4892 };
4893
4894 static ssize_t
4895 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4896 {
4897         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4898         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4899         ssize_t rv;
4900
4901         if (!entry->show)
4902                 return -EIO;
4903         spin_lock(&all_mddevs_lock);
4904         if (list_empty(&mddev->all_mddevs)) {
4905                 spin_unlock(&all_mddevs_lock);
4906                 return -EBUSY;
4907         }
4908         mddev_get(mddev);
4909         spin_unlock(&all_mddevs_lock);
4910
4911         rv = entry->show(mddev, page);
4912         mddev_put(mddev);
4913         return rv;
4914 }
4915
4916 static ssize_t
4917 md_attr_store(struct kobject *kobj, struct attribute *attr,
4918               const char *page, size_t length)
4919 {
4920         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4921         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4922         ssize_t rv;
4923
4924         if (!entry->store)
4925                 return -EIO;
4926         if (!capable(CAP_SYS_ADMIN))
4927                 return -EACCES;
4928         spin_lock(&all_mddevs_lock);
4929         if (list_empty(&mddev->all_mddevs)) {
4930                 spin_unlock(&all_mddevs_lock);
4931                 return -EBUSY;
4932         }
4933         mddev_get(mddev);
4934         spin_unlock(&all_mddevs_lock);
4935         rv = entry->store(mddev, page, length);
4936         mddev_put(mddev);
4937         return rv;
4938 }
4939
4940 static void md_free(struct kobject *ko)
4941 {
4942         struct mddev *mddev = container_of(ko, struct mddev, kobj);
4943
4944         if (mddev->sysfs_state)
4945                 sysfs_put(mddev->sysfs_state);
4946
4947         if (mddev->queue)
4948                 blk_cleanup_queue(mddev->queue);
4949         if (mddev->gendisk) {
4950                 del_gendisk(mddev->gendisk);
4951                 put_disk(mddev->gendisk);
4952         }
4953
4954         kfree(mddev);
4955 }
4956
4957 static const struct sysfs_ops md_sysfs_ops = {
4958         .show   = md_attr_show,
4959         .store  = md_attr_store,
4960 };
4961 static struct kobj_type md_ktype = {
4962         .release        = md_free,
4963         .sysfs_ops      = &md_sysfs_ops,
4964         .default_attrs  = md_default_attrs,
4965 };
4966
4967 int mdp_major = 0;
4968
4969 static void mddev_delayed_delete(struct work_struct *ws)
4970 {
4971         struct mddev *mddev = container_of(ws, struct mddev, del_work);
4972
4973         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
4974         kobject_del(&mddev->kobj);
4975         kobject_put(&mddev->kobj);
4976 }
4977
4978 static int md_alloc(dev_t dev, char *name)
4979 {
4980         static DEFINE_MUTEX(disks_mutex);
4981         struct mddev *mddev = mddev_find(dev);
4982         struct gendisk *disk;
4983         int partitioned;
4984         int shift;
4985         int unit;
4986         int error;
4987
4988         if (!mddev)
4989                 return -ENODEV;
4990
4991         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
4992         shift = partitioned ? MdpMinorShift : 0;
4993         unit = MINOR(mddev->unit) >> shift;
4994
4995         /* wait for any previous instance of this device to be
4996          * completely removed (mddev_delayed_delete).
4997          */
4998         flush_workqueue(md_misc_wq);
4999
5000         mutex_lock(&disks_mutex);
5001         error = -EEXIST;
5002         if (mddev->gendisk)
5003                 goto abort;
5004
5005         if (name) {
5006                 /* Need to ensure that 'name' is not a duplicate.
5007                  */
5008                 struct mddev *mddev2;
5009                 spin_lock(&all_mddevs_lock);
5010
5011                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5012                         if (mddev2->gendisk &&
5013                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5014                                 spin_unlock(&all_mddevs_lock);
5015                                 goto abort;
5016                         }
5017                 spin_unlock(&all_mddevs_lock);
5018         }
5019
5020         error = -ENOMEM;
5021         mddev->queue = blk_alloc_queue(GFP_KERNEL);
5022         if (!mddev->queue)
5023                 goto abort;
5024         mddev->queue->queuedata = mddev;
5025
5026         blk_queue_make_request(mddev->queue, md_make_request);
5027         blk_set_stacking_limits(&mddev->queue->limits);
5028
5029         disk = alloc_disk(1 << shift);
5030         if (!disk) {
5031                 blk_cleanup_queue(mddev->queue);
5032                 mddev->queue = NULL;
5033                 goto abort;
5034         }
5035         disk->major = MAJOR(mddev->unit);
5036         disk->first_minor = unit << shift;
5037         if (name)
5038                 strcpy(disk->disk_name, name);
5039         else if (partitioned)
5040                 sprintf(disk->disk_name, "md_d%d", unit);
5041         else
5042                 sprintf(disk->disk_name, "md%d", unit);
5043         disk->fops = &md_fops;
5044         disk->private_data = mddev;
5045         disk->queue = mddev->queue;
5046         blk_queue_write_cache(mddev->queue, true, true);
5047         /* Allow extended partitions.  This makes the
5048          * 'mdp' device redundant, but we can't really
5049          * remove it now.
5050          */
5051         disk->flags |= GENHD_FL_EXT_DEVT;
5052         mddev->gendisk = disk;
5053         /* As soon as we call add_disk(), another thread could get
5054          * through to md_open, so make sure it doesn't get too far
5055          */
5056         mutex_lock(&mddev->open_mutex);
5057         add_disk(disk);
5058
5059         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
5060                                      &disk_to_dev(disk)->kobj, "%s", "md");
5061         if (error) {
5062                 /* This isn't possible, but as kobject_init_and_add is marked
5063                  * __must_check, we must do something with the result
5064                  */
5065                 pr_debug("md: cannot register %s/md - name in use\n",
5066                          disk->disk_name);
5067                 error = 0;
5068         }
5069         if (mddev->kobj.sd &&
5070             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5071                 pr_debug("pointless warning\n");
5072         mutex_unlock(&mddev->open_mutex);
5073  abort:
5074         mutex_unlock(&disks_mutex);
5075         if (!error && mddev->kobj.sd) {
5076                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5077                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5078         }
5079         mddev_put(mddev);
5080         return error;
5081 }
5082
5083 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5084 {
5085         md_alloc(dev, NULL);
5086         return NULL;
5087 }
5088
5089 static int add_named_array(const char *val, struct kernel_param *kp)
5090 {
5091         /* val must be "md_*" where * is not all digits.
5092          * We allocate an array with a large free minor number, and
5093          * set the name to val.  val must not already be an active name.
5094          */
5095         int len = strlen(val);
5096         char buf[DISK_NAME_LEN];
5097
5098         while (len && val[len-1] == '\n')
5099                 len--;
5100         if (len >= DISK_NAME_LEN)
5101                 return -E2BIG;
5102         strlcpy(buf, val, len+1);
5103         if (strncmp(buf, "md_", 3) != 0)
5104                 return -EINVAL;
5105         return md_alloc(0, buf);
5106 }
5107
5108 static void md_safemode_timeout(unsigned long data)
5109 {
5110         struct mddev *mddev = (struct mddev *) data;
5111
5112         if (!atomic_read(&mddev->writes_pending)) {
5113                 mddev->safemode = 1;
5114                 if (mddev->external)
5115                         sysfs_notify_dirent_safe(mddev->sysfs_state);
5116         }
5117         md_wakeup_thread(mddev->thread);
5118 }
5119
5120 static int start_dirty_degraded;
5121
5122 int md_run(struct mddev *mddev)
5123 {
5124         int err;
5125         struct md_rdev *rdev;
5126         struct md_personality *pers;
5127
5128         if (list_empty(&mddev->disks))
5129                 /* cannot run an array with no devices.. */
5130                 return -EINVAL;
5131
5132         if (mddev->pers)
5133                 return -EBUSY;
5134         /* Cannot run until previous stop completes properly */
5135         if (mddev->sysfs_active)
5136                 return -EBUSY;
5137
5138         /*
5139          * Analyze all RAID superblock(s)
5140          */
5141         if (!mddev->raid_disks) {
5142                 if (!mddev->persistent)
5143                         return -EINVAL;
5144                 analyze_sbs(mddev);
5145         }
5146
5147         if (mddev->level != LEVEL_NONE)
5148                 request_module("md-level-%d", mddev->level);
5149         else if (mddev->clevel[0])
5150                 request_module("md-%s", mddev->clevel);
5151
5152         /*
5153          * Drop all container device buffers, from now on
5154          * the only valid external interface is through the md
5155          * device.
5156          */
5157         rdev_for_each(rdev, mddev) {
5158                 if (test_bit(Faulty, &rdev->flags))
5159                         continue;
5160                 sync_blockdev(rdev->bdev);
5161                 invalidate_bdev(rdev->bdev);
5162
5163                 /* perform some consistency tests on the device.
5164                  * We don't want the data to overlap the metadata,
5165                  * Internal Bitmap issues have been handled elsewhere.
5166                  */
5167                 if (rdev->meta_bdev) {
5168                         /* Nothing to check */;
5169                 } else if (rdev->data_offset < rdev->sb_start) {
5170                         if (mddev->dev_sectors &&
5171                             rdev->data_offset + mddev->dev_sectors
5172                             > rdev->sb_start) {
5173                                 pr_warn("md: %s: data overlaps metadata\n",
5174                                         mdname(mddev));
5175                                 return -EINVAL;
5176                         }
5177                 } else {
5178                         if (rdev->sb_start + rdev->sb_size/512
5179                             > rdev->data_offset) {
5180                                 pr_warn("md: %s: metadata overlaps data\n",
5181                                         mdname(mddev));
5182                                 return -EINVAL;
5183                         }
5184                 }
5185                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5186         }
5187
5188         if (mddev->bio_set == NULL)
5189                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5190
5191         spin_lock(&pers_lock);
5192         pers = find_pers(mddev->level, mddev->clevel);
5193         if (!pers || !try_module_get(pers->owner)) {
5194                 spin_unlock(&pers_lock);
5195                 if (mddev->level != LEVEL_NONE)
5196                         pr_warn("md: personality for level %d is not loaded!\n",
5197                                 mddev->level);
5198                 else
5199                         pr_warn("md: personality for level %s is not loaded!\n",
5200                                 mddev->clevel);
5201                 return -EINVAL;
5202         }
5203         spin_unlock(&pers_lock);
5204         if (mddev->level != pers->level) {
5205                 mddev->level = pers->level;
5206                 mddev->new_level = pers->level;
5207         }
5208         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5209
5210         if (mddev->reshape_position != MaxSector &&
5211             pers->start_reshape == NULL) {
5212                 /* This personality cannot handle reshaping... */
5213                 module_put(pers->owner);
5214                 return -EINVAL;
5215         }
5216
5217         if (pers->sync_request) {
5218                 /* Warn if this is a potentially silly
5219                  * configuration.
5220                  */
5221                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5222                 struct md_rdev *rdev2;
5223                 int warned = 0;
5224
5225                 rdev_for_each(rdev, mddev)
5226                         rdev_for_each(rdev2, mddev) {
5227                                 if (rdev < rdev2 &&
5228                                     rdev->bdev->bd_contains ==
5229                                     rdev2->bdev->bd_contains) {
5230                                         pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5231                                                 mdname(mddev),
5232                                                 bdevname(rdev->bdev,b),
5233                                                 bdevname(rdev2->bdev,b2));
5234                                         warned = 1;
5235                                 }
5236                         }
5237
5238                 if (warned)
5239                         pr_warn("True protection against single-disk failure might be compromised.\n");
5240         }
5241
5242         mddev->recovery = 0;
5243         /* may be over-ridden by personality */
5244         mddev->resync_max_sectors = mddev->dev_sectors;
5245
5246         mddev->ok_start_degraded = start_dirty_degraded;
5247
5248         if (start_readonly && mddev->ro == 0)
5249                 mddev->ro = 2; /* read-only, but switch on first write */
5250
5251         err = pers->run(mddev);
5252         if (err)
5253                 pr_warn("md: pers->run() failed ...\n");
5254         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5255                 WARN_ONCE(!mddev->external_size,
5256                           "%s: default size too small, but 'external_size' not in effect?\n",
5257                           __func__);
5258                 pr_warn("md: invalid array_size %llu > default size %llu\n",
5259                         (unsigned long long)mddev->array_sectors / 2,
5260                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
5261                 err = -EINVAL;
5262         }
5263         if (err == 0 && pers->sync_request &&
5264             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5265                 struct bitmap *bitmap;
5266
5267                 bitmap = bitmap_create(mddev, -1);
5268                 if (IS_ERR(bitmap)) {
5269                         err = PTR_ERR(bitmap);
5270                         pr_warn("%s: failed to create bitmap (%d)\n",
5271                                 mdname(mddev), err);
5272                 } else
5273                         mddev->bitmap = bitmap;
5274
5275         }
5276         if (err) {
5277                 mddev_detach(mddev);
5278                 if (mddev->private)
5279                         pers->free(mddev, mddev->private);
5280                 mddev->private = NULL;
5281                 module_put(pers->owner);
5282                 bitmap_destroy(mddev);
5283                 return err;
5284         }
5285         if (mddev->queue) {
5286                 bool nonrot = true;
5287
5288                 rdev_for_each(rdev, mddev) {
5289                         if (rdev->raid_disk >= 0 &&
5290                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5291                                 nonrot = false;
5292                                 break;
5293                         }
5294                 }
5295                 if (mddev->degraded)
5296                         nonrot = false;
5297                 if (nonrot)
5298                         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5299                 else
5300                         queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5301                 mddev->queue->backing_dev_info.congested_data = mddev;
5302                 mddev->queue->backing_dev_info.congested_fn = md_congested;
5303         }
5304         if (pers->sync_request) {
5305                 if (mddev->kobj.sd &&
5306                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5307                         pr_warn("md: cannot register extra attributes for %s\n",
5308                                 mdname(mddev));
5309                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5310         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5311                 mddev->ro = 0;
5312
5313         atomic_set(&mddev->writes_pending,0);
5314         atomic_set(&mddev->max_corr_read_errors,
5315                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5316         mddev->safemode = 0;
5317         if (mddev_is_clustered(mddev))
5318                 mddev->safemode_delay = 0;
5319         else
5320                 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5321         mddev->in_sync = 1;
5322         smp_wmb();
5323         spin_lock(&mddev->lock);
5324         mddev->pers = pers;
5325         spin_unlock(&mddev->lock);
5326         rdev_for_each(rdev, mddev)
5327                 if (rdev->raid_disk >= 0)
5328                         if (sysfs_link_rdev(mddev, rdev))
5329                                 /* failure here is OK */;
5330
5331         if (mddev->degraded && !mddev->ro)
5332                 /* This ensures that recovering status is reported immediately
5333                  * via sysfs - until a lack of spares is confirmed.
5334                  */
5335                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5336         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5337
5338         if (mddev->flags & MD_UPDATE_SB_FLAGS)
5339                 md_update_sb(mddev, 0);
5340
5341         md_new_event(mddev);
5342         sysfs_notify_dirent_safe(mddev->sysfs_state);
5343         sysfs_notify_dirent_safe(mddev->sysfs_action);
5344         sysfs_notify(&mddev->kobj, NULL, "degraded");
5345         return 0;
5346 }
5347 EXPORT_SYMBOL_GPL(md_run);
5348
5349 static int do_md_run(struct mddev *mddev)
5350 {
5351         int err;
5352
5353         err = md_run(mddev);
5354         if (err)
5355                 goto out;
5356         err = bitmap_load(mddev);
5357         if (err) {
5358                 bitmap_destroy(mddev);
5359                 goto out;
5360         }
5361
5362         if (mddev_is_clustered(mddev))
5363                 md_allow_write(mddev);
5364
5365         md_wakeup_thread(mddev->thread);
5366         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5367
5368         set_capacity(mddev->gendisk, mddev->array_sectors);
5369         revalidate_disk(mddev->gendisk);
5370         mddev->changed = 1;
5371         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5372 out:
5373         return err;
5374 }
5375
5376 static int restart_array(struct mddev *mddev)
5377 {
5378         struct gendisk *disk = mddev->gendisk;
5379
5380         /* Complain if it has no devices */
5381         if (list_empty(&mddev->disks))
5382                 return -ENXIO;
5383         if (!mddev->pers)
5384                 return -EINVAL;
5385         if (!mddev->ro)
5386                 return -EBUSY;
5387         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5388                 struct md_rdev *rdev;
5389                 bool has_journal = false;
5390
5391                 rcu_read_lock();
5392                 rdev_for_each_rcu(rdev, mddev) {
5393                         if (test_bit(Journal, &rdev->flags) &&
5394                             !test_bit(Faulty, &rdev->flags)) {
5395                                 has_journal = true;
5396                                 break;
5397                         }
5398                 }
5399                 rcu_read_unlock();
5400
5401                 /* Don't restart rw with journal missing/faulty */
5402                 if (!has_journal)
5403                         return -EINVAL;
5404         }
5405
5406         mddev->safemode = 0;
5407         mddev->ro = 0;
5408         set_disk_ro(disk, 0);
5409         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
5410         /* Kick recovery or resync if necessary */
5411         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5412         md_wakeup_thread(mddev->thread);
5413         md_wakeup_thread(mddev->sync_thread);
5414         sysfs_notify_dirent_safe(mddev->sysfs_state);
5415         return 0;
5416 }
5417
5418 static void md_clean(struct mddev *mddev)
5419 {
5420         mddev->array_sectors = 0;
5421         mddev->external_size = 0;
5422         mddev->dev_sectors = 0;
5423         mddev->raid_disks = 0;
5424         mddev->recovery_cp = 0;
5425         mddev->resync_min = 0;
5426         mddev->resync_max = MaxSector;
5427         mddev->reshape_position = MaxSector;
5428         mddev->external = 0;
5429         mddev->persistent = 0;
5430         mddev->level = LEVEL_NONE;
5431         mddev->clevel[0] = 0;
5432         mddev->flags = 0;
5433         mddev->ro = 0;
5434         mddev->metadata_type[0] = 0;
5435         mddev->chunk_sectors = 0;
5436         mddev->ctime = mddev->utime = 0;
5437         mddev->layout = 0;
5438         mddev->max_disks = 0;
5439         mddev->events = 0;
5440         mddev->can_decrease_events = 0;
5441         mddev->delta_disks = 0;
5442         mddev->reshape_backwards = 0;
5443         mddev->new_level = LEVEL_NONE;
5444         mddev->new_layout = 0;
5445         mddev->new_chunk_sectors = 0;
5446         mddev->curr_resync = 0;
5447         atomic64_set(&mddev->resync_mismatches, 0);
5448         mddev->suspend_lo = mddev->suspend_hi = 0;
5449         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5450         mddev->recovery = 0;
5451         mddev->in_sync = 0;
5452         mddev->changed = 0;
5453         mddev->degraded = 0;
5454         mddev->safemode = 0;
5455         mddev->private = NULL;
5456         mddev->cluster_info = NULL;
5457         mddev->bitmap_info.offset = 0;
5458         mddev->bitmap_info.default_offset = 0;
5459         mddev->bitmap_info.default_space = 0;
5460         mddev->bitmap_info.chunksize = 0;
5461         mddev->bitmap_info.daemon_sleep = 0;
5462         mddev->bitmap_info.max_write_behind = 0;
5463         mddev->bitmap_info.nodes = 0;
5464 }
5465
5466 static void __md_stop_writes(struct mddev *mddev)
5467 {
5468         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5469         flush_workqueue(md_misc_wq);
5470         if (mddev->sync_thread) {
5471                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5472                 md_reap_sync_thread(mddev);
5473         }
5474
5475         del_timer_sync(&mddev->safemode_timer);
5476
5477         bitmap_flush(mddev);
5478
5479         if (mddev->ro == 0 &&
5480             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
5481              (mddev->flags & MD_UPDATE_SB_FLAGS))) {
5482                 /* mark array as shutdown cleanly */
5483                 if (!mddev_is_clustered(mddev))
5484                         mddev->in_sync = 1;
5485                 md_update_sb(mddev, 1);
5486         }
5487 }
5488
5489 void md_stop_writes(struct mddev *mddev)
5490 {
5491         mddev_lock_nointr(mddev);
5492         __md_stop_writes(mddev);
5493         mddev_unlock(mddev);
5494 }
5495 EXPORT_SYMBOL_GPL(md_stop_writes);
5496
5497 static void mddev_detach(struct mddev *mddev)
5498 {
5499         struct bitmap *bitmap = mddev->bitmap;
5500         /* wait for behind writes to complete */
5501         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5502                 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
5503                          mdname(mddev));
5504                 /* need to kick something here to make sure I/O goes? */
5505                 wait_event(bitmap->behind_wait,
5506                            atomic_read(&bitmap->behind_writes) == 0);
5507         }
5508         if (mddev->pers && mddev->pers->quiesce) {
5509                 mddev->pers->quiesce(mddev, 1);
5510                 mddev->pers->quiesce(mddev, 0);
5511         }
5512         md_unregister_thread(&mddev->thread);
5513         if (mddev->queue)
5514                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5515 }
5516
5517 static void __md_stop(struct mddev *mddev)
5518 {
5519         struct md_personality *pers = mddev->pers;
5520         mddev_detach(mddev);
5521         /* Ensure ->event_work is done */
5522         flush_workqueue(md_misc_wq);
5523         spin_lock(&mddev->lock);
5524         mddev->pers = NULL;
5525         spin_unlock(&mddev->lock);
5526         pers->free(mddev, mddev->private);
5527         mddev->private = NULL;
5528         if (pers->sync_request && mddev->to_remove == NULL)
5529                 mddev->to_remove = &md_redundancy_group;
5530         module_put(pers->owner);
5531         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5532 }
5533
5534 void md_stop(struct mddev *mddev)
5535 {
5536         /* stop the array and free an attached data structures.
5537          * This is called from dm-raid
5538          */
5539         __md_stop(mddev);
5540         bitmap_destroy(mddev);
5541         if (mddev->bio_set)
5542                 bioset_free(mddev->bio_set);
5543 }
5544
5545 EXPORT_SYMBOL_GPL(md_stop);
5546
5547 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5548 {
5549         int err = 0;
5550         int did_freeze = 0;
5551
5552         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5553                 did_freeze = 1;
5554                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5555                 md_wakeup_thread(mddev->thread);
5556         }
5557         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5558                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5559         if (mddev->sync_thread)
5560                 /* Thread might be blocked waiting for metadata update
5561                  * which will now never happen */
5562                 wake_up_process(mddev->sync_thread->tsk);
5563
5564         if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
5565                 return -EBUSY;
5566         mddev_unlock(mddev);
5567         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5568                                           &mddev->recovery));
5569         wait_event(mddev->sb_wait,
5570                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
5571         mddev_lock_nointr(mddev);
5572
5573         mutex_lock(&mddev->open_mutex);
5574         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5575             mddev->sync_thread ||
5576             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5577                 pr_warn("md: %s still in use.\n",mdname(mddev));
5578                 if (did_freeze) {
5579                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5580                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5581                         md_wakeup_thread(mddev->thread);
5582                 }
5583                 err = -EBUSY;
5584                 goto out;
5585         }
5586         if (mddev->pers) {
5587                 __md_stop_writes(mddev);
5588
5589                 err  = -ENXIO;
5590                 if (mddev->ro==1)
5591                         goto out;
5592                 mddev->ro = 1;
5593                 set_disk_ro(mddev->gendisk, 1);
5594                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5595                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5596                 md_wakeup_thread(mddev->thread);
5597                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5598                 err = 0;
5599         }
5600 out:
5601         mutex_unlock(&mddev->open_mutex);
5602         return err;
5603 }
5604
5605 /* mode:
5606  *   0 - completely stop and dis-assemble array
5607  *   2 - stop but do not disassemble array
5608  */
5609 static int do_md_stop(struct mddev *mddev, int mode,
5610                       struct block_device *bdev)
5611 {
5612         struct gendisk *disk = mddev->gendisk;
5613         struct md_rdev *rdev;
5614         int did_freeze = 0;
5615
5616         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5617                 did_freeze = 1;
5618                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5619                 md_wakeup_thread(mddev->thread);
5620         }
5621         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5622                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5623         if (mddev->sync_thread)
5624                 /* Thread might be blocked waiting for metadata update
5625                  * which will now never happen */
5626                 wake_up_process(mddev->sync_thread->tsk);
5627
5628         mddev_unlock(mddev);
5629         wait_event(resync_wait, (mddev->sync_thread == NULL &&
5630                                  !test_bit(MD_RECOVERY_RUNNING,
5631                                            &mddev->recovery)));
5632         mddev_lock_nointr(mddev);
5633
5634         mutex_lock(&mddev->open_mutex);
5635         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5636             mddev->sysfs_active ||
5637             mddev->sync_thread ||
5638             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5639                 pr_warn("md: %s still in use.\n",mdname(mddev));
5640                 mutex_unlock(&mddev->open_mutex);
5641                 if (did_freeze) {
5642                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5643                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5644                         md_wakeup_thread(mddev->thread);
5645                 }
5646                 return -EBUSY;
5647         }
5648         if (mddev->pers) {
5649                 if (mddev->ro)
5650                         set_disk_ro(disk, 0);
5651
5652                 __md_stop_writes(mddev);
5653                 __md_stop(mddev);
5654                 mddev->queue->backing_dev_info.congested_fn = NULL;
5655
5656                 /* tell userspace to handle 'inactive' */
5657                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5658
5659                 rdev_for_each(rdev, mddev)
5660                         if (rdev->raid_disk >= 0)
5661                                 sysfs_unlink_rdev(mddev, rdev);
5662
5663                 set_capacity(disk, 0);
5664                 mutex_unlock(&mddev->open_mutex);
5665                 mddev->changed = 1;
5666                 revalidate_disk(disk);
5667
5668                 if (mddev->ro)
5669                         mddev->ro = 0;
5670         } else
5671                 mutex_unlock(&mddev->open_mutex);
5672         /*
5673          * Free resources if final stop
5674          */
5675         if (mode == 0) {
5676                 pr_info("md: %s stopped.\n", mdname(mddev));
5677
5678                 bitmap_destroy(mddev);
5679                 if (mddev->bitmap_info.file) {
5680                         struct file *f = mddev->bitmap_info.file;
5681                         spin_lock(&mddev->lock);
5682                         mddev->bitmap_info.file = NULL;
5683                         spin_unlock(&mddev->lock);
5684                         fput(f);
5685                 }
5686                 mddev->bitmap_info.offset = 0;
5687
5688                 export_array(mddev);
5689
5690                 md_clean(mddev);
5691                 if (mddev->hold_active == UNTIL_STOP)
5692                         mddev->hold_active = 0;
5693         }
5694         md_new_event(mddev);
5695         sysfs_notify_dirent_safe(mddev->sysfs_state);
5696         return 0;
5697 }
5698
5699 #ifndef MODULE
5700 static void autorun_array(struct mddev *mddev)
5701 {
5702         struct md_rdev *rdev;
5703         int err;
5704
5705         if (list_empty(&mddev->disks))
5706                 return;
5707
5708         pr_info("md: running: ");
5709
5710         rdev_for_each(rdev, mddev) {
5711                 char b[BDEVNAME_SIZE];
5712                 pr_cont("<%s>", bdevname(rdev->bdev,b));
5713         }
5714         pr_cont("\n");
5715
5716         err = do_md_run(mddev);
5717         if (err) {
5718                 pr_warn("md: do_md_run() returned %d\n", err);
5719                 do_md_stop(mddev, 0, NULL);
5720         }
5721 }
5722
5723 /*
5724  * lets try to run arrays based on all disks that have arrived
5725  * until now. (those are in pending_raid_disks)
5726  *
5727  * the method: pick the first pending disk, collect all disks with
5728  * the same UUID, remove all from the pending list and put them into
5729  * the 'same_array' list. Then order this list based on superblock
5730  * update time (freshest comes first), kick out 'old' disks and
5731  * compare superblocks. If everything's fine then run it.
5732  *
5733  * If "unit" is allocated, then bump its reference count
5734  */
5735 static void autorun_devices(int part)
5736 {
5737         struct md_rdev *rdev0, *rdev, *tmp;
5738         struct mddev *mddev;
5739         char b[BDEVNAME_SIZE];
5740
5741         pr_info("md: autorun ...\n");
5742         while (!list_empty(&pending_raid_disks)) {
5743                 int unit;
5744                 dev_t dev;
5745                 LIST_HEAD(candidates);
5746                 rdev0 = list_entry(pending_raid_disks.next,
5747                                          struct md_rdev, same_set);
5748
5749                 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
5750                 INIT_LIST_HEAD(&candidates);
5751                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5752                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5753                                 pr_debug("md:  adding %s ...\n",
5754                                          bdevname(rdev->bdev,b));
5755                                 list_move(&rdev->same_set, &candidates);
5756                         }
5757                 /*
5758                  * now we have a set of devices, with all of them having
5759                  * mostly sane superblocks. It's time to allocate the
5760                  * mddev.
5761                  */
5762                 if (part) {
5763                         dev = MKDEV(mdp_major,
5764                                     rdev0->preferred_minor << MdpMinorShift);
5765                         unit = MINOR(dev) >> MdpMinorShift;
5766                 } else {
5767                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5768                         unit = MINOR(dev);
5769                 }
5770                 if (rdev0->preferred_minor != unit) {
5771                         pr_warn("md: unit number in %s is bad: %d\n",
5772                                 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5773                         break;
5774                 }
5775
5776                 md_probe(dev, NULL, NULL);
5777                 mddev = mddev_find(dev);
5778                 if (!mddev || !mddev->gendisk) {
5779                         if (mddev)
5780                                 mddev_put(mddev);
5781                         break;
5782                 }
5783                 if (mddev_lock(mddev))
5784                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
5785                 else if (mddev->raid_disks || mddev->major_version
5786                          || !list_empty(&mddev->disks)) {
5787                         pr_warn("md: %s already running, cannot run %s\n",
5788                                 mdname(mddev), bdevname(rdev0->bdev,b));
5789                         mddev_unlock(mddev);
5790                 } else {
5791                         pr_debug("md: created %s\n", mdname(mddev));
5792                         mddev->persistent = 1;
5793                         rdev_for_each_list(rdev, tmp, &candidates) {
5794                                 list_del_init(&rdev->same_set);
5795                                 if (bind_rdev_to_array(rdev, mddev))
5796                                         export_rdev(rdev);
5797                         }
5798                         autorun_array(mddev);
5799                         mddev_unlock(mddev);
5800                 }
5801                 /* on success, candidates will be empty, on error
5802                  * it won't...
5803                  */
5804                 rdev_for_each_list(rdev, tmp, &candidates) {
5805                         list_del_init(&rdev->same_set);
5806                         export_rdev(rdev);
5807                 }
5808                 mddev_put(mddev);
5809         }
5810         pr_info("md: ... autorun DONE.\n");
5811 }
5812 #endif /* !MODULE */
5813
5814 static int get_version(void __user *arg)
5815 {
5816         mdu_version_t ver;
5817
5818         ver.major = MD_MAJOR_VERSION;
5819         ver.minor = MD_MINOR_VERSION;
5820         ver.patchlevel = MD_PATCHLEVEL_VERSION;
5821
5822         if (copy_to_user(arg, &ver, sizeof(ver)))
5823                 return -EFAULT;
5824
5825         return 0;
5826 }
5827
5828 static int get_array_info(struct mddev *mddev, void __user *arg)
5829 {
5830         mdu_array_info_t info;
5831         int nr,working,insync,failed,spare;
5832         struct md_rdev *rdev;
5833
5834         nr = working = insync = failed = spare = 0;
5835         rcu_read_lock();
5836         rdev_for_each_rcu(rdev, mddev) {
5837                 nr++;
5838                 if (test_bit(Faulty, &rdev->flags))
5839                         failed++;
5840                 else {
5841                         working++;
5842                         if (test_bit(In_sync, &rdev->flags))
5843                                 insync++;
5844                         else if (test_bit(Journal, &rdev->flags))
5845                                 /* TODO: add journal count to md_u.h */
5846                                 ;
5847                         else
5848                                 spare++;
5849                 }
5850         }
5851         rcu_read_unlock();
5852
5853         info.major_version = mddev->major_version;
5854         info.minor_version = mddev->minor_version;
5855         info.patch_version = MD_PATCHLEVEL_VERSION;
5856         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
5857         info.level         = mddev->level;
5858         info.size          = mddev->dev_sectors / 2;
5859         if (info.size != mddev->dev_sectors / 2) /* overflow */
5860                 info.size = -1;
5861         info.nr_disks      = nr;
5862         info.raid_disks    = mddev->raid_disks;
5863         info.md_minor      = mddev->md_minor;
5864         info.not_persistent= !mddev->persistent;
5865
5866         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
5867         info.state         = 0;
5868         if (mddev->in_sync)
5869                 info.state = (1<<MD_SB_CLEAN);
5870         if (mddev->bitmap && mddev->bitmap_info.offset)
5871                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5872         if (mddev_is_clustered(mddev))
5873                 info.state |= (1<<MD_SB_CLUSTERED);
5874         info.active_disks  = insync;
5875         info.working_disks = working;
5876         info.failed_disks  = failed;
5877         info.spare_disks   = spare;
5878
5879         info.layout        = mddev->layout;
5880         info.chunk_size    = mddev->chunk_sectors << 9;
5881
5882         if (copy_to_user(arg, &info, sizeof(info)))
5883                 return -EFAULT;
5884
5885         return 0;
5886 }
5887
5888 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5889 {
5890         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5891         char *ptr;
5892         int err;
5893
5894         file = kzalloc(sizeof(*file), GFP_NOIO);
5895         if (!file)
5896                 return -ENOMEM;
5897
5898         err = 0;
5899         spin_lock(&mddev->lock);
5900         /* bitmap enabled */
5901         if (mddev->bitmap_info.file) {
5902                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
5903                                 sizeof(file->pathname));
5904                 if (IS_ERR(ptr))
5905                         err = PTR_ERR(ptr);
5906                 else
5907                         memmove(file->pathname, ptr,
5908                                 sizeof(file->pathname)-(ptr-file->pathname));
5909         }
5910         spin_unlock(&mddev->lock);
5911
5912         if (err == 0 &&
5913             copy_to_user(arg, file, sizeof(*file)))
5914                 err = -EFAULT;
5915
5916         kfree(file);
5917         return err;
5918 }
5919
5920 static int get_disk_info(struct mddev *mddev, void __user * arg)
5921 {
5922         mdu_disk_info_t info;
5923         struct md_rdev *rdev;
5924
5925         if (copy_from_user(&info, arg, sizeof(info)))
5926                 return -EFAULT;
5927
5928         rcu_read_lock();
5929         rdev = md_find_rdev_nr_rcu(mddev, info.number);
5930         if (rdev) {
5931                 info.major = MAJOR(rdev->bdev->bd_dev);
5932                 info.minor = MINOR(rdev->bdev->bd_dev);
5933                 info.raid_disk = rdev->raid_disk;
5934                 info.state = 0;
5935                 if (test_bit(Faulty, &rdev->flags))
5936                         info.state |= (1<<MD_DISK_FAULTY);
5937                 else if (test_bit(In_sync, &rdev->flags)) {
5938                         info.state |= (1<<MD_DISK_ACTIVE);
5939                         info.state |= (1<<MD_DISK_SYNC);
5940                 }
5941                 if (test_bit(Journal, &rdev->flags))
5942                         info.state |= (1<<MD_DISK_JOURNAL);
5943                 if (test_bit(WriteMostly, &rdev->flags))
5944                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5945         } else {
5946                 info.major = info.minor = 0;
5947                 info.raid_disk = -1;
5948                 info.state = (1<<MD_DISK_REMOVED);
5949         }
5950         rcu_read_unlock();
5951
5952         if (copy_to_user(arg, &info, sizeof(info)))
5953                 return -EFAULT;
5954
5955         return 0;
5956 }
5957
5958 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
5959 {
5960         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5961         struct md_rdev *rdev;
5962         dev_t dev = MKDEV(info->major,info->minor);
5963
5964         if (mddev_is_clustered(mddev) &&
5965                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
5966                 pr_warn("%s: Cannot add to clustered mddev.\n",
5967                         mdname(mddev));
5968                 return -EINVAL;
5969         }
5970
5971         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
5972                 return -EOVERFLOW;
5973
5974         if (!mddev->raid_disks) {
5975                 int err;
5976                 /* expecting a device which has a superblock */
5977                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
5978                 if (IS_ERR(rdev)) {
5979                         pr_warn("md: md_import_device returned %ld\n",
5980                                 PTR_ERR(rdev));
5981                         return PTR_ERR(rdev);
5982                 }
5983                 if (!list_empty(&mddev->disks)) {
5984                         struct md_rdev *rdev0
5985                                 = list_entry(mddev->disks.next,
5986                                              struct md_rdev, same_set);
5987                         err = super_types[mddev->major_version]
5988                                 .load_super(rdev, rdev0, mddev->minor_version);
5989                         if (err < 0) {
5990                                 pr_warn("md: %s has different UUID to %s\n",
5991                                         bdevname(rdev->bdev,b),
5992                                         bdevname(rdev0->bdev,b2));
5993                                 export_rdev(rdev);
5994                                 return -EINVAL;
5995                         }
5996                 }
5997                 err = bind_rdev_to_array(rdev, mddev);
5998                 if (err)
5999                         export_rdev(rdev);
6000                 return err;
6001         }
6002
6003         /*
6004          * add_new_disk can be used once the array is assembled
6005          * to add "hot spares".  They must already have a superblock
6006          * written
6007          */
6008         if (mddev->pers) {
6009                 int err;
6010                 if (!mddev->pers->hot_add_disk) {
6011                         pr_warn("%s: personality does not support diskops!\n",
6012                                 mdname(mddev));
6013                         return -EINVAL;
6014                 }
6015                 if (mddev->persistent)
6016                         rdev = md_import_device(dev, mddev->major_version,
6017                                                 mddev->minor_version);
6018                 else
6019                         rdev = md_import_device(dev, -1, -1);
6020                 if (IS_ERR(rdev)) {
6021                         pr_warn("md: md_import_device returned %ld\n",
6022                                 PTR_ERR(rdev));
6023                         return PTR_ERR(rdev);
6024                 }
6025                 /* set saved_raid_disk if appropriate */
6026                 if (!mddev->persistent) {
6027                         if (info->state & (1<<MD_DISK_SYNC)  &&
6028                             info->raid_disk < mddev->raid_disks) {
6029                                 rdev->raid_disk = info->raid_disk;
6030                                 set_bit(In_sync, &rdev->flags);
6031                                 clear_bit(Bitmap_sync, &rdev->flags);
6032                         } else
6033                                 rdev->raid_disk = -1;
6034                         rdev->saved_raid_disk = rdev->raid_disk;
6035                 } else
6036                         super_types[mddev->major_version].
6037                                 validate_super(mddev, rdev);
6038                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6039                      rdev->raid_disk != info->raid_disk) {
6040                         /* This was a hot-add request, but events doesn't
6041                          * match, so reject it.
6042                          */
6043                         export_rdev(rdev);
6044                         return -EINVAL;
6045                 }
6046
6047                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6048                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6049                         set_bit(WriteMostly, &rdev->flags);
6050                 else
6051                         clear_bit(WriteMostly, &rdev->flags);
6052
6053                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6054                         struct md_rdev *rdev2;
6055                         bool has_journal = false;
6056
6057                         /* make sure no existing journal disk */
6058                         rdev_for_each(rdev2, mddev) {
6059                                 if (test_bit(Journal, &rdev2->flags)) {
6060                                         has_journal = true;
6061                                         break;
6062                                 }
6063                         }
6064                         if (has_journal) {
6065                                 export_rdev(rdev);
6066                                 return -EBUSY;
6067                         }
6068                         set_bit(Journal, &rdev->flags);
6069                 }
6070                 /*
6071                  * check whether the device shows up in other nodes
6072                  */
6073                 if (mddev_is_clustered(mddev)) {
6074                         if (info->state & (1 << MD_DISK_CANDIDATE))
6075                                 set_bit(Candidate, &rdev->flags);
6076                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6077                                 /* --add initiated by this node */
6078                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6079                                 if (err) {
6080                                         export_rdev(rdev);
6081                                         return err;
6082                                 }
6083                         }
6084                 }
6085
6086                 rdev->raid_disk = -1;
6087                 err = bind_rdev_to_array(rdev, mddev);
6088
6089                 if (err)
6090                         export_rdev(rdev);
6091
6092                 if (mddev_is_clustered(mddev)) {
6093                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6094                                 if (!err) {
6095                                         err = md_cluster_ops->new_disk_ack(mddev,
6096                                                 err == 0);
6097                                         if (err)
6098                                                 md_kick_rdev_from_array(rdev);
6099                                 }
6100                         } else {
6101                                 if (err)
6102                                         md_cluster_ops->add_new_disk_cancel(mddev);
6103                                 else
6104                                         err = add_bound_rdev(rdev);
6105                         }
6106
6107                 } else if (!err)
6108                         err = add_bound_rdev(rdev);
6109
6110                 return err;
6111         }
6112
6113         /* otherwise, add_new_disk is only allowed
6114          * for major_version==0 superblocks
6115          */
6116         if (mddev->major_version != 0) {
6117                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6118                 return -EINVAL;
6119         }
6120
6121         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6122                 int err;
6123                 rdev = md_import_device(dev, -1, 0);
6124                 if (IS_ERR(rdev)) {
6125                         pr_warn("md: error, md_import_device() returned %ld\n",
6126                                 PTR_ERR(rdev));
6127                         return PTR_ERR(rdev);
6128                 }
6129                 rdev->desc_nr = info->number;
6130                 if (info->raid_disk < mddev->raid_disks)
6131                         rdev->raid_disk = info->raid_disk;
6132                 else
6133                         rdev->raid_disk = -1;
6134
6135                 if (rdev->raid_disk < mddev->raid_disks)
6136                         if (info->state & (1<<MD_DISK_SYNC))
6137                                 set_bit(In_sync, &rdev->flags);
6138
6139                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6140                         set_bit(WriteMostly, &rdev->flags);
6141
6142                 if (!mddev->persistent) {
6143                         pr_debug("md: nonpersistent superblock ...\n");
6144                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6145                 } else
6146                         rdev->sb_start = calc_dev_sboffset(rdev);
6147                 rdev->sectors = rdev->sb_start;
6148
6149                 err = bind_rdev_to_array(rdev, mddev);
6150                 if (err) {
6151                         export_rdev(rdev);
6152                         return err;
6153                 }
6154         }
6155
6156         return 0;
6157 }
6158
6159 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6160 {
6161         char b[BDEVNAME_SIZE];
6162         struct md_rdev *rdev;
6163
6164         rdev = find_rdev(mddev, dev);
6165         if (!rdev)
6166                 return -ENXIO;
6167
6168         if (rdev->raid_disk < 0)
6169                 goto kick_rdev;
6170
6171         clear_bit(Blocked, &rdev->flags);
6172         remove_and_add_spares(mddev, rdev);
6173
6174         if (rdev->raid_disk >= 0)
6175                 goto busy;
6176
6177 kick_rdev:
6178         if (mddev_is_clustered(mddev))
6179                 md_cluster_ops->remove_disk(mddev, rdev);
6180
6181         md_kick_rdev_from_array(rdev);
6182         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6183         if (mddev->thread)
6184                 md_wakeup_thread(mddev->thread);
6185         else
6186                 md_update_sb(mddev, 1);
6187         md_new_event(mddev);
6188
6189         return 0;
6190 busy:
6191         pr_debug("md: cannot remove active disk %s from %s ...\n",
6192                  bdevname(rdev->bdev,b), mdname(mddev));
6193         return -EBUSY;
6194 }
6195
6196 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6197 {
6198         char b[BDEVNAME_SIZE];
6199         int err;
6200         struct md_rdev *rdev;
6201
6202         if (!mddev->pers)
6203                 return -ENODEV;
6204
6205         if (mddev->major_version != 0) {
6206                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6207                         mdname(mddev));
6208                 return -EINVAL;
6209         }
6210         if (!mddev->pers->hot_add_disk) {
6211                 pr_warn("%s: personality does not support diskops!\n",
6212                         mdname(mddev));
6213                 return -EINVAL;
6214         }
6215
6216         rdev = md_import_device(dev, -1, 0);
6217         if (IS_ERR(rdev)) {
6218                 pr_warn("md: error, md_import_device() returned %ld\n",
6219                         PTR_ERR(rdev));
6220                 return -EINVAL;
6221         }
6222
6223         if (mddev->persistent)
6224                 rdev->sb_start = calc_dev_sboffset(rdev);
6225         else
6226                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6227
6228         rdev->sectors = rdev->sb_start;
6229
6230         if (test_bit(Faulty, &rdev->flags)) {
6231                 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
6232                         bdevname(rdev->bdev,b), mdname(mddev));
6233                 err = -EINVAL;
6234                 goto abort_export;
6235         }
6236
6237         clear_bit(In_sync, &rdev->flags);
6238         rdev->desc_nr = -1;
6239         rdev->saved_raid_disk = -1;
6240         err = bind_rdev_to_array(rdev, mddev);
6241         if (err)
6242                 goto abort_export;
6243
6244         /*
6245          * The rest should better be atomic, we can have disk failures
6246          * noticed in interrupt contexts ...
6247          */
6248
6249         rdev->raid_disk = -1;
6250
6251         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6252         if (!mddev->thread)
6253                 md_update_sb(mddev, 1);
6254         /*
6255          * Kick recovery, maybe this spare has to be added to the
6256          * array immediately.
6257          */
6258         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6259         md_wakeup_thread(mddev->thread);
6260         md_new_event(mddev);
6261         return 0;
6262
6263 abort_export:
6264         export_rdev(rdev);
6265         return err;
6266 }
6267
6268 static int set_bitmap_file(struct mddev *mddev, int fd)
6269 {
6270         int err = 0;
6271
6272         if (mddev->pers) {
6273                 if (!mddev->pers->quiesce || !mddev->thread)
6274                         return -EBUSY;
6275                 if (mddev->recovery || mddev->sync_thread)
6276                         return -EBUSY;
6277                 /* we should be able to change the bitmap.. */
6278         }
6279
6280         if (fd >= 0) {
6281                 struct inode *inode;
6282                 struct file *f;
6283
6284                 if (mddev->bitmap || mddev->bitmap_info.file)
6285                         return -EEXIST; /* cannot add when bitmap is present */
6286                 f = fget(fd);
6287
6288                 if (f == NULL) {
6289                         pr_warn("%s: error: failed to get bitmap file\n",
6290                                 mdname(mddev));
6291                         return -EBADF;
6292                 }
6293
6294                 inode = f->f_mapping->host;
6295                 if (!S_ISREG(inode->i_mode)) {
6296                         pr_warn("%s: error: bitmap file must be a regular file\n",
6297                                 mdname(mddev));
6298                         err = -EBADF;
6299                 } else if (!(f->f_mode & FMODE_WRITE)) {
6300                         pr_warn("%s: error: bitmap file must open for write\n",
6301                                 mdname(mddev));
6302                         err = -EBADF;
6303                 } else if (atomic_read(&inode->i_writecount) != 1) {
6304                         pr_warn("%s: error: bitmap file is already in use\n",
6305                                 mdname(mddev));
6306                         err = -EBUSY;
6307                 }
6308                 if (err) {
6309                         fput(f);
6310                         return err;
6311                 }
6312                 mddev->bitmap_info.file = f;
6313                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6314         } else if (mddev->bitmap == NULL)
6315                 return -ENOENT; /* cannot remove what isn't there */
6316         err = 0;
6317         if (mddev->pers) {
6318                 mddev->pers->quiesce(mddev, 1);
6319                 if (fd >= 0) {
6320                         struct bitmap *bitmap;
6321
6322                         bitmap = bitmap_create(mddev, -1);
6323                         if (!IS_ERR(bitmap)) {
6324                                 mddev->bitmap = bitmap;
6325                                 err = bitmap_load(mddev);
6326                         } else
6327                                 err = PTR_ERR(bitmap);
6328                 }
6329                 if (fd < 0 || err) {
6330                         bitmap_destroy(mddev);
6331                         fd = -1; /* make sure to put the file */
6332                 }
6333                 mddev->pers->quiesce(mddev, 0);
6334         }
6335         if (fd < 0) {
6336                 struct file *f = mddev->bitmap_info.file;
6337                 if (f) {
6338                         spin_lock(&mddev->lock);
6339                         mddev->bitmap_info.file = NULL;
6340                         spin_unlock(&mddev->lock);
6341                         fput(f);
6342                 }
6343         }
6344
6345         return err;
6346 }
6347
6348 /*
6349  * set_array_info is used two different ways
6350  * The original usage is when creating a new array.
6351  * In this usage, raid_disks is > 0 and it together with
6352  *  level, size, not_persistent,layout,chunksize determine the
6353  *  shape of the array.
6354  *  This will always create an array with a type-0.90.0 superblock.
6355  * The newer usage is when assembling an array.
6356  *  In this case raid_disks will be 0, and the major_version field is
6357  *  use to determine which style super-blocks are to be found on the devices.
6358  *  The minor and patch _version numbers are also kept incase the
6359  *  super_block handler wishes to interpret them.
6360  */
6361 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6362 {
6363
6364         if (info->raid_disks == 0) {
6365                 /* just setting version number for superblock loading */
6366                 if (info->major_version < 0 ||
6367                     info->major_version >= ARRAY_SIZE(super_types) ||
6368                     super_types[info->major_version].name == NULL) {
6369                         /* maybe try to auto-load a module? */
6370                         pr_warn("md: superblock version %d not known\n",
6371                                 info->major_version);
6372                         return -EINVAL;
6373                 }
6374                 mddev->major_version = info->major_version;
6375                 mddev->minor_version = info->minor_version;
6376                 mddev->patch_version = info->patch_version;
6377                 mddev->persistent = !info->not_persistent;
6378                 /* ensure mddev_put doesn't delete this now that there
6379                  * is some minimal configuration.
6380                  */
6381                 mddev->ctime         = ktime_get_real_seconds();
6382                 return 0;
6383         }
6384         mddev->major_version = MD_MAJOR_VERSION;
6385         mddev->minor_version = MD_MINOR_VERSION;
6386         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6387         mddev->ctime         = ktime_get_real_seconds();
6388
6389         mddev->level         = info->level;
6390         mddev->clevel[0]     = 0;
6391         mddev->dev_sectors   = 2 * (sector_t)info->size;
6392         mddev->raid_disks    = info->raid_disks;
6393         /* don't set md_minor, it is determined by which /dev/md* was
6394          * openned
6395          */
6396         if (info->state & (1<<MD_SB_CLEAN))
6397                 mddev->recovery_cp = MaxSector;
6398         else
6399                 mddev->recovery_cp = 0;
6400         mddev->persistent    = ! info->not_persistent;
6401         mddev->external      = 0;
6402
6403         mddev->layout        = info->layout;
6404         mddev->chunk_sectors = info->chunk_size >> 9;
6405
6406         mddev->max_disks     = MD_SB_DISKS;
6407
6408         if (mddev->persistent)
6409                 mddev->flags         = 0;
6410         set_bit(MD_CHANGE_DEVS, &mddev->flags);
6411
6412         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6413         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6414         mddev->bitmap_info.offset = 0;
6415
6416         mddev->reshape_position = MaxSector;
6417
6418         /*
6419          * Generate a 128 bit UUID
6420          */
6421         get_random_bytes(mddev->uuid, 16);
6422
6423         mddev->new_level = mddev->level;
6424         mddev->new_chunk_sectors = mddev->chunk_sectors;
6425         mddev->new_layout = mddev->layout;
6426         mddev->delta_disks = 0;
6427         mddev->reshape_backwards = 0;
6428
6429         return 0;
6430 }
6431
6432 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6433 {
6434         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6435
6436         if (mddev->external_size)
6437                 return;
6438
6439         mddev->array_sectors = array_sectors;
6440 }
6441 EXPORT_SYMBOL(md_set_array_sectors);
6442
6443 static int update_size(struct mddev *mddev, sector_t num_sectors)
6444 {
6445         struct md_rdev *rdev;
6446         int rv;
6447         int fit = (num_sectors == 0);
6448
6449         /* cluster raid doesn't support update size */
6450         if (mddev_is_clustered(mddev))
6451                 return -EINVAL;
6452
6453         if (mddev->pers->resize == NULL)
6454                 return -EINVAL;
6455         /* The "num_sectors" is the number of sectors of each device that
6456          * is used.  This can only make sense for arrays with redundancy.
6457          * linear and raid0 always use whatever space is available. We can only
6458          * consider changing this number if no resync or reconstruction is
6459          * happening, and if the new size is acceptable. It must fit before the
6460          * sb_start or, if that is <data_offset, it must fit before the size
6461          * of each device.  If num_sectors is zero, we find the largest size
6462          * that fits.
6463          */
6464         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6465             mddev->sync_thread)
6466                 return -EBUSY;
6467         if (mddev->ro)
6468                 return -EROFS;
6469
6470         rdev_for_each(rdev, mddev) {
6471                 sector_t avail = rdev->sectors;
6472
6473                 if (fit && (num_sectors == 0 || num_sectors > avail))
6474                         num_sectors = avail;
6475                 if (avail < num_sectors)
6476                         return -ENOSPC;
6477         }
6478         rv = mddev->pers->resize(mddev, num_sectors);
6479         if (!rv)
6480                 revalidate_disk(mddev->gendisk);
6481         return rv;
6482 }
6483
6484 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6485 {
6486         int rv;
6487         struct md_rdev *rdev;
6488         /* change the number of raid disks */
6489         if (mddev->pers->check_reshape == NULL)
6490                 return -EINVAL;
6491         if (mddev->ro)
6492                 return -EROFS;
6493         if (raid_disks <= 0 ||
6494             (mddev->max_disks && raid_disks >= mddev->max_disks))
6495                 return -EINVAL;
6496         if (mddev->sync_thread ||
6497             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6498             mddev->reshape_position != MaxSector)
6499                 return -EBUSY;
6500
6501         rdev_for_each(rdev, mddev) {
6502                 if (mddev->raid_disks < raid_disks &&
6503                     rdev->data_offset < rdev->new_data_offset)
6504                         return -EINVAL;
6505                 if (mddev->raid_disks > raid_disks &&
6506                     rdev->data_offset > rdev->new_data_offset)
6507                         return -EINVAL;
6508         }
6509
6510         mddev->delta_disks = raid_disks - mddev->raid_disks;
6511         if (mddev->delta_disks < 0)
6512                 mddev->reshape_backwards = 1;
6513         else if (mddev->delta_disks > 0)
6514                 mddev->reshape_backwards = 0;
6515
6516         rv = mddev->pers->check_reshape(mddev);
6517         if (rv < 0) {
6518                 mddev->delta_disks = 0;
6519                 mddev->reshape_backwards = 0;
6520         }
6521         return rv;
6522 }
6523
6524 /*
6525  * update_array_info is used to change the configuration of an
6526  * on-line array.
6527  * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
6528  * fields in the info are checked against the array.
6529  * Any differences that cannot be handled will cause an error.
6530  * Normally, only one change can be managed at a time.
6531  */
6532 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6533 {
6534         int rv = 0;
6535         int cnt = 0;
6536         int state = 0;
6537
6538         /* calculate expected state,ignoring low bits */
6539         if (mddev->bitmap && mddev->bitmap_info.offset)
6540                 state |= (1 << MD_SB_BITMAP_PRESENT);
6541
6542         if (mddev->major_version != info->major_version ||
6543             mddev->minor_version != info->minor_version ||
6544 /*          mddev->patch_version != info->patch_version || */
6545             mddev->ctime         != info->ctime         ||
6546             mddev->level         != info->level         ||
6547 /*          mddev->layout        != info->layout        || */
6548             mddev->persistent    != !info->not_persistent ||
6549             mddev->chunk_sectors != info->chunk_size >> 9 ||
6550             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6551             ((state^info->state) & 0xfffffe00)
6552                 )
6553                 return -EINVAL;
6554         /* Check there is only one change */
6555         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6556                 cnt++;
6557         if (mddev->raid_disks != info->raid_disks)
6558                 cnt++;
6559         if (mddev->layout != info->layout)
6560                 cnt++;
6561         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6562                 cnt++;
6563         if (cnt == 0)
6564                 return 0;
6565         if (cnt > 1)
6566                 return -EINVAL;
6567
6568         if (mddev->layout != info->layout) {
6569                 /* Change layout
6570                  * we don't need to do anything at the md level, the
6571                  * personality will take care of it all.
6572                  */
6573                 if (mddev->pers->check_reshape == NULL)
6574                         return -EINVAL;
6575                 else {
6576                         mddev->new_layout = info->layout;
6577                         rv = mddev->pers->check_reshape(mddev);
6578                         if (rv)
6579                                 mddev->new_layout = mddev->layout;
6580                         return rv;
6581                 }
6582         }
6583         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6584                 rv = update_size(mddev, (sector_t)info->size * 2);
6585
6586         if (mddev->raid_disks    != info->raid_disks)
6587                 rv = update_raid_disks(mddev, info->raid_disks);
6588
6589         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6590                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6591                         rv = -EINVAL;
6592                         goto err;
6593                 }
6594                 if (mddev->recovery || mddev->sync_thread) {
6595                         rv = -EBUSY;
6596                         goto err;
6597                 }
6598                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6599                         struct bitmap *bitmap;
6600                         /* add the bitmap */
6601                         if (mddev->bitmap) {
6602                                 rv = -EEXIST;
6603                                 goto err;
6604                         }
6605                         if (mddev->bitmap_info.default_offset == 0) {
6606                                 rv = -EINVAL;
6607                                 goto err;
6608                         }
6609                         mddev->bitmap_info.offset =
6610                                 mddev->bitmap_info.default_offset;
6611                         mddev->bitmap_info.space =
6612                                 mddev->bitmap_info.default_space;
6613                         mddev->pers->quiesce(mddev, 1);
6614                         bitmap = bitmap_create(mddev, -1);
6615                         if (!IS_ERR(bitmap)) {
6616                                 mddev->bitmap = bitmap;
6617                                 rv = bitmap_load(mddev);
6618                         } else
6619                                 rv = PTR_ERR(bitmap);
6620                         if (rv)
6621                                 bitmap_destroy(mddev);
6622                         mddev->pers->quiesce(mddev, 0);
6623                 } else {
6624                         /* remove the bitmap */
6625                         if (!mddev->bitmap) {
6626                                 rv = -ENOENT;
6627                                 goto err;
6628                         }
6629                         if (mddev->bitmap->storage.file) {
6630                                 rv = -EINVAL;
6631                                 goto err;
6632                         }
6633                         if (mddev->bitmap_info.nodes) {
6634                                 /* hold PW on all the bitmap lock */
6635                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
6636                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
6637                                         rv = -EPERM;
6638                                         md_cluster_ops->unlock_all_bitmaps(mddev);
6639                                         goto err;
6640                                 }
6641
6642                                 mddev->bitmap_info.nodes = 0;
6643                                 md_cluster_ops->leave(mddev);
6644                         }
6645                         mddev->pers->quiesce(mddev, 1);
6646                         bitmap_destroy(mddev);
6647                         mddev->pers->quiesce(mddev, 0);
6648                         mddev->bitmap_info.offset = 0;
6649                 }
6650         }
6651         md_update_sb(mddev, 1);
6652         return rv;
6653 err:
6654         return rv;
6655 }
6656
6657 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6658 {
6659         struct md_rdev *rdev;
6660         int err = 0;
6661
6662         if (mddev->pers == NULL)
6663                 return -ENODEV;
6664
6665         rcu_read_lock();
6666         rdev = find_rdev_rcu(mddev, dev);
6667         if (!rdev)
6668                 err =  -ENODEV;
6669         else {
6670                 md_error(mddev, rdev);
6671                 if (!test_bit(Faulty, &rdev->flags))
6672                         err = -EBUSY;
6673         }
6674         rcu_read_unlock();
6675         return err;
6676 }
6677
6678 /*
6679  * We have a problem here : there is no easy way to give a CHS
6680  * virtual geometry. We currently pretend that we have a 2 heads
6681  * 4 sectors (with a BIG number of cylinders...). This drives
6682  * dosfs just mad... ;-)
6683  */
6684 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6685 {
6686         struct mddev *mddev = bdev->bd_disk->private_data;
6687
6688         geo->heads = 2;
6689         geo->sectors = 4;
6690         geo->cylinders = mddev->array_sectors / 8;
6691         return 0;
6692 }
6693
6694 static inline bool md_ioctl_valid(unsigned int cmd)
6695 {
6696         switch (cmd) {
6697         case ADD_NEW_DISK:
6698         case BLKROSET:
6699         case GET_ARRAY_INFO:
6700         case GET_BITMAP_FILE:
6701         case GET_DISK_INFO:
6702         case HOT_ADD_DISK:
6703         case HOT_REMOVE_DISK:
6704         case RAID_AUTORUN:
6705         case RAID_VERSION:
6706         case RESTART_ARRAY_RW:
6707         case RUN_ARRAY:
6708         case SET_ARRAY_INFO:
6709         case SET_BITMAP_FILE:
6710         case SET_DISK_FAULTY:
6711         case STOP_ARRAY:
6712         case STOP_ARRAY_RO:
6713         case CLUSTERED_DISK_NACK:
6714                 return true;
6715         default:
6716                 return false;
6717         }
6718 }
6719
6720 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6721                         unsigned int cmd, unsigned long arg)
6722 {
6723         int err = 0;
6724         void __user *argp = (void __user *)arg;
6725         struct mddev *mddev = NULL;
6726         int ro;
6727
6728         if (!md_ioctl_valid(cmd))
6729                 return -ENOTTY;
6730
6731         switch (cmd) {
6732         case RAID_VERSION:
6733         case GET_ARRAY_INFO:
6734         case GET_DISK_INFO:
6735                 break;
6736         default:
6737                 if (!capable(CAP_SYS_ADMIN))
6738                         return -EACCES;
6739         }
6740
6741         /*
6742          * Commands dealing with the RAID driver but not any
6743          * particular array:
6744          */
6745         switch (cmd) {
6746         case RAID_VERSION:
6747                 err = get_version(argp);
6748                 goto out;
6749
6750 #ifndef MODULE
6751         case RAID_AUTORUN:
6752                 err = 0;
6753                 autostart_arrays(arg);
6754                 goto out;
6755 #endif
6756         default:;
6757         }
6758
6759         /*
6760          * Commands creating/starting a new array:
6761          */
6762
6763         mddev = bdev->bd_disk->private_data;
6764
6765         if (!mddev) {
6766                 BUG();
6767                 goto out;
6768         }
6769
6770         /* Some actions do not requires the mutex */
6771         switch (cmd) {
6772         case GET_ARRAY_INFO:
6773                 if (!mddev->raid_disks && !mddev->external)
6774                         err = -ENODEV;
6775                 else
6776                         err = get_array_info(mddev, argp);
6777                 goto out;
6778
6779         case GET_DISK_INFO:
6780                 if (!mddev->raid_disks && !mddev->external)
6781                         err = -ENODEV;
6782                 else
6783                         err = get_disk_info(mddev, argp);
6784                 goto out;
6785
6786         case SET_DISK_FAULTY:
6787                 err = set_disk_faulty(mddev, new_decode_dev(arg));
6788                 goto out;
6789
6790         case GET_BITMAP_FILE:
6791                 err = get_bitmap_file(mddev, argp);
6792                 goto out;
6793
6794         }
6795
6796         if (cmd == ADD_NEW_DISK)
6797                 /* need to ensure md_delayed_delete() has completed */
6798                 flush_workqueue(md_misc_wq);
6799
6800         if (cmd == HOT_REMOVE_DISK)
6801                 /* need to ensure recovery thread has run */
6802                 wait_event_interruptible_timeout(mddev->sb_wait,
6803                                                  !test_bit(MD_RECOVERY_NEEDED,
6804                                                            &mddev->flags),
6805                                                  msecs_to_jiffies(5000));
6806         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6807                 /* Need to flush page cache, and ensure no-one else opens
6808                  * and writes
6809                  */
6810                 mutex_lock(&mddev->open_mutex);
6811                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6812                         mutex_unlock(&mddev->open_mutex);
6813                         err = -EBUSY;
6814                         goto out;
6815                 }
6816                 set_bit(MD_CLOSING, &mddev->flags);
6817                 mutex_unlock(&mddev->open_mutex);
6818                 sync_blockdev(bdev);
6819         }
6820         err = mddev_lock(mddev);
6821         if (err) {
6822                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
6823                          err, cmd);
6824                 goto out;
6825         }
6826
6827         if (cmd == SET_ARRAY_INFO) {
6828                 mdu_array_info_t info;
6829                 if (!arg)
6830                         memset(&info, 0, sizeof(info));
6831                 else if (copy_from_user(&info, argp, sizeof(info))) {
6832                         err = -EFAULT;
6833                         goto unlock;
6834                 }
6835                 if (mddev->pers) {
6836                         err = update_array_info(mddev, &info);
6837                         if (err) {
6838                                 pr_warn("md: couldn't update array info. %d\n", err);
6839                                 goto unlock;
6840                         }
6841                         goto unlock;
6842                 }
6843                 if (!list_empty(&mddev->disks)) {
6844                         pr_warn("md: array %s already has disks!\n", mdname(mddev));
6845                         err = -EBUSY;
6846                         goto unlock;
6847                 }
6848                 if (mddev->raid_disks) {
6849                         pr_warn("md: array %s already initialised!\n", mdname(mddev));
6850                         err = -EBUSY;
6851                         goto unlock;
6852                 }
6853                 err = set_array_info(mddev, &info);
6854                 if (err) {
6855                         pr_warn("md: couldn't set array info. %d\n", err);
6856                         goto unlock;
6857                 }
6858                 goto unlock;
6859         }
6860
6861         /*
6862          * Commands querying/configuring an existing array:
6863          */
6864         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6865          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6866         if ((!mddev->raid_disks && !mddev->external)
6867             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6868             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6869             && cmd != GET_BITMAP_FILE) {
6870                 err = -ENODEV;
6871                 goto unlock;
6872         }
6873
6874         /*
6875          * Commands even a read-only array can execute:
6876          */
6877         switch (cmd) {
6878         case RESTART_ARRAY_RW:
6879                 err = restart_array(mddev);
6880                 goto unlock;
6881
6882         case STOP_ARRAY:
6883                 err = do_md_stop(mddev, 0, bdev);
6884                 goto unlock;
6885
6886         case STOP_ARRAY_RO:
6887                 err = md_set_readonly(mddev, bdev);
6888                 goto unlock;
6889
6890         case HOT_REMOVE_DISK:
6891                 err = hot_remove_disk(mddev, new_decode_dev(arg));
6892                 goto unlock;
6893
6894         case ADD_NEW_DISK:
6895                 /* We can support ADD_NEW_DISK on read-only arrays
6896                  * only if we are re-adding a preexisting device.
6897                  * So require mddev->pers and MD_DISK_SYNC.
6898                  */
6899                 if (mddev->pers) {
6900                         mdu_disk_info_t info;
6901                         if (copy_from_user(&info, argp, sizeof(info)))
6902                                 err = -EFAULT;
6903                         else if (!(info.state & (1<<MD_DISK_SYNC)))
6904                                 /* Need to clear read-only for this */
6905                                 break;
6906                         else
6907                                 err = add_new_disk(mddev, &info);
6908                         goto unlock;
6909                 }
6910                 break;
6911
6912         case BLKROSET:
6913                 if (get_user(ro, (int __user *)(arg))) {
6914                         err = -EFAULT;
6915                         goto unlock;
6916                 }
6917                 err = -EINVAL;
6918
6919                 /* if the bdev is going readonly the value of mddev->ro
6920                  * does not matter, no writes are coming
6921                  */
6922                 if (ro)
6923                         goto unlock;
6924
6925                 /* are we are already prepared for writes? */
6926                 if (mddev->ro != 1)
6927                         goto unlock;
6928
6929                 /* transitioning to readauto need only happen for
6930                  * arrays that call md_write_start
6931                  */
6932                 if (mddev->pers) {
6933                         err = restart_array(mddev);
6934                         if (err == 0) {
6935                                 mddev->ro = 2;
6936                                 set_disk_ro(mddev->gendisk, 0);
6937                         }
6938                 }
6939                 goto unlock;
6940         }
6941
6942         /*
6943          * The remaining ioctls are changing the state of the
6944          * superblock, so we do not allow them on read-only arrays.
6945          */
6946         if (mddev->ro && mddev->pers) {
6947                 if (mddev->ro == 2) {
6948                         mddev->ro = 0;
6949                         sysfs_notify_dirent_safe(mddev->sysfs_state);
6950                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6951                         /* mddev_unlock will wake thread */
6952                         /* If a device failed while we were read-only, we
6953                          * need to make sure the metadata is updated now.
6954                          */
6955                         if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
6956                                 mddev_unlock(mddev);
6957                                 wait_event(mddev->sb_wait,
6958                                            !test_bit(MD_CHANGE_DEVS, &mddev->flags) &&
6959                                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
6960                                 mddev_lock_nointr(mddev);
6961                         }
6962                 } else {
6963                         err = -EROFS;
6964                         goto unlock;
6965                 }
6966         }
6967
6968         switch (cmd) {
6969         case ADD_NEW_DISK:
6970         {
6971                 mdu_disk_info_t info;
6972                 if (copy_from_user(&info, argp, sizeof(info)))
6973                         err = -EFAULT;
6974                 else
6975                         err = add_new_disk(mddev, &info);
6976                 goto unlock;
6977         }
6978
6979         case CLUSTERED_DISK_NACK:
6980                 if (mddev_is_clustered(mddev))
6981                         md_cluster_ops->new_disk_ack(mddev, false);
6982                 else
6983                         err = -EINVAL;
6984                 goto unlock;
6985
6986         case HOT_ADD_DISK:
6987                 err = hot_add_disk(mddev, new_decode_dev(arg));
6988                 goto unlock;
6989
6990         case RUN_ARRAY:
6991                 err = do_md_run(mddev);
6992                 goto unlock;
6993
6994         case SET_BITMAP_FILE:
6995                 err = set_bitmap_file(mddev, (int)arg);
6996                 goto unlock;
6997
6998         default:
6999                 err = -EINVAL;
7000                 goto unlock;
7001         }
7002
7003 unlock:
7004         if (mddev->hold_active == UNTIL_IOCTL &&
7005             err != -EINVAL)
7006                 mddev->hold_active = 0;
7007         mddev_unlock(mddev);
7008 out:
7009         return err;
7010 }
7011 #ifdef CONFIG_COMPAT
7012 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7013                     unsigned int cmd, unsigned long arg)
7014 {
7015         switch (cmd) {
7016         case HOT_REMOVE_DISK:
7017         case HOT_ADD_DISK:
7018         case SET_DISK_FAULTY:
7019         case SET_BITMAP_FILE:
7020                 /* These take in integer arg, do not convert */
7021                 break;
7022         default:
7023                 arg = (unsigned long)compat_ptr(arg);
7024                 break;
7025         }
7026
7027         return md_ioctl(bdev, mode, cmd, arg);
7028 }
7029 #endif /* CONFIG_COMPAT */
7030
7031 static int md_open(struct block_device *bdev, fmode_t mode)
7032 {
7033         /*
7034          * Succeed if we can lock the mddev, which confirms that
7035          * it isn't being stopped right now.
7036          */
7037         struct mddev *mddev = mddev_find(bdev->bd_dev);
7038         int err;
7039
7040         if (!mddev)
7041                 return -ENODEV;
7042
7043         if (mddev->gendisk != bdev->bd_disk) {
7044                 /* we are racing with mddev_put which is discarding this
7045                  * bd_disk.
7046                  */
7047                 mddev_put(mddev);
7048                 /* Wait until bdev->bd_disk is definitely gone */
7049                 flush_workqueue(md_misc_wq);
7050                 /* Then retry the open from the top */
7051                 return -ERESTARTSYS;
7052         }
7053         BUG_ON(mddev != bdev->bd_disk->private_data);
7054
7055         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7056                 goto out;
7057
7058         if (test_bit(MD_CLOSING, &mddev->flags)) {
7059                 mutex_unlock(&mddev->open_mutex);
7060                 return -ENODEV;
7061         }
7062
7063         err = 0;
7064         atomic_inc(&mddev->openers);
7065         mutex_unlock(&mddev->open_mutex);
7066
7067         check_disk_change(bdev);
7068  out:
7069         return err;
7070 }
7071
7072 static void md_release(struct gendisk *disk, fmode_t mode)
7073 {
7074         struct mddev *mddev = disk->private_data;
7075
7076         BUG_ON(!mddev);
7077         atomic_dec(&mddev->openers);
7078         mddev_put(mddev);
7079 }
7080
7081 static int md_media_changed(struct gendisk *disk)
7082 {
7083         struct mddev *mddev = disk->private_data;
7084
7085         return mddev->changed;
7086 }
7087
7088 static int md_revalidate(struct gendisk *disk)
7089 {
7090         struct mddev *mddev = disk->private_data;
7091
7092         mddev->changed = 0;
7093         return 0;
7094 }
7095 static const struct block_device_operations md_fops =
7096 {
7097         .owner          = THIS_MODULE,
7098         .open           = md_open,
7099         .release        = md_release,
7100         .ioctl          = md_ioctl,
7101 #ifdef CONFIG_COMPAT
7102         .compat_ioctl   = md_compat_ioctl,
7103 #endif
7104         .getgeo         = md_getgeo,
7105         .media_changed  = md_media_changed,
7106         .revalidate_disk= md_revalidate,
7107 };
7108
7109 static int md_thread(void *arg)
7110 {
7111         struct md_thread *thread = arg;
7112
7113         /*
7114          * md_thread is a 'system-thread', it's priority should be very
7115          * high. We avoid resource deadlocks individually in each
7116          * raid personality. (RAID5 does preallocation) We also use RR and
7117          * the very same RT priority as kswapd, thus we will never get
7118          * into a priority inversion deadlock.
7119          *
7120          * we definitely have to have equal or higher priority than
7121          * bdflush, otherwise bdflush will deadlock if there are too
7122          * many dirty RAID5 blocks.
7123          */
7124
7125         allow_signal(SIGKILL);
7126         while (!kthread_should_stop()) {
7127
7128                 /* We need to wait INTERRUPTIBLE so that
7129                  * we don't add to the load-average.
7130                  * That means we need to be sure no signals are
7131                  * pending
7132                  */
7133                 if (signal_pending(current))
7134                         flush_signals(current);
7135
7136                 wait_event_interruptible_timeout
7137                         (thread->wqueue,
7138                          test_bit(THREAD_WAKEUP, &thread->flags)
7139                          || kthread_should_stop(),
7140                          thread->timeout);
7141
7142                 clear_bit(THREAD_WAKEUP, &thread->flags);
7143                 if (!kthread_should_stop())
7144                         thread->run(thread);
7145         }
7146
7147         return 0;
7148 }
7149
7150 void md_wakeup_thread(struct md_thread *thread)
7151 {
7152         if (thread) {
7153                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7154                 set_bit(THREAD_WAKEUP, &thread->flags);
7155                 wake_up(&thread->wqueue);
7156         }
7157 }
7158 EXPORT_SYMBOL(md_wakeup_thread);
7159
7160 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7161                 struct mddev *mddev, const char *name)
7162 {
7163         struct md_thread *thread;
7164
7165         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7166         if (!thread)
7167                 return NULL;
7168
7169         init_waitqueue_head(&thread->wqueue);
7170
7171         thread->run = run;
7172         thread->mddev = mddev;
7173         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7174         thread->tsk = kthread_run(md_thread, thread,
7175                                   "%s_%s",
7176                                   mdname(thread->mddev),
7177                                   name);
7178         if (IS_ERR(thread->tsk)) {
7179                 kfree(thread);
7180                 return NULL;
7181         }
7182         return thread;
7183 }
7184 EXPORT_SYMBOL(md_register_thread);
7185
7186 void md_unregister_thread(struct md_thread **threadp)
7187 {
7188         struct md_thread *thread = *threadp;
7189         if (!thread)
7190                 return;
7191         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7192         /* Locking ensures that mddev_unlock does not wake_up a
7193          * non-existent thread
7194          */
7195         spin_lock(&pers_lock);
7196         *threadp = NULL;
7197         spin_unlock(&pers_lock);
7198
7199         kthread_stop(thread->tsk);
7200         kfree(thread);
7201 }
7202 EXPORT_SYMBOL(md_unregister_thread);
7203
7204 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7205 {
7206         if (!rdev || test_bit(Faulty, &rdev->flags))
7207                 return;
7208
7209         if (!mddev->pers || !mddev->pers->error_handler)
7210                 return;
7211         mddev->pers->error_handler(mddev,rdev);
7212         if (mddev->degraded)
7213                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7214         sysfs_notify_dirent_safe(rdev->sysfs_state);
7215         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7216         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7217         md_wakeup_thread(mddev->thread);
7218         if (mddev->event_work.func)
7219                 queue_work(md_misc_wq, &mddev->event_work);
7220         md_new_event(mddev);
7221 }
7222 EXPORT_SYMBOL(md_error);
7223
7224 /* seq_file implementation /proc/mdstat */
7225
7226 static void status_unused(struct seq_file *seq)
7227 {
7228         int i = 0;
7229         struct md_rdev *rdev;
7230
7231         seq_printf(seq, "unused devices: ");
7232
7233         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7234                 char b[BDEVNAME_SIZE];
7235                 i++;
7236                 seq_printf(seq, "%s ",
7237                               bdevname(rdev->bdev,b));
7238         }
7239         if (!i)
7240                 seq_printf(seq, "<none>");
7241
7242         seq_printf(seq, "\n");
7243 }
7244
7245 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7246 {
7247         sector_t max_sectors, resync, res;
7248         unsigned long dt, db;
7249         sector_t rt;
7250         int scale;
7251         unsigned int per_milli;
7252
7253         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7254             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7255                 max_sectors = mddev->resync_max_sectors;
7256         else
7257                 max_sectors = mddev->dev_sectors;
7258
7259         resync = mddev->curr_resync;
7260         if (resync <= 3) {
7261                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7262                         /* Still cleaning up */
7263                         resync = max_sectors;
7264         } else
7265                 resync -= atomic_read(&mddev->recovery_active);
7266
7267         if (resync == 0) {
7268                 if (mddev->recovery_cp < MaxSector) {
7269                         seq_printf(seq, "\tresync=PENDING");
7270                         return 1;
7271                 }
7272                 return 0;
7273         }
7274         if (resync < 3) {
7275                 seq_printf(seq, "\tresync=DELAYED");
7276                 return 1;
7277         }
7278
7279         WARN_ON(max_sectors == 0);
7280         /* Pick 'scale' such that (resync>>scale)*1000 will fit
7281          * in a sector_t, and (max_sectors>>scale) will fit in a
7282          * u32, as those are the requirements for sector_div.
7283          * Thus 'scale' must be at least 10
7284          */
7285         scale = 10;
7286         if (sizeof(sector_t) > sizeof(unsigned long)) {
7287                 while ( max_sectors/2 > (1ULL<<(scale+32)))
7288                         scale++;
7289         }
7290         res = (resync>>scale)*1000;
7291         sector_div(res, (u32)((max_sectors>>scale)+1));
7292
7293         per_milli = res;
7294         {
7295                 int i, x = per_milli/50, y = 20-x;
7296                 seq_printf(seq, "[");
7297                 for (i = 0; i < x; i++)
7298                         seq_printf(seq, "=");
7299                 seq_printf(seq, ">");
7300                 for (i = 0; i < y; i++)
7301                         seq_printf(seq, ".");
7302                 seq_printf(seq, "] ");
7303         }
7304         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7305                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7306                     "reshape" :
7307                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7308                      "check" :
7309                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7310                       "resync" : "recovery"))),
7311                    per_milli/10, per_milli % 10,
7312                    (unsigned long long) resync/2,
7313                    (unsigned long long) max_sectors/2);
7314
7315         /*
7316          * dt: time from mark until now
7317          * db: blocks written from mark until now
7318          * rt: remaining time
7319          *
7320          * rt is a sector_t, so could be 32bit or 64bit.
7321          * So we divide before multiply in case it is 32bit and close
7322          * to the limit.
7323          * We scale the divisor (db) by 32 to avoid losing precision
7324          * near the end of resync when the number of remaining sectors
7325          * is close to 'db'.
7326          * We then divide rt by 32 after multiplying by db to compensate.
7327          * The '+1' avoids division by zero if db is very small.
7328          */
7329         dt = ((jiffies - mddev->resync_mark) / HZ);
7330         if (!dt) dt++;
7331         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
7332                 - mddev->resync_mark_cnt;
7333
7334         rt = max_sectors - resync;    /* number of remaining sectors */
7335         sector_div(rt, db/32+1);
7336         rt *= dt;
7337         rt >>= 5;
7338
7339         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7340                    ((unsigned long)rt % 60)/6);
7341
7342         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7343         return 1;
7344 }
7345
7346 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7347 {
7348         struct list_head *tmp;
7349         loff_t l = *pos;
7350         struct mddev *mddev;
7351
7352         if (l >= 0x10000)
7353                 return NULL;
7354         if (!l--)
7355                 /* header */
7356                 return (void*)1;
7357
7358         spin_lock(&all_mddevs_lock);
7359         list_for_each(tmp,&all_mddevs)
7360                 if (!l--) {
7361                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7362                         mddev_get(mddev);
7363                         spin_unlock(&all_mddevs_lock);
7364                         return mddev;
7365                 }
7366         spin_unlock(&all_mddevs_lock);
7367         if (!l--)
7368                 return (void*)2;/* tail */
7369         return NULL;
7370 }
7371
7372 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7373 {
7374         struct list_head *tmp;
7375         struct mddev *next_mddev, *mddev = v;
7376
7377         ++*pos;
7378         if (v == (void*)2)
7379                 return NULL;
7380
7381         spin_lock(&all_mddevs_lock);
7382         if (v == (void*)1)
7383                 tmp = all_mddevs.next;
7384         else
7385                 tmp = mddev->all_mddevs.next;
7386         if (tmp != &all_mddevs)
7387                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7388         else {
7389                 next_mddev = (void*)2;
7390                 *pos = 0x10000;
7391         }
7392         spin_unlock(&all_mddevs_lock);
7393
7394         if (v != (void*)1)
7395                 mddev_put(mddev);
7396         return next_mddev;
7397
7398 }
7399
7400 static void md_seq_stop(struct seq_file *seq, void *v)
7401 {
7402         struct mddev *mddev = v;
7403
7404         if (mddev && v != (void*)1 && v != (void*)2)
7405                 mddev_put(mddev);
7406 }
7407
7408 static int md_seq_show(struct seq_file *seq, void *v)
7409 {
7410         struct mddev *mddev = v;
7411         sector_t sectors;
7412         struct md_rdev *rdev;
7413
7414         if (v == (void*)1) {
7415                 struct md_personality *pers;
7416                 seq_printf(seq, "Personalities : ");
7417                 spin_lock(&pers_lock);
7418                 list_for_each_entry(pers, &pers_list, list)
7419                         seq_printf(seq, "[%s] ", pers->name);
7420
7421                 spin_unlock(&pers_lock);
7422                 seq_printf(seq, "\n");
7423                 seq->poll_event = atomic_read(&md_event_count);
7424                 return 0;
7425         }
7426         if (v == (void*)2) {
7427                 status_unused(seq);
7428                 return 0;
7429         }
7430
7431         spin_lock(&mddev->lock);
7432         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7433                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7434                                                 mddev->pers ? "" : "in");
7435                 if (mddev->pers) {
7436                         if (mddev->ro==1)
7437                                 seq_printf(seq, " (read-only)");
7438                         if (mddev->ro==2)
7439                                 seq_printf(seq, " (auto-read-only)");
7440                         seq_printf(seq, " %s", mddev->pers->name);
7441                 }
7442
7443                 sectors = 0;
7444                 rcu_read_lock();
7445                 rdev_for_each_rcu(rdev, mddev) {
7446                         char b[BDEVNAME_SIZE];
7447                         seq_printf(seq, " %s[%d]",
7448                                 bdevname(rdev->bdev,b), rdev->desc_nr);
7449                         if (test_bit(WriteMostly, &rdev->flags))
7450                                 seq_printf(seq, "(W)");
7451                         if (test_bit(Journal, &rdev->flags))
7452                                 seq_printf(seq, "(J)");
7453                         if (test_bit(Faulty, &rdev->flags)) {
7454                                 seq_printf(seq, "(F)");
7455                                 continue;
7456                         }
7457                         if (rdev->raid_disk < 0)
7458                                 seq_printf(seq, "(S)"); /* spare */
7459                         if (test_bit(Replacement, &rdev->flags))
7460                                 seq_printf(seq, "(R)");
7461                         sectors += rdev->sectors;
7462                 }
7463                 rcu_read_unlock();
7464
7465                 if (!list_empty(&mddev->disks)) {
7466                         if (mddev->pers)
7467                                 seq_printf(seq, "\n      %llu blocks",
7468                                            (unsigned long long)
7469                                            mddev->array_sectors / 2);
7470                         else
7471                                 seq_printf(seq, "\n      %llu blocks",
7472                                            (unsigned long long)sectors / 2);
7473                 }
7474                 if (mddev->persistent) {
7475                         if (mddev->major_version != 0 ||
7476                             mddev->minor_version != 90) {
7477                                 seq_printf(seq," super %d.%d",
7478                                            mddev->major_version,
7479                                            mddev->minor_version);
7480                         }
7481                 } else if (mddev->external)
7482                         seq_printf(seq, " super external:%s",
7483                                    mddev->metadata_type);
7484                 else
7485                         seq_printf(seq, " super non-persistent");
7486
7487                 if (mddev->pers) {
7488                         mddev->pers->status(seq, mddev);
7489                         seq_printf(seq, "\n      ");
7490                         if (mddev->pers->sync_request) {
7491                                 if (status_resync(seq, mddev))
7492                                         seq_printf(seq, "\n      ");
7493                         }
7494                 } else
7495                         seq_printf(seq, "\n       ");
7496
7497                 bitmap_status(seq, mddev->bitmap);
7498
7499                 seq_printf(seq, "\n");
7500         }
7501         spin_unlock(&mddev->lock);
7502
7503         return 0;
7504 }
7505
7506 static const struct seq_operations md_seq_ops = {
7507         .start  = md_seq_start,
7508         .next   = md_seq_next,
7509         .stop   = md_seq_stop,
7510         .show   = md_seq_show,
7511 };
7512
7513 static int md_seq_open(struct inode *inode, struct file *file)
7514 {
7515         struct seq_file *seq;
7516         int error;
7517
7518         error = seq_open(file, &md_seq_ops);
7519         if (error)
7520                 return error;
7521
7522         seq = file->private_data;
7523         seq->poll_event = atomic_read(&md_event_count);
7524         return error;
7525 }
7526
7527 static int md_unloading;
7528 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7529 {
7530         struct seq_file *seq = filp->private_data;
7531         int mask;
7532
7533         if (md_unloading)
7534                 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7535         poll_wait(filp, &md_event_waiters, wait);
7536
7537         /* always allow read */
7538         mask = POLLIN | POLLRDNORM;
7539
7540         if (seq->poll_event != atomic_read(&md_event_count))
7541                 mask |= POLLERR | POLLPRI;
7542         return mask;
7543 }
7544
7545 static const struct file_operations md_seq_fops = {
7546         .owner          = THIS_MODULE,
7547         .open           = md_seq_open,
7548         .read           = seq_read,
7549         .llseek         = seq_lseek,
7550         .release        = seq_release_private,
7551         .poll           = mdstat_poll,
7552 };
7553
7554 int register_md_personality(struct md_personality *p)
7555 {
7556         pr_debug("md: %s personality registered for level %d\n",
7557                  p->name, p->level);
7558         spin_lock(&pers_lock);
7559         list_add_tail(&p->list, &pers_list);
7560         spin_unlock(&pers_lock);
7561         return 0;
7562 }
7563 EXPORT_SYMBOL(register_md_personality);
7564
7565 int unregister_md_personality(struct md_personality *p)
7566 {
7567         pr_debug("md: %s personality unregistered\n", p->name);
7568         spin_lock(&pers_lock);
7569         list_del_init(&p->list);
7570         spin_unlock(&pers_lock);
7571         return 0;
7572 }
7573 EXPORT_SYMBOL(unregister_md_personality);
7574
7575 int register_md_cluster_operations(struct md_cluster_operations *ops,
7576                                    struct module *module)
7577 {
7578         int ret = 0;
7579         spin_lock(&pers_lock);
7580         if (md_cluster_ops != NULL)
7581                 ret = -EALREADY;
7582         else {
7583                 md_cluster_ops = ops;
7584                 md_cluster_mod = module;
7585         }
7586         spin_unlock(&pers_lock);
7587         return ret;
7588 }
7589 EXPORT_SYMBOL(register_md_cluster_operations);
7590
7591 int unregister_md_cluster_operations(void)
7592 {
7593         spin_lock(&pers_lock);
7594         md_cluster_ops = NULL;
7595         spin_unlock(&pers_lock);
7596         return 0;
7597 }
7598 EXPORT_SYMBOL(unregister_md_cluster_operations);
7599
7600 int md_setup_cluster(struct mddev *mddev, int nodes)
7601 {
7602         if (!md_cluster_ops)
7603                 request_module("md-cluster");
7604         spin_lock(&pers_lock);
7605         /* ensure module won't be unloaded */
7606         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7607                 pr_warn("can't find md-cluster module or get it's reference.\n");
7608                 spin_unlock(&pers_lock);
7609                 return -ENOENT;
7610         }
7611         spin_unlock(&pers_lock);
7612
7613         return md_cluster_ops->join(mddev, nodes);
7614 }
7615
7616 void md_cluster_stop(struct mddev *mddev)
7617 {
7618         if (!md_cluster_ops)
7619                 return;
7620         md_cluster_ops->leave(mddev);
7621         module_put(md_cluster_mod);
7622 }
7623
7624 static int is_mddev_idle(struct mddev *mddev, int init)
7625 {
7626         struct md_rdev *rdev;
7627         int idle;
7628         int curr_events;
7629
7630         idle = 1;
7631         rcu_read_lock();
7632         rdev_for_each_rcu(rdev, mddev) {
7633                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7634                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7635                               (int)part_stat_read(&disk->part0, sectors[1]) -
7636                               atomic_read(&disk->sync_io);
7637                 /* sync IO will cause sync_io to increase before the disk_stats
7638                  * as sync_io is counted when a request starts, and
7639                  * disk_stats is counted when it completes.
7640                  * So resync activity will cause curr_events to be smaller than
7641                  * when there was no such activity.
7642                  * non-sync IO will cause disk_stat to increase without
7643                  * increasing sync_io so curr_events will (eventually)
7644                  * be larger than it was before.  Once it becomes
7645                  * substantially larger, the test below will cause
7646                  * the array to appear non-idle, and resync will slow
7647                  * down.
7648                  * If there is a lot of outstanding resync activity when
7649                  * we set last_event to curr_events, then all that activity
7650                  * completing might cause the array to appear non-idle
7651                  * and resync will be slowed down even though there might
7652                  * not have been non-resync activity.  This will only
7653                  * happen once though.  'last_events' will soon reflect
7654                  * the state where there is little or no outstanding
7655                  * resync requests, and further resync activity will
7656                  * always make curr_events less than last_events.
7657                  *
7658                  */
7659                 if (init || curr_events - rdev->last_events > 64) {
7660                         rdev->last_events = curr_events;
7661                         idle = 0;
7662                 }
7663         }
7664         rcu_read_unlock();
7665         return idle;
7666 }
7667
7668 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7669 {
7670         /* another "blocks" (512byte) blocks have been synced */
7671         atomic_sub(blocks, &mddev->recovery_active);
7672         wake_up(&mddev->recovery_wait);
7673         if (!ok) {
7674                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7675                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7676                 md_wakeup_thread(mddev->thread);
7677                 // stop recovery, signal do_sync ....
7678         }
7679 }
7680 EXPORT_SYMBOL(md_done_sync);
7681
7682 /* md_write_start(mddev, bi)
7683  * If we need to update some array metadata (e.g. 'active' flag
7684  * in superblock) before writing, schedule a superblock update
7685  * and wait for it to complete.
7686  */
7687 void md_write_start(struct mddev *mddev, struct bio *bi)
7688 {
7689         int did_change = 0;
7690         if (bio_data_dir(bi) != WRITE)
7691                 return;
7692
7693         BUG_ON(mddev->ro == 1);
7694         if (mddev->ro == 2) {
7695                 /* need to switch to read/write */
7696                 mddev->ro = 0;
7697                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7698                 md_wakeup_thread(mddev->thread);
7699                 md_wakeup_thread(mddev->sync_thread);
7700                 did_change = 1;
7701         }
7702         atomic_inc(&mddev->writes_pending);
7703         if (mddev->safemode == 1)
7704                 mddev->safemode = 0;
7705         if (mddev->in_sync) {
7706                 spin_lock(&mddev->lock);
7707                 if (mddev->in_sync) {
7708                         mddev->in_sync = 0;
7709                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7710                         set_bit(MD_CHANGE_PENDING, &mddev->flags);
7711                         md_wakeup_thread(mddev->thread);
7712                         did_change = 1;
7713                 }
7714                 spin_unlock(&mddev->lock);
7715         }
7716         if (did_change)
7717                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7718         wait_event(mddev->sb_wait,
7719                    !test_bit(MD_CHANGE_PENDING, &mddev->flags));
7720 }
7721 EXPORT_SYMBOL(md_write_start);
7722
7723 void md_write_end(struct mddev *mddev)
7724 {
7725         if (atomic_dec_and_test(&mddev->writes_pending)) {
7726                 if (mddev->safemode == 2)
7727                         md_wakeup_thread(mddev->thread);
7728                 else if (mddev->safemode_delay)
7729                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7730         }
7731 }
7732 EXPORT_SYMBOL(md_write_end);
7733
7734 /* md_allow_write(mddev)
7735  * Calling this ensures that the array is marked 'active' so that writes
7736  * may proceed without blocking.  It is important to call this before
7737  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7738  * Must be called with mddev_lock held.
7739  *
7740  * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock
7741  * is dropped, so return -EAGAIN after notifying userspace.
7742  */
7743 int md_allow_write(struct mddev *mddev)
7744 {
7745         if (!mddev->pers)
7746                 return 0;
7747         if (mddev->ro)
7748                 return 0;
7749         if (!mddev->pers->sync_request)
7750                 return 0;
7751
7752         spin_lock(&mddev->lock);
7753         if (mddev->in_sync) {
7754                 mddev->in_sync = 0;
7755                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7756                 set_bit(MD_CHANGE_PENDING, &mddev->flags);
7757                 if (mddev->safemode_delay &&
7758                     mddev->safemode == 0)
7759                         mddev->safemode = 1;
7760                 spin_unlock(&mddev->lock);
7761                 md_update_sb(mddev, 0);
7762                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7763         } else
7764                 spin_unlock(&mddev->lock);
7765
7766         if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
7767                 return -EAGAIN;
7768         else
7769                 return 0;
7770 }
7771 EXPORT_SYMBOL_GPL(md_allow_write);
7772
7773 #define SYNC_MARKS      10
7774 #define SYNC_MARK_STEP  (3*HZ)
7775 #define UPDATE_FREQUENCY (5*60*HZ)
7776 void md_do_sync(struct md_thread *thread)
7777 {
7778         struct mddev *mddev = thread->mddev;
7779         struct mddev *mddev2;
7780         unsigned int currspeed = 0,
7781                  window;
7782         sector_t max_sectors,j, io_sectors, recovery_done;
7783         unsigned long mark[SYNC_MARKS];
7784         unsigned long update_time;
7785         sector_t mark_cnt[SYNC_MARKS];
7786         int last_mark,m;
7787         struct list_head *tmp;
7788         sector_t last_check;
7789         int skipped = 0;
7790         struct md_rdev *rdev;
7791         char *desc, *action = NULL;
7792         struct blk_plug plug;
7793         int ret;
7794
7795         /* just incase thread restarts... */
7796         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7797                 return;
7798         if (mddev->ro) {/* never try to sync a read-only array */
7799                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7800                 return;
7801         }
7802
7803         if (mddev_is_clustered(mddev)) {
7804                 ret = md_cluster_ops->resync_start(mddev);
7805                 if (ret)
7806                         goto skip;
7807
7808                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
7809                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7810                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
7811                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
7812                      && ((unsigned long long)mddev->curr_resync_completed
7813                          < (unsigned long long)mddev->resync_max_sectors))
7814                         goto skip;
7815         }
7816
7817         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7818                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7819                         desc = "data-check";
7820                         action = "check";
7821                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7822                         desc = "requested-resync";
7823                         action = "repair";
7824                 } else
7825                         desc = "resync";
7826         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7827                 desc = "reshape";
7828         else
7829                 desc = "recovery";
7830
7831         mddev->last_sync_action = action ?: desc;
7832
7833         /* we overload curr_resync somewhat here.
7834          * 0 == not engaged in resync at all
7835          * 2 == checking that there is no conflict with another sync
7836          * 1 == like 2, but have yielded to allow conflicting resync to
7837          *              commense
7838          * other == active in resync - this many blocks
7839          *
7840          * Before starting a resync we must have set curr_resync to
7841          * 2, and then checked that every "conflicting" array has curr_resync
7842          * less than ours.  When we find one that is the same or higher
7843          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7844          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7845          * This will mean we have to start checking from the beginning again.
7846          *
7847          */
7848
7849         do {
7850                 int mddev2_minor = -1;
7851                 mddev->curr_resync = 2;
7852
7853         try_again:
7854                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7855                         goto skip;
7856                 for_each_mddev(mddev2, tmp) {
7857                         if (mddev2 == mddev)
7858                                 continue;
7859                         if (!mddev->parallel_resync
7860                         &&  mddev2->curr_resync
7861                         &&  match_mddev_units(mddev, mddev2)) {
7862                                 DEFINE_WAIT(wq);
7863                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
7864                                         /* arbitrarily yield */
7865                                         mddev->curr_resync = 1;
7866                                         wake_up(&resync_wait);
7867                                 }
7868                                 if (mddev > mddev2 && mddev->curr_resync == 1)
7869                                         /* no need to wait here, we can wait the next
7870                                          * time 'round when curr_resync == 2
7871                                          */
7872                                         continue;
7873                                 /* We need to wait 'interruptible' so as not to
7874                                  * contribute to the load average, and not to
7875                                  * be caught by 'softlockup'
7876                                  */
7877                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7878                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7879                                     mddev2->curr_resync >= mddev->curr_resync) {
7880                                         if (mddev2_minor != mddev2->md_minor) {
7881                                                 mddev2_minor = mddev2->md_minor;
7882                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
7883                                                         desc, mdname(mddev),
7884                                                         mdname(mddev2));
7885                                         }
7886                                         mddev_put(mddev2);
7887                                         if (signal_pending(current))
7888                                                 flush_signals(current);
7889                                         schedule();
7890                                         finish_wait(&resync_wait, &wq);
7891                                         goto try_again;
7892                                 }
7893                                 finish_wait(&resync_wait, &wq);
7894                         }
7895                 }
7896         } while (mddev->curr_resync < 2);
7897
7898         j = 0;
7899         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7900                 /* resync follows the size requested by the personality,
7901                  * which defaults to physical size, but can be virtual size
7902                  */
7903                 max_sectors = mddev->resync_max_sectors;
7904                 atomic64_set(&mddev->resync_mismatches, 0);
7905                 /* we don't use the checkpoint if there's a bitmap */
7906                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7907                         j = mddev->resync_min;
7908                 else if (!mddev->bitmap)
7909                         j = mddev->recovery_cp;
7910
7911         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7912                 max_sectors = mddev->resync_max_sectors;
7913         else {
7914                 /* recovery follows the physical size of devices */
7915                 max_sectors = mddev->dev_sectors;
7916                 j = MaxSector;
7917                 rcu_read_lock();
7918                 rdev_for_each_rcu(rdev, mddev)
7919                         if (rdev->raid_disk >= 0 &&
7920                             !test_bit(Journal, &rdev->flags) &&
7921                             !test_bit(Faulty, &rdev->flags) &&
7922                             !test_bit(In_sync, &rdev->flags) &&
7923                             rdev->recovery_offset < j)
7924                                 j = rdev->recovery_offset;
7925                 rcu_read_unlock();
7926
7927                 /* If there is a bitmap, we need to make sure all
7928                  * writes that started before we added a spare
7929                  * complete before we start doing a recovery.
7930                  * Otherwise the write might complete and (via
7931                  * bitmap_endwrite) set a bit in the bitmap after the
7932                  * recovery has checked that bit and skipped that
7933                  * region.
7934                  */
7935                 if (mddev->bitmap) {
7936                         mddev->pers->quiesce(mddev, 1);
7937                         mddev->pers->quiesce(mddev, 0);
7938                 }
7939         }
7940
7941         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
7942         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
7943         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
7944                  speed_max(mddev), desc);
7945
7946         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
7947
7948         io_sectors = 0;
7949         for (m = 0; m < SYNC_MARKS; m++) {
7950                 mark[m] = jiffies;
7951                 mark_cnt[m] = io_sectors;
7952         }
7953         last_mark = 0;
7954         mddev->resync_mark = mark[last_mark];
7955         mddev->resync_mark_cnt = mark_cnt[last_mark];
7956
7957         /*
7958          * Tune reconstruction:
7959          */
7960         window = 32*(PAGE_SIZE/512);
7961         pr_debug("md: using %dk window, over a total of %lluk.\n",
7962                  window/2, (unsigned long long)max_sectors/2);
7963
7964         atomic_set(&mddev->recovery_active, 0);
7965         last_check = 0;
7966
7967         if (j>2) {
7968                 pr_debug("md: resuming %s of %s from checkpoint.\n",
7969                          desc, mdname(mddev));
7970                 mddev->curr_resync = j;
7971         } else
7972                 mddev->curr_resync = 3; /* no longer delayed */
7973         mddev->curr_resync_completed = j;
7974         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7975         md_new_event(mddev);
7976         update_time = jiffies;
7977
7978         blk_start_plug(&plug);
7979         while (j < max_sectors) {
7980                 sector_t sectors;
7981
7982                 skipped = 0;
7983
7984                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7985                     ((mddev->curr_resync > mddev->curr_resync_completed &&
7986                       (mddev->curr_resync - mddev->curr_resync_completed)
7987                       > (max_sectors >> 4)) ||
7988                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
7989                      (j - mddev->curr_resync_completed)*2
7990                      >= mddev->resync_max - mddev->curr_resync_completed ||
7991                      mddev->curr_resync_completed > mddev->resync_max
7992                             )) {
7993                         /* time to update curr_resync_completed */
7994                         wait_event(mddev->recovery_wait,
7995                                    atomic_read(&mddev->recovery_active) == 0);
7996                         mddev->curr_resync_completed = j;
7997                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
7998                             j > mddev->recovery_cp)
7999                                 mddev->recovery_cp = j;
8000                         update_time = jiffies;
8001                         set_bit(MD_CHANGE_CLEAN, &mddev->flags);
8002                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8003                 }
8004
8005                 while (j >= mddev->resync_max &&
8006                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8007                         /* As this condition is controlled by user-space,
8008                          * we can block indefinitely, so use '_interruptible'
8009                          * to avoid triggering warnings.
8010                          */
8011                         flush_signals(current); /* just in case */
8012                         wait_event_interruptible(mddev->recovery_wait,
8013                                                  mddev->resync_max > j
8014                                                  || test_bit(MD_RECOVERY_INTR,
8015                                                              &mddev->recovery));
8016                 }
8017
8018                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8019                         break;
8020
8021                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8022                 if (sectors == 0) {
8023                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8024                         break;
8025                 }
8026
8027                 if (!skipped) { /* actual IO requested */
8028                         io_sectors += sectors;
8029                         atomic_add(sectors, &mddev->recovery_active);
8030                 }
8031
8032                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8033                         break;
8034
8035                 j += sectors;
8036                 if (j > max_sectors)
8037                         /* when skipping, extra large numbers can be returned. */
8038                         j = max_sectors;
8039                 if (j > 2)
8040                         mddev->curr_resync = j;
8041                 mddev->curr_mark_cnt = io_sectors;
8042                 if (last_check == 0)
8043                         /* this is the earliest that rebuild will be
8044                          * visible in /proc/mdstat
8045                          */
8046                         md_new_event(mddev);
8047
8048                 if (last_check + window > io_sectors || j == max_sectors)
8049                         continue;
8050
8051                 last_check = io_sectors;
8052         repeat:
8053                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8054                         /* step marks */
8055                         int next = (last_mark+1) % SYNC_MARKS;
8056
8057                         mddev->resync_mark = mark[next];
8058                         mddev->resync_mark_cnt = mark_cnt[next];
8059                         mark[next] = jiffies;
8060                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8061                         last_mark = next;
8062                 }
8063
8064                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8065                         break;
8066
8067                 /*
8068                  * this loop exits only if either when we are slower than
8069                  * the 'hard' speed limit, or the system was IO-idle for
8070                  * a jiffy.
8071                  * the system might be non-idle CPU-wise, but we only care
8072                  * about not overloading the IO subsystem. (things like an
8073                  * e2fsck being done on the RAID array should execute fast)
8074                  */
8075                 cond_resched();
8076
8077                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8078                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8079                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
8080
8081                 if (currspeed > speed_min(mddev)) {
8082                         if (currspeed > speed_max(mddev)) {
8083                                 msleep(500);
8084                                 goto repeat;
8085                         }
8086                         if (!is_mddev_idle(mddev, 0)) {
8087                                 /*
8088                                  * Give other IO more of a chance.
8089                                  * The faster the devices, the less we wait.
8090                                  */
8091                                 wait_event(mddev->recovery_wait,
8092                                            !atomic_read(&mddev->recovery_active));
8093                         }
8094                 }
8095         }
8096         pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8097                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8098                 ? "interrupted" : "done");
8099         /*
8100          * this also signals 'finished resyncing' to md_stop
8101          */
8102         blk_finish_plug(&plug);
8103         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8104
8105         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8106             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8107             mddev->curr_resync > 3) {
8108                 mddev->curr_resync_completed = mddev->curr_resync;
8109                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8110         }
8111         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8112
8113         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8114             mddev->curr_resync > 3) {
8115                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8116                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8117                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8118                                         pr_debug("md: checkpointing %s of %s.\n",
8119                                                  desc, mdname(mddev));
8120                                         if (test_bit(MD_RECOVERY_ERROR,
8121                                                 &mddev->recovery))
8122                                                 mddev->recovery_cp =
8123                                                         mddev->curr_resync_completed;
8124                                         else
8125                                                 mddev->recovery_cp =
8126                                                         mddev->curr_resync;
8127                                 }
8128                         } else
8129                                 mddev->recovery_cp = MaxSector;
8130                 } else {
8131                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8132                                 mddev->curr_resync = MaxSector;
8133                         rcu_read_lock();
8134                         rdev_for_each_rcu(rdev, mddev)
8135                                 if (rdev->raid_disk >= 0 &&
8136                                     mddev->delta_disks >= 0 &&
8137                                     !test_bit(Journal, &rdev->flags) &&
8138                                     !test_bit(Faulty, &rdev->flags) &&
8139                                     !test_bit(In_sync, &rdev->flags) &&
8140                                     rdev->recovery_offset < mddev->curr_resync)
8141                                         rdev->recovery_offset = mddev->curr_resync;
8142                         rcu_read_unlock();
8143                 }
8144         }
8145  skip:
8146         /* set CHANGE_PENDING here since maybe another update is needed,
8147          * so other nodes are informed. It should be harmless for normal
8148          * raid */
8149         set_mask_bits(&mddev->flags, 0,
8150                       BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
8151
8152         spin_lock(&mddev->lock);
8153         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8154                 /* We completed so min/max setting can be forgotten if used. */
8155                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8156                         mddev->resync_min = 0;
8157                 mddev->resync_max = MaxSector;
8158         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8159                 mddev->resync_min = mddev->curr_resync_completed;
8160         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8161         mddev->curr_resync = 0;
8162         spin_unlock(&mddev->lock);
8163
8164         wake_up(&resync_wait);
8165         md_wakeup_thread(mddev->thread);
8166         return;
8167 }
8168 EXPORT_SYMBOL_GPL(md_do_sync);
8169
8170 static int remove_and_add_spares(struct mddev *mddev,
8171                                  struct md_rdev *this)
8172 {
8173         struct md_rdev *rdev;
8174         int spares = 0;
8175         int removed = 0;
8176         bool remove_some = false;
8177
8178         rdev_for_each(rdev, mddev) {
8179                 if ((this == NULL || rdev == this) &&
8180                     rdev->raid_disk >= 0 &&
8181                     !test_bit(Blocked, &rdev->flags) &&
8182                     test_bit(Faulty, &rdev->flags) &&
8183                     atomic_read(&rdev->nr_pending)==0) {
8184                         /* Faulty non-Blocked devices with nr_pending == 0
8185                          * never get nr_pending incremented,
8186                          * never get Faulty cleared, and never get Blocked set.
8187                          * So we can synchronize_rcu now rather than once per device
8188                          */
8189                         remove_some = true;
8190                         set_bit(RemoveSynchronized, &rdev->flags);
8191                 }
8192         }
8193
8194         if (remove_some)
8195                 synchronize_rcu();
8196         rdev_for_each(rdev, mddev) {
8197                 if ((this == NULL || rdev == this) &&
8198                     rdev->raid_disk >= 0 &&
8199                     !test_bit(Blocked, &rdev->flags) &&
8200                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
8201                      (!test_bit(In_sync, &rdev->flags) &&
8202                       !test_bit(Journal, &rdev->flags))) &&
8203                     atomic_read(&rdev->nr_pending)==0)) {
8204                         if (mddev->pers->hot_remove_disk(
8205                                     mddev, rdev) == 0) {
8206                                 sysfs_unlink_rdev(mddev, rdev);
8207                                 rdev->raid_disk = -1;
8208                                 removed++;
8209                         }
8210                 }
8211                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
8212                         clear_bit(RemoveSynchronized, &rdev->flags);
8213         }
8214
8215         if (removed && mddev->kobj.sd)
8216                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8217
8218         if (this && removed)
8219                 goto no_add;
8220
8221         rdev_for_each(rdev, mddev) {
8222                 if (this && this != rdev)
8223                         continue;
8224                 if (test_bit(Candidate, &rdev->flags))
8225                         continue;
8226                 if (rdev->raid_disk >= 0 &&
8227                     !test_bit(In_sync, &rdev->flags) &&
8228                     !test_bit(Journal, &rdev->flags) &&
8229                     !test_bit(Faulty, &rdev->flags))
8230                         spares++;
8231                 if (rdev->raid_disk >= 0)
8232                         continue;
8233                 if (test_bit(Faulty, &rdev->flags))
8234                         continue;
8235                 if (!test_bit(Journal, &rdev->flags)) {
8236                         if (mddev->ro &&
8237                             ! (rdev->saved_raid_disk >= 0 &&
8238                                !test_bit(Bitmap_sync, &rdev->flags)))
8239                                 continue;
8240
8241                         rdev->recovery_offset = 0;
8242                 }
8243                 if (mddev->pers->
8244                     hot_add_disk(mddev, rdev) == 0) {
8245                         if (sysfs_link_rdev(mddev, rdev))
8246                                 /* failure here is OK */;
8247                         if (!test_bit(Journal, &rdev->flags))
8248                                 spares++;
8249                         md_new_event(mddev);
8250                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8251                 }
8252         }
8253 no_add:
8254         if (removed)
8255                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
8256         return spares;
8257 }
8258
8259 static void md_start_sync(struct work_struct *ws)
8260 {
8261         struct mddev *mddev = container_of(ws, struct mddev, del_work);
8262
8263         mddev->sync_thread = md_register_thread(md_do_sync,
8264                                                 mddev,
8265                                                 "resync");
8266         if (!mddev->sync_thread) {
8267                 pr_warn("%s: could not start resync thread...\n",
8268                         mdname(mddev));
8269                 /* leave the spares where they are, it shouldn't hurt */
8270                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8271                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8272                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8273                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8274                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8275                 wake_up(&resync_wait);
8276                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8277                                        &mddev->recovery))
8278                         if (mddev->sysfs_action)
8279                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
8280         } else
8281                 md_wakeup_thread(mddev->sync_thread);
8282         sysfs_notify_dirent_safe(mddev->sysfs_action);
8283         md_new_event(mddev);
8284 }
8285
8286 /*
8287  * This routine is regularly called by all per-raid-array threads to
8288  * deal with generic issues like resync and super-block update.
8289  * Raid personalities that don't have a thread (linear/raid0) do not
8290  * need this as they never do any recovery or update the superblock.
8291  *
8292  * It does not do any resync itself, but rather "forks" off other threads
8293  * to do that as needed.
8294  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8295  * "->recovery" and create a thread at ->sync_thread.
8296  * When the thread finishes it sets MD_RECOVERY_DONE
8297  * and wakeups up this thread which will reap the thread and finish up.
8298  * This thread also removes any faulty devices (with nr_pending == 0).
8299  *
8300  * The overall approach is:
8301  *  1/ if the superblock needs updating, update it.
8302  *  2/ If a recovery thread is running, don't do anything else.
8303  *  3/ If recovery has finished, clean up, possibly marking spares active.
8304  *  4/ If there are any faulty devices, remove them.
8305  *  5/ If array is degraded, try to add spares devices
8306  *  6/ If array has spares or is not in-sync, start a resync thread.
8307  */
8308 void md_check_recovery(struct mddev *mddev)
8309 {
8310         if (mddev->suspended)
8311                 return;
8312
8313         if (mddev->bitmap)
8314                 bitmap_daemon_work(mddev);
8315
8316         if (signal_pending(current)) {
8317                 if (mddev->pers->sync_request && !mddev->external) {
8318                         pr_debug("md: %s in immediate safe mode\n",
8319                                  mdname(mddev));
8320                         mddev->safemode = 2;
8321                 }
8322                 flush_signals(current);
8323         }
8324
8325         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8326                 return;
8327         if ( ! (
8328                 (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
8329                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8330                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8331                 test_bit(MD_RELOAD_SB, &mddev->flags) ||
8332                 (mddev->external == 0 && mddev->safemode == 1) ||
8333                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
8334                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
8335                 ))
8336                 return;
8337
8338         if (mddev_trylock(mddev)) {
8339                 int spares = 0;
8340
8341                 if (mddev->ro) {
8342                         struct md_rdev *rdev;
8343                         if (!mddev->external && mddev->in_sync)
8344                                 /* 'Blocked' flag not needed as failed devices
8345                                  * will be recorded if array switched to read/write.
8346                                  * Leaving it set will prevent the device
8347                                  * from being removed.
8348                                  */
8349                                 rdev_for_each(rdev, mddev)
8350                                         clear_bit(Blocked, &rdev->flags);
8351                         /* On a read-only array we can:
8352                          * - remove failed devices
8353                          * - add already-in_sync devices if the array itself
8354                          *   is in-sync.
8355                          * As we only add devices that are already in-sync,
8356                          * we can activate the spares immediately.
8357                          */
8358                         remove_and_add_spares(mddev, NULL);
8359                         /* There is no thread, but we need to call
8360                          * ->spare_active and clear saved_raid_disk
8361                          */
8362                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8363                         md_reap_sync_thread(mddev);
8364                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8365                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8366                         clear_bit(MD_CHANGE_PENDING, &mddev->flags);
8367                         goto unlock;
8368                 }
8369
8370                 if (mddev_is_clustered(mddev)) {
8371                         struct md_rdev *rdev;
8372                         /* kick the device if another node issued a
8373                          * remove disk.
8374                          */
8375                         rdev_for_each(rdev, mddev) {
8376                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
8377                                                 rdev->raid_disk < 0)
8378                                         md_kick_rdev_from_array(rdev);
8379                         }
8380
8381                         if (test_and_clear_bit(MD_RELOAD_SB, &mddev->flags))
8382                                 md_reload_sb(mddev, mddev->good_device_nr);
8383                 }
8384
8385                 if (!mddev->external) {
8386                         int did_change = 0;
8387                         spin_lock(&mddev->lock);
8388                         if (mddev->safemode &&
8389                             !atomic_read(&mddev->writes_pending) &&
8390                             !mddev->in_sync &&
8391                             mddev->recovery_cp == MaxSector) {
8392                                 mddev->in_sync = 1;
8393                                 did_change = 1;
8394                                 set_bit(MD_CHANGE_CLEAN, &mddev->flags);
8395                         }
8396                         if (mddev->safemode == 1)
8397                                 mddev->safemode = 0;
8398                         spin_unlock(&mddev->lock);
8399                         if (did_change)
8400                                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8401                 }
8402
8403                 if (mddev->flags & MD_UPDATE_SB_FLAGS)
8404                         md_update_sb(mddev, 0);
8405
8406                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
8407                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
8408                         /* resync/recovery still happening */
8409                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8410                         goto unlock;
8411                 }
8412                 if (mddev->sync_thread) {
8413                         md_reap_sync_thread(mddev);
8414                         goto unlock;
8415                 }
8416                 /* Set RUNNING before clearing NEEDED to avoid
8417                  * any transients in the value of "sync_action".
8418                  */
8419                 mddev->curr_resync_completed = 0;
8420                 spin_lock(&mddev->lock);
8421                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8422                 spin_unlock(&mddev->lock);
8423                 /* Clear some bits that don't mean anything, but
8424                  * might be left set
8425                  */
8426                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
8427                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8428
8429                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8430                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
8431                         goto not_running;
8432                 /* no recovery is running.
8433                  * remove any failed drives, then
8434                  * add spares if possible.
8435                  * Spares are also removed and re-added, to allow
8436                  * the personality to fail the re-add.
8437                  */
8438
8439                 if (mddev->reshape_position != MaxSector) {
8440                         if (mddev->pers->check_reshape == NULL ||
8441                             mddev->pers->check_reshape(mddev) != 0)
8442                                 /* Cannot proceed */
8443                                 goto not_running;
8444                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8445                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8446                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
8447                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8448                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8449                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8450                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8451                 } else if (mddev->recovery_cp < MaxSector) {
8452                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8453                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8454                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
8455                         /* nothing to be done ... */
8456                         goto not_running;
8457
8458                 if (mddev->pers->sync_request) {
8459                         if (spares) {
8460                                 /* We are adding a device or devices to an array
8461                                  * which has the bitmap stored on all devices.
8462                                  * So make sure all bitmap pages get written
8463                                  */
8464                                 bitmap_write_all(mddev->bitmap);
8465                         }
8466                         INIT_WORK(&mddev->del_work, md_start_sync);
8467                         queue_work(md_misc_wq, &mddev->del_work);
8468                         goto unlock;
8469                 }
8470         not_running:
8471                 if (!mddev->sync_thread) {
8472                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8473                         wake_up(&resync_wait);
8474                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8475                                                &mddev->recovery))
8476                                 if (mddev->sysfs_action)
8477                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
8478                 }
8479         unlock:
8480                 wake_up(&mddev->sb_wait);
8481                 mddev_unlock(mddev);
8482         }
8483 }
8484 EXPORT_SYMBOL(md_check_recovery);
8485
8486 void md_reap_sync_thread(struct mddev *mddev)
8487 {
8488         struct md_rdev *rdev;
8489
8490         /* resync has finished, collect result */
8491         md_unregister_thread(&mddev->sync_thread);
8492         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8493             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8494                 /* success...*/
8495                 /* activate any spares */
8496                 if (mddev->pers->spare_active(mddev)) {
8497                         sysfs_notify(&mddev->kobj, NULL,
8498                                      "degraded");
8499                         set_bit(MD_CHANGE_DEVS, &mddev->flags);
8500                 }
8501         }
8502         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8503             mddev->pers->finish_reshape)
8504                 mddev->pers->finish_reshape(mddev);
8505
8506         /* If array is no-longer degraded, then any saved_raid_disk
8507          * information must be scrapped.
8508          */
8509         if (!mddev->degraded)
8510                 rdev_for_each(rdev, mddev)
8511                         rdev->saved_raid_disk = -1;
8512
8513         md_update_sb(mddev, 1);
8514         /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can
8515          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
8516          * clustered raid */
8517         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
8518                 md_cluster_ops->resync_finish(mddev);
8519         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8520         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8521         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8522         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8523         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8524         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8525         wake_up(&resync_wait);
8526         /* flag recovery needed just to double check */
8527         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8528         sysfs_notify_dirent_safe(mddev->sysfs_action);
8529         md_new_event(mddev);
8530         if (mddev->event_work.func)
8531                 queue_work(md_misc_wq, &mddev->event_work);
8532 }
8533 EXPORT_SYMBOL(md_reap_sync_thread);
8534
8535 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8536 {
8537         sysfs_notify_dirent_safe(rdev->sysfs_state);
8538         wait_event_timeout(rdev->blocked_wait,
8539                            !test_bit(Blocked, &rdev->flags) &&
8540                            !test_bit(BlockedBadBlocks, &rdev->flags),
8541                            msecs_to_jiffies(5000));
8542         rdev_dec_pending(rdev, mddev);
8543 }
8544 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8545
8546 void md_finish_reshape(struct mddev *mddev)
8547 {
8548         /* called be personality module when reshape completes. */
8549         struct md_rdev *rdev;
8550
8551         rdev_for_each(rdev, mddev) {
8552                 if (rdev->data_offset > rdev->new_data_offset)
8553                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8554                 else
8555                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8556                 rdev->data_offset = rdev->new_data_offset;
8557         }
8558 }
8559 EXPORT_SYMBOL(md_finish_reshape);
8560
8561 /* Bad block management */
8562
8563 /* Returns 1 on success, 0 on failure */
8564 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8565                        int is_new)
8566 {
8567         struct mddev *mddev = rdev->mddev;
8568         int rv;
8569         if (is_new)
8570                 s += rdev->new_data_offset;
8571         else
8572                 s += rdev->data_offset;
8573         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
8574         if (rv == 0) {
8575                 /* Make sure they get written out promptly */
8576                 if (test_bit(ExternalBbl, &rdev->flags))
8577                         sysfs_notify(&rdev->kobj, NULL,
8578                                      "unacknowledged_bad_blocks");
8579                 sysfs_notify_dirent_safe(rdev->sysfs_state);
8580                 set_mask_bits(&mddev->flags, 0,
8581                               BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
8582                 md_wakeup_thread(rdev->mddev->thread);
8583                 return 1;
8584         } else
8585                 return 0;
8586 }
8587 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
8588
8589 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8590                          int is_new)
8591 {
8592         int rv;
8593         if (is_new)
8594                 s += rdev->new_data_offset;
8595         else
8596                 s += rdev->data_offset;
8597         rv = badblocks_clear(&rdev->badblocks, s, sectors);
8598         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
8599                 sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
8600         return rv;
8601 }
8602 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
8603
8604 static int md_notify_reboot(struct notifier_block *this,
8605                             unsigned long code, void *x)
8606 {
8607         struct list_head *tmp;
8608         struct mddev *mddev;
8609         int need_delay = 0;
8610
8611         for_each_mddev(mddev, tmp) {
8612                 if (mddev_trylock(mddev)) {
8613                         if (mddev->pers)
8614                                 __md_stop_writes(mddev);
8615                         if (mddev->persistent)
8616                                 mddev->safemode = 2;
8617                         mddev_unlock(mddev);
8618                 }
8619                 need_delay = 1;
8620         }
8621         /*
8622          * certain more exotic SCSI devices are known to be
8623          * volatile wrt too early system reboots. While the
8624          * right place to handle this issue is the given
8625          * driver, we do want to have a safe RAID driver ...
8626          */
8627         if (need_delay)
8628                 mdelay(1000*1);
8629
8630         return NOTIFY_DONE;
8631 }
8632
8633 static struct notifier_block md_notifier = {
8634         .notifier_call  = md_notify_reboot,
8635         .next           = NULL,
8636         .priority       = INT_MAX, /* before any real devices */
8637 };
8638
8639 static void md_geninit(void)
8640 {
8641         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8642
8643         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8644 }
8645
8646 static int __init md_init(void)
8647 {
8648         int ret = -ENOMEM;
8649
8650         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8651         if (!md_wq)
8652                 goto err_wq;
8653
8654         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8655         if (!md_misc_wq)
8656                 goto err_misc_wq;
8657
8658         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8659                 goto err_md;
8660
8661         if ((ret = register_blkdev(0, "mdp")) < 0)
8662                 goto err_mdp;
8663         mdp_major = ret;
8664
8665         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8666                             md_probe, NULL, NULL);
8667         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8668                             md_probe, NULL, NULL);
8669
8670         register_reboot_notifier(&md_notifier);
8671         raid_table_header = register_sysctl_table(raid_root_table);
8672
8673         md_geninit();
8674         return 0;
8675
8676 err_mdp:
8677         unregister_blkdev(MD_MAJOR, "md");
8678 err_md:
8679         destroy_workqueue(md_misc_wq);
8680 err_misc_wq:
8681         destroy_workqueue(md_wq);
8682 err_wq:
8683         return ret;
8684 }
8685
8686 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8687 {
8688         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8689         struct md_rdev *rdev2;
8690         int role, ret;
8691         char b[BDEVNAME_SIZE];
8692
8693         /* Check for change of roles in the active devices */
8694         rdev_for_each(rdev2, mddev) {
8695                 if (test_bit(Faulty, &rdev2->flags))
8696                         continue;
8697
8698                 /* Check if the roles changed */
8699                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
8700
8701                 if (test_bit(Candidate, &rdev2->flags)) {
8702                         if (role == 0xfffe) {
8703                                 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
8704                                 md_kick_rdev_from_array(rdev2);
8705                                 continue;
8706                         }
8707                         else
8708                                 clear_bit(Candidate, &rdev2->flags);
8709                 }
8710
8711                 if (role != rdev2->raid_disk) {
8712                         /* got activated */
8713                         if (rdev2->raid_disk == -1 && role != 0xffff) {
8714                                 rdev2->saved_raid_disk = role;
8715                                 ret = remove_and_add_spares(mddev, rdev2);
8716                                 pr_info("Activated spare: %s\n",
8717                                         bdevname(rdev2->bdev,b));
8718                                 /* wakeup mddev->thread here, so array could
8719                                  * perform resync with the new activated disk */
8720                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8721                                 md_wakeup_thread(mddev->thread);
8722
8723                         }
8724                         /* device faulty
8725                          * We just want to do the minimum to mark the disk
8726                          * as faulty. The recovery is performed by the
8727                          * one who initiated the error.
8728                          */
8729                         if ((role == 0xfffe) || (role == 0xfffd)) {
8730                                 md_error(mddev, rdev2);
8731                                 clear_bit(Blocked, &rdev2->flags);
8732                         }
8733                 }
8734         }
8735
8736         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
8737                 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
8738
8739         /* Finally set the event to be up to date */
8740         mddev->events = le64_to_cpu(sb->events);
8741 }
8742
8743 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
8744 {
8745         int err;
8746         struct page *swapout = rdev->sb_page;
8747         struct mdp_superblock_1 *sb;
8748
8749         /* Store the sb page of the rdev in the swapout temporary
8750          * variable in case we err in the future
8751          */
8752         rdev->sb_page = NULL;
8753         err = alloc_disk_sb(rdev);
8754         if (err == 0) {
8755                 ClearPageUptodate(rdev->sb_page);
8756                 rdev->sb_loaded = 0;
8757                 err = super_types[mddev->major_version].
8758                         load_super(rdev, NULL, mddev->minor_version);
8759         }
8760         if (err < 0) {
8761                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
8762                                 __func__, __LINE__, rdev->desc_nr, err);
8763                 if (rdev->sb_page)
8764                         put_page(rdev->sb_page);
8765                 rdev->sb_page = swapout;
8766                 rdev->sb_loaded = 1;
8767                 return err;
8768         }
8769
8770         sb = page_address(rdev->sb_page);
8771         /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
8772          * is not set
8773          */
8774
8775         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
8776                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
8777
8778         /* The other node finished recovery, call spare_active to set
8779          * device In_sync and mddev->degraded
8780          */
8781         if (rdev->recovery_offset == MaxSector &&
8782             !test_bit(In_sync, &rdev->flags) &&
8783             mddev->pers->spare_active(mddev))
8784                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8785
8786         put_page(swapout);
8787         return 0;
8788 }
8789
8790 void md_reload_sb(struct mddev *mddev, int nr)
8791 {
8792         struct md_rdev *rdev;
8793         int err;
8794
8795         /* Find the rdev */
8796         rdev_for_each_rcu(rdev, mddev) {
8797                 if (rdev->desc_nr == nr)
8798                         break;
8799         }
8800
8801         if (!rdev || rdev->desc_nr != nr) {
8802                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
8803                 return;
8804         }
8805
8806         err = read_rdev(mddev, rdev);
8807         if (err < 0)
8808                 return;
8809
8810         check_sb_changes(mddev, rdev);
8811
8812         /* Read all rdev's to update recovery_offset */
8813         rdev_for_each_rcu(rdev, mddev)
8814                 read_rdev(mddev, rdev);
8815 }
8816 EXPORT_SYMBOL(md_reload_sb);
8817
8818 #ifndef MODULE
8819
8820 /*
8821  * Searches all registered partitions for autorun RAID arrays
8822  * at boot time.
8823  */
8824
8825 static DEFINE_MUTEX(detected_devices_mutex);
8826 static LIST_HEAD(all_detected_devices);
8827 struct detected_devices_node {
8828         struct list_head list;
8829         dev_t dev;
8830 };
8831
8832 void md_autodetect_dev(dev_t dev)
8833 {
8834         struct detected_devices_node *node_detected_dev;
8835
8836         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
8837         if (node_detected_dev) {
8838                 node_detected_dev->dev = dev;
8839                 mutex_lock(&detected_devices_mutex);
8840                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
8841                 mutex_unlock(&detected_devices_mutex);
8842         }
8843 }
8844
8845 static void autostart_arrays(int part)
8846 {
8847         struct md_rdev *rdev;
8848         struct detected_devices_node *node_detected_dev;
8849         dev_t dev;
8850         int i_scanned, i_passed;
8851
8852         i_scanned = 0;
8853         i_passed = 0;
8854
8855         pr_info("md: Autodetecting RAID arrays.\n");
8856
8857         mutex_lock(&detected_devices_mutex);
8858         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
8859                 i_scanned++;
8860                 node_detected_dev = list_entry(all_detected_devices.next,
8861                                         struct detected_devices_node, list);
8862                 list_del(&node_detected_dev->list);
8863                 dev = node_detected_dev->dev;
8864                 kfree(node_detected_dev);
8865                 mutex_unlock(&detected_devices_mutex);
8866                 rdev = md_import_device(dev,0, 90);
8867                 mutex_lock(&detected_devices_mutex);
8868                 if (IS_ERR(rdev))
8869                         continue;
8870
8871                 if (test_bit(Faulty, &rdev->flags))
8872                         continue;
8873
8874                 set_bit(AutoDetected, &rdev->flags);
8875                 list_add(&rdev->same_set, &pending_raid_disks);
8876                 i_passed++;
8877         }
8878         mutex_unlock(&detected_devices_mutex);
8879
8880         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
8881
8882         autorun_devices(part);
8883 }
8884
8885 #endif /* !MODULE */
8886
8887 static __exit void md_exit(void)
8888 {
8889         struct mddev *mddev;
8890         struct list_head *tmp;
8891         int delay = 1;
8892
8893         blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
8894         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
8895
8896         unregister_blkdev(MD_MAJOR,"md");
8897         unregister_blkdev(mdp_major, "mdp");
8898         unregister_reboot_notifier(&md_notifier);
8899         unregister_sysctl_table(raid_table_header);
8900
8901         /* We cannot unload the modules while some process is
8902          * waiting for us in select() or poll() - wake them up
8903          */
8904         md_unloading = 1;
8905         while (waitqueue_active(&md_event_waiters)) {
8906                 /* not safe to leave yet */
8907                 wake_up(&md_event_waiters);
8908                 msleep(delay);
8909                 delay += delay;
8910         }
8911         remove_proc_entry("mdstat", NULL);
8912
8913         for_each_mddev(mddev, tmp) {
8914                 export_array(mddev);
8915                 mddev->hold_active = 0;
8916         }
8917         destroy_workqueue(md_misc_wq);
8918         destroy_workqueue(md_wq);
8919 }
8920
8921 subsys_initcall(md_init);
8922 module_exit(md_exit)
8923
8924 static int get_ro(char *buffer, struct kernel_param *kp)
8925 {
8926         return sprintf(buffer, "%d", start_readonly);
8927 }
8928 static int set_ro(const char *val, struct kernel_param *kp)
8929 {
8930         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
8931 }
8932
8933 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
8934 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
8935 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
8936
8937 MODULE_LICENSE("GPL");
8938 MODULE_DESCRIPTION("MD RAID framework");
8939 MODULE_ALIAS("md");
8940 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);