]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/md/md.c
ARM: exynos_defconfig: Increase CONFIG_CMA_SIZE_MBYTES to 96
[karo-tx-linux.git] / drivers / md / md.c
1 /*
2    md.c : Multiple Devices driver for Linux
3      Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33
34    Errors, Warnings, etc.
35    Please use:
36      pr_crit() for error conditions that risk data loss
37      pr_err() for error conditions that are unexpected, like an IO error
38          or internal inconsistency
39      pr_warn() for error conditions that could have been predicated, like
40          adding a device to an array when it has incompatible metadata
41      pr_info() for every interesting, very rare events, like an array starting
42          or stopping, or resync starting or stopping
43      pr_debug() for everything else.
44
45 */
46
47 #include <linux/sched/signal.h>
48 #include <linux/kthread.h>
49 #include <linux/blkdev.h>
50 #include <linux/badblocks.h>
51 #include <linux/sysctl.h>
52 #include <linux/seq_file.h>
53 #include <linux/fs.h>
54 #include <linux/poll.h>
55 #include <linux/ctype.h>
56 #include <linux/string.h>
57 #include <linux/hdreg.h>
58 #include <linux/proc_fs.h>
59 #include <linux/random.h>
60 #include <linux/module.h>
61 #include <linux/reboot.h>
62 #include <linux/file.h>
63 #include <linux/compat.h>
64 #include <linux/delay.h>
65 #include <linux/raid/md_p.h>
66 #include <linux/raid/md_u.h>
67 #include <linux/slab.h>
68 #include <trace/events/block.h>
69 #include "md.h"
70 #include "bitmap.h"
71 #include "md-cluster.h"
72
73 #ifndef MODULE
74 static void autostart_arrays(int part);
75 #endif
76
77 /* pers_list is a list of registered personalities protected
78  * by pers_lock.
79  * pers_lock does extra service to protect accesses to
80  * mddev->thread when the mutex cannot be held.
81  */
82 static LIST_HEAD(pers_list);
83 static DEFINE_SPINLOCK(pers_lock);
84
85 struct md_cluster_operations *md_cluster_ops;
86 EXPORT_SYMBOL(md_cluster_ops);
87 struct module *md_cluster_mod;
88 EXPORT_SYMBOL(md_cluster_mod);
89
90 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
91 static struct workqueue_struct *md_wq;
92 static struct workqueue_struct *md_misc_wq;
93
94 static int remove_and_add_spares(struct mddev *mddev,
95                                  struct md_rdev *this);
96 static void mddev_detach(struct mddev *mddev);
97
98 /*
99  * Default number of read corrections we'll attempt on an rdev
100  * before ejecting it from the array. We divide the read error
101  * count by 2 for every hour elapsed between read errors.
102  */
103 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
104 /*
105  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
106  * is 1000 KB/sec, so the extra system load does not show up that much.
107  * Increase it if you want to have more _guaranteed_ speed. Note that
108  * the RAID driver will use the maximum available bandwidth if the IO
109  * subsystem is idle. There is also an 'absolute maximum' reconstruction
110  * speed limit - in case reconstruction slows down your system despite
111  * idle IO detection.
112  *
113  * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
114  * or /sys/block/mdX/md/sync_speed_{min,max}
115  */
116
117 static int sysctl_speed_limit_min = 1000;
118 static int sysctl_speed_limit_max = 200000;
119 static inline int speed_min(struct mddev *mddev)
120 {
121         return mddev->sync_speed_min ?
122                 mddev->sync_speed_min : sysctl_speed_limit_min;
123 }
124
125 static inline int speed_max(struct mddev *mddev)
126 {
127         return mddev->sync_speed_max ?
128                 mddev->sync_speed_max : sysctl_speed_limit_max;
129 }
130
131 static struct ctl_table_header *raid_table_header;
132
133 static struct ctl_table raid_table[] = {
134         {
135                 .procname       = "speed_limit_min",
136                 .data           = &sysctl_speed_limit_min,
137                 .maxlen         = sizeof(int),
138                 .mode           = S_IRUGO|S_IWUSR,
139                 .proc_handler   = proc_dointvec,
140         },
141         {
142                 .procname       = "speed_limit_max",
143                 .data           = &sysctl_speed_limit_max,
144                 .maxlen         = sizeof(int),
145                 .mode           = S_IRUGO|S_IWUSR,
146                 .proc_handler   = proc_dointvec,
147         },
148         { }
149 };
150
151 static struct ctl_table raid_dir_table[] = {
152         {
153                 .procname       = "raid",
154                 .maxlen         = 0,
155                 .mode           = S_IRUGO|S_IXUGO,
156                 .child          = raid_table,
157         },
158         { }
159 };
160
161 static struct ctl_table raid_root_table[] = {
162         {
163                 .procname       = "dev",
164                 .maxlen         = 0,
165                 .mode           = 0555,
166                 .child          = raid_dir_table,
167         },
168         {  }
169 };
170
171 static const struct block_device_operations md_fops;
172
173 static int start_readonly;
174
175 /* bio_clone_mddev
176  * like bio_clone, but with a local bio set
177  */
178
179 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
180                             struct mddev *mddev)
181 {
182         struct bio *b;
183
184         if (!mddev || !mddev->bio_set)
185                 return bio_alloc(gfp_mask, nr_iovecs);
186
187         b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
188         if (!b)
189                 return NULL;
190         return b;
191 }
192 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
193
194 /*
195  * We have a system wide 'event count' that is incremented
196  * on any 'interesting' event, and readers of /proc/mdstat
197  * can use 'poll' or 'select' to find out when the event
198  * count increases.
199  *
200  * Events are:
201  *  start array, stop array, error, add device, remove device,
202  *  start build, activate spare
203  */
204 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
205 static atomic_t md_event_count;
206 void md_new_event(struct mddev *mddev)
207 {
208         atomic_inc(&md_event_count);
209         wake_up(&md_event_waiters);
210 }
211 EXPORT_SYMBOL_GPL(md_new_event);
212
213 /*
214  * Enables to iterate over all existing md arrays
215  * all_mddevs_lock protects this list.
216  */
217 static LIST_HEAD(all_mddevs);
218 static DEFINE_SPINLOCK(all_mddevs_lock);
219
220 /*
221  * iterates through all used mddevs in the system.
222  * We take care to grab the all_mddevs_lock whenever navigating
223  * the list, and to always hold a refcount when unlocked.
224  * Any code which breaks out of this loop while own
225  * a reference to the current mddev and must mddev_put it.
226  */
227 #define for_each_mddev(_mddev,_tmp)                                     \
228                                                                         \
229         for (({ spin_lock(&all_mddevs_lock);                            \
230                 _tmp = all_mddevs.next;                                 \
231                 _mddev = NULL;});                                       \
232              ({ if (_tmp != &all_mddevs)                                \
233                         mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
234                 spin_unlock(&all_mddevs_lock);                          \
235                 if (_mddev) mddev_put(_mddev);                          \
236                 _mddev = list_entry(_tmp, struct mddev, all_mddevs);    \
237                 _tmp != &all_mddevs;});                                 \
238              ({ spin_lock(&all_mddevs_lock);                            \
239                 _tmp = _tmp->next;})                                    \
240                 )
241
242 /* Rather than calling directly into the personality make_request function,
243  * IO requests come here first so that we can check if the device is
244  * being suspended pending a reconfiguration.
245  * We hold a refcount over the call to ->make_request.  By the time that
246  * call has finished, the bio has been linked into some internal structure
247  * and so is visible to ->quiesce(), so we don't need the refcount any more.
248  */
249 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
250 {
251         const int rw = bio_data_dir(bio);
252         struct mddev *mddev = q->queuedata;
253         unsigned int sectors;
254         int cpu;
255
256         blk_queue_split(q, &bio, q->bio_split);
257
258         if (mddev == NULL || mddev->pers == NULL) {
259                 bio_io_error(bio);
260                 return BLK_QC_T_NONE;
261         }
262         if (mddev->ro == 1 && unlikely(rw == WRITE)) {
263                 if (bio_sectors(bio) != 0)
264                         bio->bi_error = -EROFS;
265                 bio_endio(bio);
266                 return BLK_QC_T_NONE;
267         }
268         smp_rmb(); /* Ensure implications of  'active' are visible */
269         rcu_read_lock();
270         if (mddev->suspended) {
271                 DEFINE_WAIT(__wait);
272                 for (;;) {
273                         prepare_to_wait(&mddev->sb_wait, &__wait,
274                                         TASK_UNINTERRUPTIBLE);
275                         if (!mddev->suspended)
276                                 break;
277                         rcu_read_unlock();
278                         schedule();
279                         rcu_read_lock();
280                 }
281                 finish_wait(&mddev->sb_wait, &__wait);
282         }
283         atomic_inc(&mddev->active_io);
284         rcu_read_unlock();
285
286         /*
287          * save the sectors now since our bio can
288          * go away inside make_request
289          */
290         sectors = bio_sectors(bio);
291         /* bio could be mergeable after passing to underlayer */
292         bio->bi_opf &= ~REQ_NOMERGE;
293         mddev->pers->make_request(mddev, bio);
294
295         cpu = part_stat_lock();
296         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
297         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
298         part_stat_unlock();
299
300         if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
301                 wake_up(&mddev->sb_wait);
302
303         return BLK_QC_T_NONE;
304 }
305
306 /* mddev_suspend makes sure no new requests are submitted
307  * to the device, and that any requests that have been submitted
308  * are completely handled.
309  * Once mddev_detach() is called and completes, the module will be
310  * completely unused.
311  */
312 void mddev_suspend(struct mddev *mddev)
313 {
314         WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
315         if (mddev->suspended++)
316                 return;
317         synchronize_rcu();
318         wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
319         mddev->pers->quiesce(mddev, 1);
320
321         del_timer_sync(&mddev->safemode_timer);
322 }
323 EXPORT_SYMBOL_GPL(mddev_suspend);
324
325 void mddev_resume(struct mddev *mddev)
326 {
327         if (--mddev->suspended)
328                 return;
329         wake_up(&mddev->sb_wait);
330         mddev->pers->quiesce(mddev, 0);
331
332         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
333         md_wakeup_thread(mddev->thread);
334         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
335 }
336 EXPORT_SYMBOL_GPL(mddev_resume);
337
338 int mddev_congested(struct mddev *mddev, int bits)
339 {
340         struct md_personality *pers = mddev->pers;
341         int ret = 0;
342
343         rcu_read_lock();
344         if (mddev->suspended)
345                 ret = 1;
346         else if (pers && pers->congested)
347                 ret = pers->congested(mddev, bits);
348         rcu_read_unlock();
349         return ret;
350 }
351 EXPORT_SYMBOL_GPL(mddev_congested);
352 static int md_congested(void *data, int bits)
353 {
354         struct mddev *mddev = data;
355         return mddev_congested(mddev, bits);
356 }
357
358 /*
359  * Generic flush handling for md
360  */
361
362 static void md_end_flush(struct bio *bio)
363 {
364         struct md_rdev *rdev = bio->bi_private;
365         struct mddev *mddev = rdev->mddev;
366
367         rdev_dec_pending(rdev, mddev);
368
369         if (atomic_dec_and_test(&mddev->flush_pending)) {
370                 /* The pre-request flush has finished */
371                 queue_work(md_wq, &mddev->flush_work);
372         }
373         bio_put(bio);
374 }
375
376 static void md_submit_flush_data(struct work_struct *ws);
377
378 static void submit_flushes(struct work_struct *ws)
379 {
380         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
381         struct md_rdev *rdev;
382
383         INIT_WORK(&mddev->flush_work, md_submit_flush_data);
384         atomic_set(&mddev->flush_pending, 1);
385         rcu_read_lock();
386         rdev_for_each_rcu(rdev, mddev)
387                 if (rdev->raid_disk >= 0 &&
388                     !test_bit(Faulty, &rdev->flags)) {
389                         /* Take two references, one is dropped
390                          * when request finishes, one after
391                          * we reclaim rcu_read_lock
392                          */
393                         struct bio *bi;
394                         atomic_inc(&rdev->nr_pending);
395                         atomic_inc(&rdev->nr_pending);
396                         rcu_read_unlock();
397                         bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
398                         bi->bi_end_io = md_end_flush;
399                         bi->bi_private = rdev;
400                         bi->bi_bdev = rdev->bdev;
401                         bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
402                         atomic_inc(&mddev->flush_pending);
403                         submit_bio(bi);
404                         rcu_read_lock();
405                         rdev_dec_pending(rdev, mddev);
406                 }
407         rcu_read_unlock();
408         if (atomic_dec_and_test(&mddev->flush_pending))
409                 queue_work(md_wq, &mddev->flush_work);
410 }
411
412 static void md_submit_flush_data(struct work_struct *ws)
413 {
414         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
415         struct bio *bio = mddev->flush_bio;
416
417         if (bio->bi_iter.bi_size == 0)
418                 /* an empty barrier - all done */
419                 bio_endio(bio);
420         else {
421                 bio->bi_opf &= ~REQ_PREFLUSH;
422                 mddev->pers->make_request(mddev, bio);
423         }
424
425         mddev->flush_bio = NULL;
426         wake_up(&mddev->sb_wait);
427 }
428
429 void md_flush_request(struct mddev *mddev, struct bio *bio)
430 {
431         spin_lock_irq(&mddev->lock);
432         wait_event_lock_irq(mddev->sb_wait,
433                             !mddev->flush_bio,
434                             mddev->lock);
435         mddev->flush_bio = bio;
436         spin_unlock_irq(&mddev->lock);
437
438         INIT_WORK(&mddev->flush_work, submit_flushes);
439         queue_work(md_wq, &mddev->flush_work);
440 }
441 EXPORT_SYMBOL(md_flush_request);
442
443 void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
444 {
445         struct mddev *mddev = cb->data;
446         md_wakeup_thread(mddev->thread);
447         kfree(cb);
448 }
449 EXPORT_SYMBOL(md_unplug);
450
451 static inline struct mddev *mddev_get(struct mddev *mddev)
452 {
453         atomic_inc(&mddev->active);
454         return mddev;
455 }
456
457 static void mddev_delayed_delete(struct work_struct *ws);
458
459 static void mddev_put(struct mddev *mddev)
460 {
461         struct bio_set *bs = NULL;
462
463         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
464                 return;
465         if (!mddev->raid_disks && list_empty(&mddev->disks) &&
466             mddev->ctime == 0 && !mddev->hold_active) {
467                 /* Array is not configured at all, and not held active,
468                  * so destroy it */
469                 list_del_init(&mddev->all_mddevs);
470                 bs = mddev->bio_set;
471                 mddev->bio_set = NULL;
472                 if (mddev->gendisk) {
473                         /* We did a probe so need to clean up.  Call
474                          * queue_work inside the spinlock so that
475                          * flush_workqueue() after mddev_find will
476                          * succeed in waiting for the work to be done.
477                          */
478                         INIT_WORK(&mddev->del_work, mddev_delayed_delete);
479                         queue_work(md_misc_wq, &mddev->del_work);
480                 } else
481                         kfree(mddev);
482         }
483         spin_unlock(&all_mddevs_lock);
484         if (bs)
485                 bioset_free(bs);
486 }
487
488 static void md_safemode_timeout(unsigned long data);
489
490 void mddev_init(struct mddev *mddev)
491 {
492         mutex_init(&mddev->open_mutex);
493         mutex_init(&mddev->reconfig_mutex);
494         mutex_init(&mddev->bitmap_info.mutex);
495         INIT_LIST_HEAD(&mddev->disks);
496         INIT_LIST_HEAD(&mddev->all_mddevs);
497         setup_timer(&mddev->safemode_timer, md_safemode_timeout,
498                     (unsigned long) mddev);
499         atomic_set(&mddev->active, 1);
500         atomic_set(&mddev->openers, 0);
501         atomic_set(&mddev->active_io, 0);
502         spin_lock_init(&mddev->lock);
503         atomic_set(&mddev->flush_pending, 0);
504         init_waitqueue_head(&mddev->sb_wait);
505         init_waitqueue_head(&mddev->recovery_wait);
506         mddev->reshape_position = MaxSector;
507         mddev->reshape_backwards = 0;
508         mddev->last_sync_action = "none";
509         mddev->resync_min = 0;
510         mddev->resync_max = MaxSector;
511         mddev->level = LEVEL_NONE;
512 }
513 EXPORT_SYMBOL_GPL(mddev_init);
514
515 static struct mddev *mddev_find(dev_t unit)
516 {
517         struct mddev *mddev, *new = NULL;
518
519         if (unit && MAJOR(unit) != MD_MAJOR)
520                 unit &= ~((1<<MdpMinorShift)-1);
521
522  retry:
523         spin_lock(&all_mddevs_lock);
524
525         if (unit) {
526                 list_for_each_entry(mddev, &all_mddevs, all_mddevs)
527                         if (mddev->unit == unit) {
528                                 mddev_get(mddev);
529                                 spin_unlock(&all_mddevs_lock);
530                                 kfree(new);
531                                 return mddev;
532                         }
533
534                 if (new) {
535                         list_add(&new->all_mddevs, &all_mddevs);
536                         spin_unlock(&all_mddevs_lock);
537                         new->hold_active = UNTIL_IOCTL;
538                         return new;
539                 }
540         } else if (new) {
541                 /* find an unused unit number */
542                 static int next_minor = 512;
543                 int start = next_minor;
544                 int is_free = 0;
545                 int dev = 0;
546                 while (!is_free) {
547                         dev = MKDEV(MD_MAJOR, next_minor);
548                         next_minor++;
549                         if (next_minor > MINORMASK)
550                                 next_minor = 0;
551                         if (next_minor == start) {
552                                 /* Oh dear, all in use. */
553                                 spin_unlock(&all_mddevs_lock);
554                                 kfree(new);
555                                 return NULL;
556                         }
557
558                         is_free = 1;
559                         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
560                                 if (mddev->unit == dev) {
561                                         is_free = 0;
562                                         break;
563                                 }
564                 }
565                 new->unit = dev;
566                 new->md_minor = MINOR(dev);
567                 new->hold_active = UNTIL_STOP;
568                 list_add(&new->all_mddevs, &all_mddevs);
569                 spin_unlock(&all_mddevs_lock);
570                 return new;
571         }
572         spin_unlock(&all_mddevs_lock);
573
574         new = kzalloc(sizeof(*new), GFP_KERNEL);
575         if (!new)
576                 return NULL;
577
578         new->unit = unit;
579         if (MAJOR(unit) == MD_MAJOR)
580                 new->md_minor = MINOR(unit);
581         else
582                 new->md_minor = MINOR(unit) >> MdpMinorShift;
583
584         mddev_init(new);
585
586         goto retry;
587 }
588
589 static struct attribute_group md_redundancy_group;
590
591 void mddev_unlock(struct mddev *mddev)
592 {
593         if (mddev->to_remove) {
594                 /* These cannot be removed under reconfig_mutex as
595                  * an access to the files will try to take reconfig_mutex
596                  * while holding the file unremovable, which leads to
597                  * a deadlock.
598                  * So hold set sysfs_active while the remove in happeing,
599                  * and anything else which might set ->to_remove or my
600                  * otherwise change the sysfs namespace will fail with
601                  * -EBUSY if sysfs_active is still set.
602                  * We set sysfs_active under reconfig_mutex and elsewhere
603                  * test it under the same mutex to ensure its correct value
604                  * is seen.
605                  */
606                 struct attribute_group *to_remove = mddev->to_remove;
607                 mddev->to_remove = NULL;
608                 mddev->sysfs_active = 1;
609                 mutex_unlock(&mddev->reconfig_mutex);
610
611                 if (mddev->kobj.sd) {
612                         if (to_remove != &md_redundancy_group)
613                                 sysfs_remove_group(&mddev->kobj, to_remove);
614                         if (mddev->pers == NULL ||
615                             mddev->pers->sync_request == NULL) {
616                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
617                                 if (mddev->sysfs_action)
618                                         sysfs_put(mddev->sysfs_action);
619                                 mddev->sysfs_action = NULL;
620                         }
621                 }
622                 mddev->sysfs_active = 0;
623         } else
624                 mutex_unlock(&mddev->reconfig_mutex);
625
626         /* As we've dropped the mutex we need a spinlock to
627          * make sure the thread doesn't disappear
628          */
629         spin_lock(&pers_lock);
630         md_wakeup_thread(mddev->thread);
631         spin_unlock(&pers_lock);
632 }
633 EXPORT_SYMBOL_GPL(mddev_unlock);
634
635 struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
636 {
637         struct md_rdev *rdev;
638
639         rdev_for_each_rcu(rdev, mddev)
640                 if (rdev->desc_nr == nr)
641                         return rdev;
642
643         return NULL;
644 }
645 EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);
646
647 static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
648 {
649         struct md_rdev *rdev;
650
651         rdev_for_each(rdev, mddev)
652                 if (rdev->bdev->bd_dev == dev)
653                         return rdev;
654
655         return NULL;
656 }
657
658 static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
659 {
660         struct md_rdev *rdev;
661
662         rdev_for_each_rcu(rdev, mddev)
663                 if (rdev->bdev->bd_dev == dev)
664                         return rdev;
665
666         return NULL;
667 }
668
669 static struct md_personality *find_pers(int level, char *clevel)
670 {
671         struct md_personality *pers;
672         list_for_each_entry(pers, &pers_list, list) {
673                 if (level != LEVEL_NONE && pers->level == level)
674                         return pers;
675                 if (strcmp(pers->name, clevel)==0)
676                         return pers;
677         }
678         return NULL;
679 }
680
681 /* return the offset of the super block in 512byte sectors */
682 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
683 {
684         sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
685         return MD_NEW_SIZE_SECTORS(num_sectors);
686 }
687
688 static int alloc_disk_sb(struct md_rdev *rdev)
689 {
690         rdev->sb_page = alloc_page(GFP_KERNEL);
691         if (!rdev->sb_page)
692                 return -ENOMEM;
693         return 0;
694 }
695
696 void md_rdev_clear(struct md_rdev *rdev)
697 {
698         if (rdev->sb_page) {
699                 put_page(rdev->sb_page);
700                 rdev->sb_loaded = 0;
701                 rdev->sb_page = NULL;
702                 rdev->sb_start = 0;
703                 rdev->sectors = 0;
704         }
705         if (rdev->bb_page) {
706                 put_page(rdev->bb_page);
707                 rdev->bb_page = NULL;
708         }
709         badblocks_exit(&rdev->badblocks);
710 }
711 EXPORT_SYMBOL_GPL(md_rdev_clear);
712
713 static void super_written(struct bio *bio)
714 {
715         struct md_rdev *rdev = bio->bi_private;
716         struct mddev *mddev = rdev->mddev;
717
718         if (bio->bi_error) {
719                 pr_err("md: super_written gets error=%d\n", bio->bi_error);
720                 md_error(mddev, rdev);
721                 if (!test_bit(Faulty, &rdev->flags)
722                     && (bio->bi_opf & MD_FAILFAST)) {
723                         set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
724                         set_bit(LastDev, &rdev->flags);
725                 }
726         } else
727                 clear_bit(LastDev, &rdev->flags);
728
729         if (atomic_dec_and_test(&mddev->pending_writes))
730                 wake_up(&mddev->sb_wait);
731         rdev_dec_pending(rdev, mddev);
732         bio_put(bio);
733 }
734
735 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
736                    sector_t sector, int size, struct page *page)
737 {
738         /* write first size bytes of page to sector of rdev
739          * Increment mddev->pending_writes before returning
740          * and decrement it on completion, waking up sb_wait
741          * if zero is reached.
742          * If an error occurred, call md_error
743          */
744         struct bio *bio;
745         int ff = 0;
746
747         if (test_bit(Faulty, &rdev->flags))
748                 return;
749
750         bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
751
752         atomic_inc(&rdev->nr_pending);
753
754         bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
755         bio->bi_iter.bi_sector = sector;
756         bio_add_page(bio, page, size, 0);
757         bio->bi_private = rdev;
758         bio->bi_end_io = super_written;
759
760         if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
761             test_bit(FailFast, &rdev->flags) &&
762             !test_bit(LastDev, &rdev->flags))
763                 ff = MD_FAILFAST;
764         bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff;
765
766         atomic_inc(&mddev->pending_writes);
767         submit_bio(bio);
768 }
769
770 int md_super_wait(struct mddev *mddev)
771 {
772         /* wait for all superblock writes that were scheduled to complete */
773         wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
774         if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
775                 return -EAGAIN;
776         return 0;
777 }
778
779 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
780                  struct page *page, int op, int op_flags, bool metadata_op)
781 {
782         struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
783         int ret;
784
785         bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
786                 rdev->meta_bdev : rdev->bdev;
787         bio_set_op_attrs(bio, op, op_flags);
788         if (metadata_op)
789                 bio->bi_iter.bi_sector = sector + rdev->sb_start;
790         else if (rdev->mddev->reshape_position != MaxSector &&
791                  (rdev->mddev->reshape_backwards ==
792                   (sector >= rdev->mddev->reshape_position)))
793                 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
794         else
795                 bio->bi_iter.bi_sector = sector + rdev->data_offset;
796         bio_add_page(bio, page, size, 0);
797
798         submit_bio_wait(bio);
799
800         ret = !bio->bi_error;
801         bio_put(bio);
802         return ret;
803 }
804 EXPORT_SYMBOL_GPL(sync_page_io);
805
806 static int read_disk_sb(struct md_rdev *rdev, int size)
807 {
808         char b[BDEVNAME_SIZE];
809
810         if (rdev->sb_loaded)
811                 return 0;
812
813         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
814                 goto fail;
815         rdev->sb_loaded = 1;
816         return 0;
817
818 fail:
819         pr_err("md: disabled device %s, could not read superblock.\n",
820                bdevname(rdev->bdev,b));
821         return -EINVAL;
822 }
823
824 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
825 {
826         return  sb1->set_uuid0 == sb2->set_uuid0 &&
827                 sb1->set_uuid1 == sb2->set_uuid1 &&
828                 sb1->set_uuid2 == sb2->set_uuid2 &&
829                 sb1->set_uuid3 == sb2->set_uuid3;
830 }
831
832 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
833 {
834         int ret;
835         mdp_super_t *tmp1, *tmp2;
836
837         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
838         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
839
840         if (!tmp1 || !tmp2) {
841                 ret = 0;
842                 goto abort;
843         }
844
845         *tmp1 = *sb1;
846         *tmp2 = *sb2;
847
848         /*
849          * nr_disks is not constant
850          */
851         tmp1->nr_disks = 0;
852         tmp2->nr_disks = 0;
853
854         ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
855 abort:
856         kfree(tmp1);
857         kfree(tmp2);
858         return ret;
859 }
860
861 static u32 md_csum_fold(u32 csum)
862 {
863         csum = (csum & 0xffff) + (csum >> 16);
864         return (csum & 0xffff) + (csum >> 16);
865 }
866
867 static unsigned int calc_sb_csum(mdp_super_t *sb)
868 {
869         u64 newcsum = 0;
870         u32 *sb32 = (u32*)sb;
871         int i;
872         unsigned int disk_csum, csum;
873
874         disk_csum = sb->sb_csum;
875         sb->sb_csum = 0;
876
877         for (i = 0; i < MD_SB_BYTES/4 ; i++)
878                 newcsum += sb32[i];
879         csum = (newcsum & 0xffffffff) + (newcsum>>32);
880
881 #ifdef CONFIG_ALPHA
882         /* This used to use csum_partial, which was wrong for several
883          * reasons including that different results are returned on
884          * different architectures.  It isn't critical that we get exactly
885          * the same return value as before (we always csum_fold before
886          * testing, and that removes any differences).  However as we
887          * know that csum_partial always returned a 16bit value on
888          * alphas, do a fold to maximise conformity to previous behaviour.
889          */
890         sb->sb_csum = md_csum_fold(disk_csum);
891 #else
892         sb->sb_csum = disk_csum;
893 #endif
894         return csum;
895 }
896
897 /*
898  * Handle superblock details.
899  * We want to be able to handle multiple superblock formats
900  * so we have a common interface to them all, and an array of
901  * different handlers.
902  * We rely on user-space to write the initial superblock, and support
903  * reading and updating of superblocks.
904  * Interface methods are:
905  *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
906  *      loads and validates a superblock on dev.
907  *      if refdev != NULL, compare superblocks on both devices
908  *    Return:
909  *      0 - dev has a superblock that is compatible with refdev
910  *      1 - dev has a superblock that is compatible and newer than refdev
911  *          so dev should be used as the refdev in future
912  *     -EINVAL superblock incompatible or invalid
913  *     -othererror e.g. -EIO
914  *
915  *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
916  *      Verify that dev is acceptable into mddev.
917  *       The first time, mddev->raid_disks will be 0, and data from
918  *       dev should be merged in.  Subsequent calls check that dev
919  *       is new enough.  Return 0 or -EINVAL
920  *
921  *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
922  *     Update the superblock for rdev with data in mddev
923  *     This does not write to disc.
924  *
925  */
926
927 struct super_type  {
928         char                *name;
929         struct module       *owner;
930         int                 (*load_super)(struct md_rdev *rdev,
931                                           struct md_rdev *refdev,
932                                           int minor_version);
933         int                 (*validate_super)(struct mddev *mddev,
934                                               struct md_rdev *rdev);
935         void                (*sync_super)(struct mddev *mddev,
936                                           struct md_rdev *rdev);
937         unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
938                                                 sector_t num_sectors);
939         int                 (*allow_new_offset)(struct md_rdev *rdev,
940                                                 unsigned long long new_offset);
941 };
942
943 /*
944  * Check that the given mddev has no bitmap.
945  *
946  * This function is called from the run method of all personalities that do not
947  * support bitmaps. It prints an error message and returns non-zero if mddev
948  * has a bitmap. Otherwise, it returns 0.
949  *
950  */
951 int md_check_no_bitmap(struct mddev *mddev)
952 {
953         if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
954                 return 0;
955         pr_warn("%s: bitmaps are not supported for %s\n",
956                 mdname(mddev), mddev->pers->name);
957         return 1;
958 }
959 EXPORT_SYMBOL(md_check_no_bitmap);
960
961 /*
962  * load_super for 0.90.0
963  */
964 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
965 {
966         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
967         mdp_super_t *sb;
968         int ret;
969
970         /*
971          * Calculate the position of the superblock (512byte sectors),
972          * it's at the end of the disk.
973          *
974          * It also happens to be a multiple of 4Kb.
975          */
976         rdev->sb_start = calc_dev_sboffset(rdev);
977
978         ret = read_disk_sb(rdev, MD_SB_BYTES);
979         if (ret)
980                 return ret;
981
982         ret = -EINVAL;
983
984         bdevname(rdev->bdev, b);
985         sb = page_address(rdev->sb_page);
986
987         if (sb->md_magic != MD_SB_MAGIC) {
988                 pr_warn("md: invalid raid superblock magic on %s\n", b);
989                 goto abort;
990         }
991
992         if (sb->major_version != 0 ||
993             sb->minor_version < 90 ||
994             sb->minor_version > 91) {
995                 pr_warn("Bad version number %d.%d on %s\n",
996                         sb->major_version, sb->minor_version, b);
997                 goto abort;
998         }
999
1000         if (sb->raid_disks <= 0)
1001                 goto abort;
1002
1003         if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
1004                 pr_warn("md: invalid superblock checksum on %s\n", b);
1005                 goto abort;
1006         }
1007
1008         rdev->preferred_minor = sb->md_minor;
1009         rdev->data_offset = 0;
1010         rdev->new_data_offset = 0;
1011         rdev->sb_size = MD_SB_BYTES;
1012         rdev->badblocks.shift = -1;
1013
1014         if (sb->level == LEVEL_MULTIPATH)
1015                 rdev->desc_nr = -1;
1016         else
1017                 rdev->desc_nr = sb->this_disk.number;
1018
1019         if (!refdev) {
1020                 ret = 1;
1021         } else {
1022                 __u64 ev1, ev2;
1023                 mdp_super_t *refsb = page_address(refdev->sb_page);
1024                 if (!uuid_equal(refsb, sb)) {
1025                         pr_warn("md: %s has different UUID to %s\n",
1026                                 b, bdevname(refdev->bdev,b2));
1027                         goto abort;
1028                 }
1029                 if (!sb_equal(refsb, sb)) {
1030                         pr_warn("md: %s has same UUID but different superblock to %s\n",
1031                                 b, bdevname(refdev->bdev, b2));
1032                         goto abort;
1033                 }
1034                 ev1 = md_event(sb);
1035                 ev2 = md_event(refsb);
1036                 if (ev1 > ev2)
1037                         ret = 1;
1038                 else
1039                         ret = 0;
1040         }
1041         rdev->sectors = rdev->sb_start;
1042         /* Limit to 4TB as metadata cannot record more than that.
1043          * (not needed for Linear and RAID0 as metadata doesn't
1044          * record this size)
1045          */
1046         if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
1047             sb->level >= 1)
1048                 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1049
1050         if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1051                 /* "this cannot possibly happen" ... */
1052                 ret = -EINVAL;
1053
1054  abort:
1055         return ret;
1056 }
1057
1058 /*
1059  * validate_super for 0.90.0
1060  */
1061 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
1062 {
1063         mdp_disk_t *desc;
1064         mdp_super_t *sb = page_address(rdev->sb_page);
1065         __u64 ev1 = md_event(sb);
1066
1067         rdev->raid_disk = -1;
1068         clear_bit(Faulty, &rdev->flags);
1069         clear_bit(In_sync, &rdev->flags);
1070         clear_bit(Bitmap_sync, &rdev->flags);
1071         clear_bit(WriteMostly, &rdev->flags);
1072
1073         if (mddev->raid_disks == 0) {
1074                 mddev->major_version = 0;
1075                 mddev->minor_version = sb->minor_version;
1076                 mddev->patch_version = sb->patch_version;
1077                 mddev->external = 0;
1078                 mddev->chunk_sectors = sb->chunk_size >> 9;
1079                 mddev->ctime = sb->ctime;
1080                 mddev->utime = sb->utime;
1081                 mddev->level = sb->level;
1082                 mddev->clevel[0] = 0;
1083                 mddev->layout = sb->layout;
1084                 mddev->raid_disks = sb->raid_disks;
1085                 mddev->dev_sectors = ((sector_t)sb->size) * 2;
1086                 mddev->events = ev1;
1087                 mddev->bitmap_info.offset = 0;
1088                 mddev->bitmap_info.space = 0;
1089                 /* bitmap can use 60 K after the 4K superblocks */
1090                 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
1091                 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
1092                 mddev->reshape_backwards = 0;
1093
1094                 if (mddev->minor_version >= 91) {
1095                         mddev->reshape_position = sb->reshape_position;
1096                         mddev->delta_disks = sb->delta_disks;
1097                         mddev->new_level = sb->new_level;
1098                         mddev->new_layout = sb->new_layout;
1099                         mddev->new_chunk_sectors = sb->new_chunk >> 9;
1100                         if (mddev->delta_disks < 0)
1101                                 mddev->reshape_backwards = 1;
1102                 } else {
1103                         mddev->reshape_position = MaxSector;
1104                         mddev->delta_disks = 0;
1105                         mddev->new_level = mddev->level;
1106                         mddev->new_layout = mddev->layout;
1107                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1108                 }
1109
1110                 if (sb->state & (1<<MD_SB_CLEAN))
1111                         mddev->recovery_cp = MaxSector;
1112                 else {
1113                         if (sb->events_hi == sb->cp_events_hi &&
1114                                 sb->events_lo == sb->cp_events_lo) {
1115                                 mddev->recovery_cp = sb->recovery_cp;
1116                         } else
1117                                 mddev->recovery_cp = 0;
1118                 }
1119
1120                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
1121                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
1122                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
1123                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
1124
1125                 mddev->max_disks = MD_SB_DISKS;
1126
1127                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
1128                     mddev->bitmap_info.file == NULL) {
1129                         mddev->bitmap_info.offset =
1130                                 mddev->bitmap_info.default_offset;
1131                         mddev->bitmap_info.space =
1132                                 mddev->bitmap_info.default_space;
1133                 }
1134
1135         } else if (mddev->pers == NULL) {
1136                 /* Insist on good event counter while assembling, except
1137                  * for spares (which don't need an event count) */
1138                 ++ev1;
1139                 if (sb->disks[rdev->desc_nr].state & (
1140                             (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
1141                         if (ev1 < mddev->events)
1142                                 return -EINVAL;
1143         } else if (mddev->bitmap) {
1144                 /* if adding to array with a bitmap, then we can accept an
1145                  * older device ... but not too old.
1146                  */
1147                 if (ev1 < mddev->bitmap->events_cleared)
1148                         return 0;
1149                 if (ev1 < mddev->events)
1150                         set_bit(Bitmap_sync, &rdev->flags);
1151         } else {
1152                 if (ev1 < mddev->events)
1153                         /* just a hot-add of a new device, leave raid_disk at -1 */
1154                         return 0;
1155         }
1156
1157         if (mddev->level != LEVEL_MULTIPATH) {
1158                 desc = sb->disks + rdev->desc_nr;
1159
1160                 if (desc->state & (1<<MD_DISK_FAULTY))
1161                         set_bit(Faulty, &rdev->flags);
1162                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
1163                             desc->raid_disk < mddev->raid_disks */) {
1164                         set_bit(In_sync, &rdev->flags);
1165                         rdev->raid_disk = desc->raid_disk;
1166                         rdev->saved_raid_disk = desc->raid_disk;
1167                 } else if (desc->state & (1<<MD_DISK_ACTIVE)) {
1168                         /* active but not in sync implies recovery up to
1169                          * reshape position.  We don't know exactly where
1170                          * that is, so set to zero for now */
1171                         if (mddev->minor_version >= 91) {
1172                                 rdev->recovery_offset = 0;
1173                                 rdev->raid_disk = desc->raid_disk;
1174                         }
1175                 }
1176                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
1177                         set_bit(WriteMostly, &rdev->flags);
1178                 if (desc->state & (1<<MD_DISK_FAILFAST))
1179                         set_bit(FailFast, &rdev->flags);
1180         } else /* MULTIPATH are always insync */
1181                 set_bit(In_sync, &rdev->flags);
1182         return 0;
1183 }
1184
1185 /*
1186  * sync_super for 0.90.0
1187  */
1188 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1189 {
1190         mdp_super_t *sb;
1191         struct md_rdev *rdev2;
1192         int next_spare = mddev->raid_disks;
1193
1194         /* make rdev->sb match mddev data..
1195          *
1196          * 1/ zero out disks
1197          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
1198          * 3/ any empty disks < next_spare become removed
1199          *
1200          * disks[0] gets initialised to REMOVED because
1201          * we cannot be sure from other fields if it has
1202          * been initialised or not.
1203          */
1204         int i;
1205         int active=0, working=0,failed=0,spare=0,nr_disks=0;
1206
1207         rdev->sb_size = MD_SB_BYTES;
1208
1209         sb = page_address(rdev->sb_page);
1210
1211         memset(sb, 0, sizeof(*sb));
1212
1213         sb->md_magic = MD_SB_MAGIC;
1214         sb->major_version = mddev->major_version;
1215         sb->patch_version = mddev->patch_version;
1216         sb->gvalid_words  = 0; /* ignored */
1217         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
1218         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
1219         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
1220         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
1221
1222         sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
1223         sb->level = mddev->level;
1224         sb->size = mddev->dev_sectors / 2;
1225         sb->raid_disks = mddev->raid_disks;
1226         sb->md_minor = mddev->md_minor;
1227         sb->not_persistent = 0;
1228         sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
1229         sb->state = 0;
1230         sb->events_hi = (mddev->events>>32);
1231         sb->events_lo = (u32)mddev->events;
1232
1233         if (mddev->reshape_position == MaxSector)
1234                 sb->minor_version = 90;
1235         else {
1236                 sb->minor_version = 91;
1237                 sb->reshape_position = mddev->reshape_position;
1238                 sb->new_level = mddev->new_level;
1239                 sb->delta_disks = mddev->delta_disks;
1240                 sb->new_layout = mddev->new_layout;
1241                 sb->new_chunk = mddev->new_chunk_sectors << 9;
1242         }
1243         mddev->minor_version = sb->minor_version;
1244         if (mddev->in_sync)
1245         {
1246                 sb->recovery_cp = mddev->recovery_cp;
1247                 sb->cp_events_hi = (mddev->events>>32);
1248                 sb->cp_events_lo = (u32)mddev->events;
1249                 if (mddev->recovery_cp == MaxSector)
1250                         sb->state = (1<< MD_SB_CLEAN);
1251         } else
1252                 sb->recovery_cp = 0;
1253
1254         sb->layout = mddev->layout;
1255         sb->chunk_size = mddev->chunk_sectors << 9;
1256
1257         if (mddev->bitmap && mddev->bitmap_info.file == NULL)
1258                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
1259
1260         sb->disks[0].state = (1<<MD_DISK_REMOVED);
1261         rdev_for_each(rdev2, mddev) {
1262                 mdp_disk_t *d;
1263                 int desc_nr;
1264                 int is_active = test_bit(In_sync, &rdev2->flags);
1265
1266                 if (rdev2->raid_disk >= 0 &&
1267                     sb->minor_version >= 91)
1268                         /* we have nowhere to store the recovery_offset,
1269                          * but if it is not below the reshape_position,
1270                          * we can piggy-back on that.
1271                          */
1272                         is_active = 1;
1273                 if (rdev2->raid_disk < 0 ||
1274                     test_bit(Faulty, &rdev2->flags))
1275                         is_active = 0;
1276                 if (is_active)
1277                         desc_nr = rdev2->raid_disk;
1278                 else
1279                         desc_nr = next_spare++;
1280                 rdev2->desc_nr = desc_nr;
1281                 d = &sb->disks[rdev2->desc_nr];
1282                 nr_disks++;
1283                 d->number = rdev2->desc_nr;
1284                 d->major = MAJOR(rdev2->bdev->bd_dev);
1285                 d->minor = MINOR(rdev2->bdev->bd_dev);
1286                 if (is_active)
1287                         d->raid_disk = rdev2->raid_disk;
1288                 else
1289                         d->raid_disk = rdev2->desc_nr; /* compatibility */
1290                 if (test_bit(Faulty, &rdev2->flags))
1291                         d->state = (1<<MD_DISK_FAULTY);
1292                 else if (is_active) {
1293                         d->state = (1<<MD_DISK_ACTIVE);
1294                         if (test_bit(In_sync, &rdev2->flags))
1295                                 d->state |= (1<<MD_DISK_SYNC);
1296                         active++;
1297                         working++;
1298                 } else {
1299                         d->state = 0;
1300                         spare++;
1301                         working++;
1302                 }
1303                 if (test_bit(WriteMostly, &rdev2->flags))
1304                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
1305                 if (test_bit(FailFast, &rdev2->flags))
1306                         d->state |= (1<<MD_DISK_FAILFAST);
1307         }
1308         /* now set the "removed" and "faulty" bits on any missing devices */
1309         for (i=0 ; i < mddev->raid_disks ; i++) {
1310                 mdp_disk_t *d = &sb->disks[i];
1311                 if (d->state == 0 && d->number == 0) {
1312                         d->number = i;
1313                         d->raid_disk = i;
1314                         d->state = (1<<MD_DISK_REMOVED);
1315                         d->state |= (1<<MD_DISK_FAULTY);
1316                         failed++;
1317                 }
1318         }
1319         sb->nr_disks = nr_disks;
1320         sb->active_disks = active;
1321         sb->working_disks = working;
1322         sb->failed_disks = failed;
1323         sb->spare_disks = spare;
1324
1325         sb->this_disk = sb->disks[rdev->desc_nr];
1326         sb->sb_csum = calc_sb_csum(sb);
1327 }
1328
1329 /*
1330  * rdev_size_change for 0.90.0
1331  */
1332 static unsigned long long
1333 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1334 {
1335         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1336                 return 0; /* component must fit device */
1337         if (rdev->mddev->bitmap_info.offset)
1338                 return 0; /* can't move bitmap */
1339         rdev->sb_start = calc_dev_sboffset(rdev);
1340         if (!num_sectors || num_sectors > rdev->sb_start)
1341                 num_sectors = rdev->sb_start;
1342         /* Limit to 4TB as metadata cannot record more than that.
1343          * 4TB == 2^32 KB, or 2*2^32 sectors.
1344          */
1345         if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
1346             rdev->mddev->level >= 1)
1347                 num_sectors = (sector_t)(2ULL << 32) - 2;
1348         do {
1349                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1350                        rdev->sb_page);
1351         } while (md_super_wait(rdev->mddev) < 0);
1352         return num_sectors;
1353 }
1354
1355 static int
1356 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1357 {
1358         /* non-zero offset changes not possible with v0.90 */
1359         return new_offset == 0;
1360 }
1361
1362 /*
1363  * version 1 superblock
1364  */
1365
1366 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
1367 {
1368         __le32 disk_csum;
1369         u32 csum;
1370         unsigned long long newcsum;
1371         int size = 256 + le32_to_cpu(sb->max_dev)*2;
1372         __le32 *isuper = (__le32*)sb;
1373
1374         disk_csum = sb->sb_csum;
1375         sb->sb_csum = 0;
1376         newcsum = 0;
1377         for (; size >= 4; size -= 4)
1378                 newcsum += le32_to_cpu(*isuper++);
1379
1380         if (size == 2)
1381                 newcsum += le16_to_cpu(*(__le16*) isuper);
1382
1383         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
1384         sb->sb_csum = disk_csum;
1385         return cpu_to_le32(csum);
1386 }
1387
1388 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1389 {
1390         struct mdp_superblock_1 *sb;
1391         int ret;
1392         sector_t sb_start;
1393         sector_t sectors;
1394         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1395         int bmask;
1396
1397         /*
1398          * Calculate the position of the superblock in 512byte sectors.
1399          * It is always aligned to a 4K boundary and
1400          * depeding on minor_version, it can be:
1401          * 0: At least 8K, but less than 12K, from end of device
1402          * 1: At start of device
1403          * 2: 4K from start of device.
1404          */
1405         switch(minor_version) {
1406         case 0:
1407                 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1408                 sb_start -= 8*2;
1409                 sb_start &= ~(sector_t)(4*2-1);
1410                 break;
1411         case 1:
1412                 sb_start = 0;
1413                 break;
1414         case 2:
1415                 sb_start = 8;
1416                 break;
1417         default:
1418                 return -EINVAL;
1419         }
1420         rdev->sb_start = sb_start;
1421
1422         /* superblock is rarely larger than 1K, but it can be larger,
1423          * and it is safe to read 4k, so we do that
1424          */
1425         ret = read_disk_sb(rdev, 4096);
1426         if (ret) return ret;
1427
1428         sb = page_address(rdev->sb_page);
1429
1430         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1431             sb->major_version != cpu_to_le32(1) ||
1432             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1433             le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1434             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1435                 return -EINVAL;
1436
1437         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1438                 pr_warn("md: invalid superblock checksum on %s\n",
1439                         bdevname(rdev->bdev,b));
1440                 return -EINVAL;
1441         }
1442         if (le64_to_cpu(sb->data_size) < 10) {
1443                 pr_warn("md: data_size too small on %s\n",
1444                         bdevname(rdev->bdev,b));
1445                 return -EINVAL;
1446         }
1447         if (sb->pad0 ||
1448             sb->pad3[0] ||
1449             memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
1450                 /* Some padding is non-zero, might be a new feature */
1451                 return -EINVAL;
1452
1453         rdev->preferred_minor = 0xffff;
1454         rdev->data_offset = le64_to_cpu(sb->data_offset);
1455         rdev->new_data_offset = rdev->data_offset;
1456         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
1457             (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
1458                 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1459         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1460
1461         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1462         bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1463         if (rdev->sb_size & bmask)
1464                 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1465
1466         if (minor_version
1467             && rdev->data_offset < sb_start + (rdev->sb_size/512))
1468                 return -EINVAL;
1469         if (minor_version
1470             && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1471                 return -EINVAL;
1472
1473         if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
1474                 rdev->desc_nr = -1;
1475         else
1476                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1477
1478         if (!rdev->bb_page) {
1479                 rdev->bb_page = alloc_page(GFP_KERNEL);
1480                 if (!rdev->bb_page)
1481                         return -ENOMEM;
1482         }
1483         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
1484             rdev->badblocks.count == 0) {
1485                 /* need to load the bad block list.
1486                  * Currently we limit it to one page.
1487                  */
1488                 s32 offset;
1489                 sector_t bb_sector;
1490                 u64 *bbp;
1491                 int i;
1492                 int sectors = le16_to_cpu(sb->bblog_size);
1493                 if (sectors > (PAGE_SIZE / 512))
1494                         return -EINVAL;
1495                 offset = le32_to_cpu(sb->bblog_offset);
1496                 if (offset == 0)
1497                         return -EINVAL;
1498                 bb_sector = (long long)offset;
1499                 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1500                                   rdev->bb_page, REQ_OP_READ, 0, true))
1501                         return -EIO;
1502                 bbp = (u64 *)page_address(rdev->bb_page);
1503                 rdev->badblocks.shift = sb->bblog_shift;
1504                 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
1505                         u64 bb = le64_to_cpu(*bbp);
1506                         int count = bb & (0x3ff);
1507                         u64 sector = bb >> 10;
1508                         sector <<= sb->bblog_shift;
1509                         count <<= sb->bblog_shift;
1510                         if (bb + 1 == 0)
1511                                 break;
1512                         if (badblocks_set(&rdev->badblocks, sector, count, 1))
1513                                 return -EINVAL;
1514                 }
1515         } else if (sb->bblog_offset != 0)
1516                 rdev->badblocks.shift = 0;
1517
1518         if (!refdev) {
1519                 ret = 1;
1520         } else {
1521                 __u64 ev1, ev2;
1522                 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);
1523
1524                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1525                     sb->level != refsb->level ||
1526                     sb->layout != refsb->layout ||
1527                     sb->chunksize != refsb->chunksize) {
1528                         pr_warn("md: %s has strangely different superblock to %s\n",
1529                                 bdevname(rdev->bdev,b),
1530                                 bdevname(refdev->bdev,b2));
1531                         return -EINVAL;
1532                 }
1533                 ev1 = le64_to_cpu(sb->events);
1534                 ev2 = le64_to_cpu(refsb->events);
1535
1536                 if (ev1 > ev2)
1537                         ret = 1;
1538                 else
1539                         ret = 0;
1540         }
1541         if (minor_version) {
1542                 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1543                 sectors -= rdev->data_offset;
1544         } else
1545                 sectors = rdev->sb_start;
1546         if (sectors < le64_to_cpu(sb->data_size))
1547                 return -EINVAL;
1548         rdev->sectors = le64_to_cpu(sb->data_size);
1549         return ret;
1550 }
1551
1552 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
1553 {
1554         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1555         __u64 ev1 = le64_to_cpu(sb->events);
1556
1557         rdev->raid_disk = -1;
1558         clear_bit(Faulty, &rdev->flags);
1559         clear_bit(In_sync, &rdev->flags);
1560         clear_bit(Bitmap_sync, &rdev->flags);
1561         clear_bit(WriteMostly, &rdev->flags);
1562
1563         if (mddev->raid_disks == 0) {
1564                 mddev->major_version = 1;
1565                 mddev->patch_version = 0;
1566                 mddev->external = 0;
1567                 mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
1568                 mddev->ctime = le64_to_cpu(sb->ctime);
1569                 mddev->utime = le64_to_cpu(sb->utime);
1570                 mddev->level = le32_to_cpu(sb->level);
1571                 mddev->clevel[0] = 0;
1572                 mddev->layout = le32_to_cpu(sb->layout);
1573                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1574                 mddev->dev_sectors = le64_to_cpu(sb->size);
1575                 mddev->events = ev1;
1576                 mddev->bitmap_info.offset = 0;
1577                 mddev->bitmap_info.space = 0;
1578                 /* Default location for bitmap is 1K after superblock
1579                  * using 3K - total of 4K
1580                  */
1581                 mddev->bitmap_info.default_offset = 1024 >> 9;
1582                 mddev->bitmap_info.default_space = (4096-1024) >> 9;
1583                 mddev->reshape_backwards = 0;
1584
1585                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1586                 memcpy(mddev->uuid, sb->set_uuid, 16);
1587
1588                 mddev->max_disks =  (4096-256)/2;
1589
1590                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1591                     mddev->bitmap_info.file == NULL) {
1592                         mddev->bitmap_info.offset =
1593                                 (__s32)le32_to_cpu(sb->bitmap_offset);
1594                         /* Metadata doesn't record how much space is available.
1595                          * For 1.0, we assume we can use up to the superblock
1596                          * if before, else to 4K beyond superblock.
1597                          * For others, assume no change is possible.
1598                          */
1599                         if (mddev->minor_version > 0)
1600                                 mddev->bitmap_info.space = 0;
1601                         else if (mddev->bitmap_info.offset > 0)
1602                                 mddev->bitmap_info.space =
1603                                         8 - mddev->bitmap_info.offset;
1604                         else
1605                                 mddev->bitmap_info.space =
1606                                         -mddev->bitmap_info.offset;
1607                 }
1608
1609                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1610                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1611                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1612                         mddev->new_level = le32_to_cpu(sb->new_level);
1613                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1614                         mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
1615                         if (mddev->delta_disks < 0 ||
1616                             (mddev->delta_disks == 0 &&
1617                              (le32_to_cpu(sb->feature_map)
1618                               & MD_FEATURE_RESHAPE_BACKWARDS)))
1619                                 mddev->reshape_backwards = 1;
1620                 } else {
1621                         mddev->reshape_position = MaxSector;
1622                         mddev->delta_disks = 0;
1623                         mddev->new_level = mddev->level;
1624                         mddev->new_layout = mddev->layout;
1625                         mddev->new_chunk_sectors = mddev->chunk_sectors;
1626                 }
1627
1628                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
1629                         set_bit(MD_HAS_JOURNAL, &mddev->flags);
1630         } else if (mddev->pers == NULL) {
1631                 /* Insist of good event counter while assembling, except for
1632                  * spares (which don't need an event count) */
1633                 ++ev1;
1634                 if (rdev->desc_nr >= 0 &&
1635                     rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1636                     (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1637                      le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1638                         if (ev1 < mddev->events)
1639                                 return -EINVAL;
1640         } else if (mddev->bitmap) {
1641                 /* If adding to array with a bitmap, then we can accept an
1642                  * older device, but not too old.
1643                  */
1644                 if (ev1 < mddev->bitmap->events_cleared)
1645                         return 0;
1646                 if (ev1 < mddev->events)
1647                         set_bit(Bitmap_sync, &rdev->flags);
1648         } else {
1649                 if (ev1 < mddev->events)
1650                         /* just a hot-add of a new device, leave raid_disk at -1 */
1651                         return 0;
1652         }
1653         if (mddev->level != LEVEL_MULTIPATH) {
1654                 int role;
1655                 if (rdev->desc_nr < 0 ||
1656                     rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1657                         role = MD_DISK_ROLE_SPARE;
1658                         rdev->desc_nr = -1;
1659                 } else
1660                         role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1661                 switch(role) {
1662                 case MD_DISK_ROLE_SPARE: /* spare */
1663                         break;
1664                 case MD_DISK_ROLE_FAULTY: /* faulty */
1665                         set_bit(Faulty, &rdev->flags);
1666                         break;
1667                 case MD_DISK_ROLE_JOURNAL: /* journal device */
1668                         if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
1669                                 /* journal device without journal feature */
1670                                 pr_warn("md: journal device provided without journal feature, ignoring the device\n");
1671                                 return -EINVAL;
1672                         }
1673                         set_bit(Journal, &rdev->flags);
1674                         rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1675                         rdev->raid_disk = 0;
1676                         break;
1677                 default:
1678                         rdev->saved_raid_disk = role;
1679                         if ((le32_to_cpu(sb->feature_map) &
1680                              MD_FEATURE_RECOVERY_OFFSET)) {
1681                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1682                                 if (!(le32_to_cpu(sb->feature_map) &
1683                                       MD_FEATURE_RECOVERY_BITMAP))
1684                                         rdev->saved_raid_disk = -1;
1685                         } else
1686                                 set_bit(In_sync, &rdev->flags);
1687                         rdev->raid_disk = role;
1688                         break;
1689                 }
1690                 if (sb->devflags & WriteMostly1)
1691                         set_bit(WriteMostly, &rdev->flags);
1692                 if (sb->devflags & FailFast1)
1693                         set_bit(FailFast, &rdev->flags);
1694                 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
1695                         set_bit(Replacement, &rdev->flags);
1696         } else /* MULTIPATH are always insync */
1697                 set_bit(In_sync, &rdev->flags);
1698
1699         return 0;
1700 }
1701
1702 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1703 {
1704         struct mdp_superblock_1 *sb;
1705         struct md_rdev *rdev2;
1706         int max_dev, i;
1707         /* make rdev->sb match mddev and rdev data. */
1708
1709         sb = page_address(rdev->sb_page);
1710
1711         sb->feature_map = 0;
1712         sb->pad0 = 0;
1713         sb->recovery_offset = cpu_to_le64(0);
1714         memset(sb->pad3, 0, sizeof(sb->pad3));
1715
1716         sb->utime = cpu_to_le64((__u64)mddev->utime);
1717         sb->events = cpu_to_le64(mddev->events);
1718         if (mddev->in_sync)
1719                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1720         else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
1721                 sb->resync_offset = cpu_to_le64(MaxSector);
1722         else
1723                 sb->resync_offset = cpu_to_le64(0);
1724
1725         sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
1726
1727         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1728         sb->size = cpu_to_le64(mddev->dev_sectors);
1729         sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
1730         sb->level = cpu_to_le32(mddev->level);
1731         sb->layout = cpu_to_le32(mddev->layout);
1732         if (test_bit(FailFast, &rdev->flags))
1733                 sb->devflags |= FailFast1;
1734         else
1735                 sb->devflags &= ~FailFast1;
1736
1737         if (test_bit(WriteMostly, &rdev->flags))
1738                 sb->devflags |= WriteMostly1;
1739         else
1740                 sb->devflags &= ~WriteMostly1;
1741         sb->data_offset = cpu_to_le64(rdev->data_offset);
1742         sb->data_size = cpu_to_le64(rdev->sectors);
1743
1744         if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
1745                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
1746                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1747         }
1748
1749         if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
1750             !test_bit(In_sync, &rdev->flags)) {
1751                 sb->feature_map |=
1752                         cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1753                 sb->recovery_offset =
1754                         cpu_to_le64(rdev->recovery_offset);
1755                 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
1756                         sb->feature_map |=
1757                                 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
1758         }
1759         /* Note: recovery_offset and journal_tail share space  */
1760         if (test_bit(Journal, &rdev->flags))
1761                 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
1762         if (test_bit(Replacement, &rdev->flags))
1763                 sb->feature_map |=
1764                         cpu_to_le32(MD_FEATURE_REPLACEMENT);
1765
1766         if (mddev->reshape_position != MaxSector) {
1767                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1768                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1769                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1770                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1771                 sb->new_level = cpu_to_le32(mddev->new_level);
1772                 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
1773                 if (mddev->delta_disks == 0 &&
1774                     mddev->reshape_backwards)
1775                         sb->feature_map
1776                                 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
1777                 if (rdev->new_data_offset != rdev->data_offset) {
1778                         sb->feature_map
1779                                 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
1780                         sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
1781                                                              - rdev->data_offset));
1782                 }
1783         }
1784
1785         if (mddev_is_clustered(mddev))
1786                 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);
1787
1788         if (rdev->badblocks.count == 0)
1789                 /* Nothing to do for bad blocks*/ ;
1790         else if (sb->bblog_offset == 0)
1791                 /* Cannot record bad blocks on this device */
1792                 md_error(mddev, rdev);
1793         else {
1794                 struct badblocks *bb = &rdev->badblocks;
1795                 u64 *bbp = (u64 *)page_address(rdev->bb_page);
1796                 u64 *p = bb->page;
1797                 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
1798                 if (bb->changed) {
1799                         unsigned seq;
1800
1801 retry:
1802                         seq = read_seqbegin(&bb->lock);
1803
1804                         memset(bbp, 0xff, PAGE_SIZE);
1805
1806                         for (i = 0 ; i < bb->count ; i++) {
1807                                 u64 internal_bb = p[i];
1808                                 u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
1809                                                 | BB_LEN(internal_bb));
1810                                 bbp[i] = cpu_to_le64(store_bb);
1811                         }
1812                         bb->changed = 0;
1813                         if (read_seqretry(&bb->lock, seq))
1814                                 goto retry;
1815
1816                         bb->sector = (rdev->sb_start +
1817                                       (int)le32_to_cpu(sb->bblog_offset));
1818                         bb->size = le16_to_cpu(sb->bblog_size);
1819                 }
1820         }
1821
1822         max_dev = 0;
1823         rdev_for_each(rdev2, mddev)
1824                 if (rdev2->desc_nr+1 > max_dev)
1825                         max_dev = rdev2->desc_nr+1;
1826
1827         if (max_dev > le32_to_cpu(sb->max_dev)) {
1828                 int bmask;
1829                 sb->max_dev = cpu_to_le32(max_dev);
1830                 rdev->sb_size = max_dev * 2 + 256;
1831                 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1832                 if (rdev->sb_size & bmask)
1833                         rdev->sb_size = (rdev->sb_size | bmask) + 1;
1834         } else
1835                 max_dev = le32_to_cpu(sb->max_dev);
1836
1837         for (i=0; i<max_dev;i++)
1838                 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1839
1840         if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
1841                 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);
1842
1843         rdev_for_each(rdev2, mddev) {
1844                 i = rdev2->desc_nr;
1845                 if (test_bit(Faulty, &rdev2->flags))
1846                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
1847                 else if (test_bit(In_sync, &rdev2->flags))
1848                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1849                 else if (test_bit(Journal, &rdev2->flags))
1850                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
1851                 else if (rdev2->raid_disk >= 0)
1852                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1853                 else
1854                         sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
1855         }
1856
1857         sb->sb_csum = calc_sb_1_csum(sb);
1858 }
1859
1860 static unsigned long long
1861 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1862 {
1863         struct mdp_superblock_1 *sb;
1864         sector_t max_sectors;
1865         if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1866                 return 0; /* component must fit device */
1867         if (rdev->data_offset != rdev->new_data_offset)
1868                 return 0; /* too confusing */
1869         if (rdev->sb_start < rdev->data_offset) {
1870                 /* minor versions 1 and 2; superblock before data */
1871                 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
1872                 max_sectors -= rdev->data_offset;
1873                 if (!num_sectors || num_sectors > max_sectors)
1874                         num_sectors = max_sectors;
1875         } else if (rdev->mddev->bitmap_info.offset) {
1876                 /* minor version 0 with bitmap we can't move */
1877                 return 0;
1878         } else {
1879                 /* minor version 0; superblock after data */
1880                 sector_t sb_start;
1881                 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
1882                 sb_start &= ~(sector_t)(4*2 - 1);
1883                 max_sectors = rdev->sectors + sb_start - rdev->sb_start;
1884                 if (!num_sectors || num_sectors > max_sectors)
1885                         num_sectors = max_sectors;
1886                 rdev->sb_start = sb_start;
1887         }
1888         sb = page_address(rdev->sb_page);
1889         sb->data_size = cpu_to_le64(num_sectors);
1890         sb->super_offset = rdev->sb_start;
1891         sb->sb_csum = calc_sb_1_csum(sb);
1892         do {
1893                 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1894                                rdev->sb_page);
1895         } while (md_super_wait(rdev->mddev) < 0);
1896         return num_sectors;
1897
1898 }
1899
1900 static int
1901 super_1_allow_new_offset(struct md_rdev *rdev,
1902                          unsigned long long new_offset)
1903 {
1904         /* All necessary checks on new >= old have been done */
1905         struct bitmap *bitmap;
1906         if (new_offset >= rdev->data_offset)
1907                 return 1;
1908
1909         /* with 1.0 metadata, there is no metadata to tread on
1910          * so we can always move back */
1911         if (rdev->mddev->minor_version == 0)
1912                 return 1;
1913
1914         /* otherwise we must be sure not to step on
1915          * any metadata, so stay:
1916          * 36K beyond start of superblock
1917          * beyond end of badblocks
1918          * beyond write-intent bitmap
1919          */
1920         if (rdev->sb_start + (32+4)*2 > new_offset)
1921                 return 0;
1922         bitmap = rdev->mddev->bitmap;
1923         if (bitmap && !rdev->mddev->bitmap_info.file &&
1924             rdev->sb_start + rdev->mddev->bitmap_info.offset +
1925             bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
1926                 return 0;
1927         if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
1928                 return 0;
1929
1930         return 1;
1931 }
1932
1933 static struct super_type super_types[] = {
1934         [0] = {
1935                 .name   = "0.90.0",
1936                 .owner  = THIS_MODULE,
1937                 .load_super         = super_90_load,
1938                 .validate_super     = super_90_validate,
1939                 .sync_super         = super_90_sync,
1940                 .rdev_size_change   = super_90_rdev_size_change,
1941                 .allow_new_offset   = super_90_allow_new_offset,
1942         },
1943         [1] = {
1944                 .name   = "md-1",
1945                 .owner  = THIS_MODULE,
1946                 .load_super         = super_1_load,
1947                 .validate_super     = super_1_validate,
1948                 .sync_super         = super_1_sync,
1949                 .rdev_size_change   = super_1_rdev_size_change,
1950                 .allow_new_offset   = super_1_allow_new_offset,
1951         },
1952 };
1953
1954 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
1955 {
1956         if (mddev->sync_super) {
1957                 mddev->sync_super(mddev, rdev);
1958                 return;
1959         }
1960
1961         BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1962
1963         super_types[mddev->major_version].sync_super(mddev, rdev);
1964 }
1965
1966 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
1967 {
1968         struct md_rdev *rdev, *rdev2;
1969
1970         rcu_read_lock();
1971         rdev_for_each_rcu(rdev, mddev1) {
1972                 if (test_bit(Faulty, &rdev->flags) ||
1973                     test_bit(Journal, &rdev->flags) ||
1974                     rdev->raid_disk == -1)
1975                         continue;
1976                 rdev_for_each_rcu(rdev2, mddev2) {
1977                         if (test_bit(Faulty, &rdev2->flags) ||
1978                             test_bit(Journal, &rdev2->flags) ||
1979                             rdev2->raid_disk == -1)
1980                                 continue;
1981                         if (rdev->bdev->bd_contains ==
1982                             rdev2->bdev->bd_contains) {
1983                                 rcu_read_unlock();
1984                                 return 1;
1985                         }
1986                 }
1987         }
1988         rcu_read_unlock();
1989         return 0;
1990 }
1991
1992 static LIST_HEAD(pending_raid_disks);
1993
1994 /*
1995  * Try to register data integrity profile for an mddev
1996  *
1997  * This is called when an array is started and after a disk has been kicked
1998  * from the array. It only succeeds if all working and active component devices
1999  * are integrity capable with matching profiles.
2000  */
2001 int md_integrity_register(struct mddev *mddev)
2002 {
2003         struct md_rdev *rdev, *reference = NULL;
2004
2005         if (list_empty(&mddev->disks))
2006                 return 0; /* nothing to do */
2007         if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
2008                 return 0; /* shouldn't register, or already is */
2009         rdev_for_each(rdev, mddev) {
2010                 /* skip spares and non-functional disks */
2011                 if (test_bit(Faulty, &rdev->flags))
2012                         continue;
2013                 if (rdev->raid_disk < 0)
2014                         continue;
2015                 if (!reference) {
2016                         /* Use the first rdev as the reference */
2017                         reference = rdev;
2018                         continue;
2019                 }
2020                 /* does this rdev's profile match the reference profile? */
2021                 if (blk_integrity_compare(reference->bdev->bd_disk,
2022                                 rdev->bdev->bd_disk) < 0)
2023                         return -EINVAL;
2024         }
2025         if (!reference || !bdev_get_integrity(reference->bdev))
2026                 return 0;
2027         /*
2028          * All component devices are integrity capable and have matching
2029          * profiles, register the common profile for the md device.
2030          */
2031         blk_integrity_register(mddev->gendisk,
2032                                bdev_get_integrity(reference->bdev));
2033
2034         pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
2035         if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
2036                 pr_err("md: failed to create integrity pool for %s\n",
2037                        mdname(mddev));
2038                 return -EINVAL;
2039         }
2040         return 0;
2041 }
2042 EXPORT_SYMBOL(md_integrity_register);
2043
2044 /*
2045  * Attempt to add an rdev, but only if it is consistent with the current
2046  * integrity profile
2047  */
2048 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2049 {
2050         struct blk_integrity *bi_rdev;
2051         struct blk_integrity *bi_mddev;
2052         char name[BDEVNAME_SIZE];
2053
2054         if (!mddev->gendisk)
2055                 return 0;
2056
2057         bi_rdev = bdev_get_integrity(rdev->bdev);
2058         bi_mddev = blk_get_integrity(mddev->gendisk);
2059
2060         if (!bi_mddev) /* nothing to do */
2061                 return 0;
2062
2063         if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2064                 pr_err("%s: incompatible integrity profile for %s\n",
2065                        mdname(mddev), bdevname(rdev->bdev, name));
2066                 return -ENXIO;
2067         }
2068
2069         return 0;
2070 }
2071 EXPORT_SYMBOL(md_integrity_add_rdev);
2072
2073 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2074 {
2075         char b[BDEVNAME_SIZE];
2076         struct kobject *ko;
2077         int err;
2078
2079         /* prevent duplicates */
2080         if (find_rdev(mddev, rdev->bdev->bd_dev))
2081                 return -EEXIST;
2082
2083         /* make sure rdev->sectors exceeds mddev->dev_sectors */
2084         if (!test_bit(Journal, &rdev->flags) &&
2085             rdev->sectors &&
2086             (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2087                 if (mddev->pers) {
2088                         /* Cannot change size, so fail
2089                          * If mddev->level <= 0, then we don't care
2090                          * about aligning sizes (e.g. linear)
2091                          */
2092                         if (mddev->level > 0)
2093                                 return -ENOSPC;
2094                 } else
2095                         mddev->dev_sectors = rdev->sectors;
2096         }
2097
2098         /* Verify rdev->desc_nr is unique.
2099          * If it is -1, assign a free number, else
2100          * check number is not in use
2101          */
2102         rcu_read_lock();
2103         if (rdev->desc_nr < 0) {
2104                 int choice = 0;
2105                 if (mddev->pers)
2106                         choice = mddev->raid_disks;
2107                 while (md_find_rdev_nr_rcu(mddev, choice))
2108                         choice++;
2109                 rdev->desc_nr = choice;
2110         } else {
2111                 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2112                         rcu_read_unlock();
2113                         return -EBUSY;
2114                 }
2115         }
2116         rcu_read_unlock();
2117         if (!test_bit(Journal, &rdev->flags) &&
2118             mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2119                 pr_warn("md: %s: array is limited to %d devices\n",
2120                         mdname(mddev), mddev->max_disks);
2121                 return -EBUSY;
2122         }
2123         bdevname(rdev->bdev,b);
2124         strreplace(b, '/', '!');
2125
2126         rdev->mddev = mddev;
2127         pr_debug("md: bind<%s>\n", b);
2128
2129         if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2130                 goto fail;
2131
2132         ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2133         if (sysfs_create_link(&rdev->kobj, ko, "block"))
2134                 /* failure here is OK */;
2135         rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2136
2137         list_add_rcu(&rdev->same_set, &mddev->disks);
2138         bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2139
2140         /* May as well allow recovery to be retried once */
2141         mddev->recovery_disabled++;
2142
2143         return 0;
2144
2145  fail:
2146         pr_warn("md: failed to register dev-%s for %s\n",
2147                 b, mdname(mddev));
2148         return err;
2149 }
2150
2151 static void md_delayed_delete(struct work_struct *ws)
2152 {
2153         struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2154         kobject_del(&rdev->kobj);
2155         kobject_put(&rdev->kobj);
2156 }
2157
2158 static void unbind_rdev_from_array(struct md_rdev *rdev)
2159 {
2160         char b[BDEVNAME_SIZE];
2161
2162         bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2163         list_del_rcu(&rdev->same_set);
2164         pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2165         rdev->mddev = NULL;
2166         sysfs_remove_link(&rdev->kobj, "block");
2167         sysfs_put(rdev->sysfs_state);
2168         rdev->sysfs_state = NULL;
2169         rdev->badblocks.count = 0;
2170         /* We need to delay this, otherwise we can deadlock when
2171          * writing to 'remove' to "dev/state".  We also need
2172          * to delay it due to rcu usage.
2173          */
2174         synchronize_rcu();
2175         INIT_WORK(&rdev->del_work, md_delayed_delete);
2176         kobject_get(&rdev->kobj);
2177         queue_work(md_misc_wq, &rdev->del_work);
2178 }
2179
2180 /*
2181  * prevent the device from being mounted, repartitioned or
2182  * otherwise reused by a RAID array (or any other kernel
2183  * subsystem), by bd_claiming the device.
2184  */
2185 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2186 {
2187         int err = 0;
2188         struct block_device *bdev;
2189         char b[BDEVNAME_SIZE];
2190
2191         bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2192                                  shared ? (struct md_rdev *)lock_rdev : rdev);
2193         if (IS_ERR(bdev)) {
2194                 pr_warn("md: could not open %s.\n", __bdevname(dev, b));
2195                 return PTR_ERR(bdev);
2196         }
2197         rdev->bdev = bdev;
2198         return err;
2199 }
2200
2201 static void unlock_rdev(struct md_rdev *rdev)
2202 {
2203         struct block_device *bdev = rdev->bdev;
2204         rdev->bdev = NULL;
2205         blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2206 }
2207
2208 void md_autodetect_dev(dev_t dev);
2209
2210 static void export_rdev(struct md_rdev *rdev)
2211 {
2212         char b[BDEVNAME_SIZE];
2213
2214         pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2215         md_rdev_clear(rdev);
2216 #ifndef MODULE
2217         if (test_bit(AutoDetected, &rdev->flags))
2218                 md_autodetect_dev(rdev->bdev->bd_dev);
2219 #endif
2220         unlock_rdev(rdev);
2221         kobject_put(&rdev->kobj);
2222 }
2223
2224 void md_kick_rdev_from_array(struct md_rdev *rdev)
2225 {
2226         unbind_rdev_from_array(rdev);
2227         export_rdev(rdev);
2228 }
2229 EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);
2230
2231 static void export_array(struct mddev *mddev)
2232 {
2233         struct md_rdev *rdev;
2234
2235         while (!list_empty(&mddev->disks)) {
2236                 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2237                                         same_set);
2238                 md_kick_rdev_from_array(rdev);
2239         }
2240         mddev->raid_disks = 0;
2241         mddev->major_version = 0;
2242 }
2243
2244 static void sync_sbs(struct mddev *mddev, int nospares)
2245 {
2246         /* Update each superblock (in-memory image), but
2247          * if we are allowed to, skip spares which already
2248          * have the right event counter, or have one earlier
2249          * (which would mean they aren't being marked as dirty
2250          * with the rest of the array)
2251          */
2252         struct md_rdev *rdev;
2253         rdev_for_each(rdev, mddev) {
2254                 if (rdev->sb_events == mddev->events ||
2255                     (nospares &&
2256                      rdev->raid_disk < 0 &&
2257                      rdev->sb_events+1 == mddev->events)) {
2258                         /* Don't update this superblock */
2259                         rdev->sb_loaded = 2;
2260                 } else {
2261                         sync_super(mddev, rdev);
2262                         rdev->sb_loaded = 1;
2263                 }
2264         }
2265 }
2266
2267 static bool does_sb_need_changing(struct mddev *mddev)
2268 {
2269         struct md_rdev *rdev;
2270         struct mdp_superblock_1 *sb;
2271         int role;
2272
2273         /* Find a good rdev */
2274         rdev_for_each(rdev, mddev)
2275                 if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
2276                         break;
2277
2278         /* No good device found. */
2279         if (!rdev)
2280                 return false;
2281
2282         sb = page_address(rdev->sb_page);
2283         /* Check if a device has become faulty or a spare become active */
2284         rdev_for_each(rdev, mddev) {
2285                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2286                 /* Device activated? */
2287                 if (role == 0xffff && rdev->raid_disk >=0 &&
2288                     !test_bit(Faulty, &rdev->flags))
2289                         return true;
2290                 /* Device turned faulty? */
2291                 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2292                         return true;
2293         }
2294
2295         /* Check if any mddev parameters have changed */
2296         if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
2297             (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
2298             (mddev->layout != le64_to_cpu(sb->layout)) ||
2299             (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
2300             (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
2301                 return true;
2302
2303         return false;
2304 }
2305
2306 void md_update_sb(struct mddev *mddev, int force_change)
2307 {
2308         struct md_rdev *rdev;
2309         int sync_req;
2310         int nospares = 0;
2311         int any_badblocks_changed = 0;
2312         int ret = -1;
2313
2314         if (mddev->ro) {
2315                 if (force_change)
2316                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2317                 return;
2318         }
2319
2320 repeat:
2321         if (mddev_is_clustered(mddev)) {
2322                 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2323                         force_change = 1;
2324                 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2325                         nospares = 1;
2326                 ret = md_cluster_ops->metadata_update_start(mddev);
2327                 /* Has someone else has updated the sb */
2328                 if (!does_sb_need_changing(mddev)) {
2329                         if (ret == 0)
2330                                 md_cluster_ops->metadata_update_cancel(mddev);
2331                         bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2332                                                          BIT(MD_SB_CHANGE_DEVS) |
2333                                                          BIT(MD_SB_CHANGE_CLEAN));
2334                         return;
2335                 }
2336         }
2337
2338         /* First make sure individual recovery_offsets are correct */
2339         rdev_for_each(rdev, mddev) {
2340                 if (rdev->raid_disk >= 0 &&
2341                     mddev->delta_disks >= 0 &&
2342                     !test_bit(Journal, &rdev->flags) &&
2343                     !test_bit(In_sync, &rdev->flags) &&
2344                     mddev->curr_resync_completed > rdev->recovery_offset)
2345                                 rdev->recovery_offset = mddev->curr_resync_completed;
2346
2347         }
2348         if (!mddev->persistent) {
2349                 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
2350                 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2351                 if (!mddev->external) {
2352                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
2353                         rdev_for_each(rdev, mddev) {
2354                                 if (rdev->badblocks.changed) {
2355                                         rdev->badblocks.changed = 0;
2356                                         ack_all_badblocks(&rdev->badblocks);
2357                                         md_error(mddev, rdev);
2358                                 }
2359                                 clear_bit(Blocked, &rdev->flags);
2360                                 clear_bit(BlockedBadBlocks, &rdev->flags);
2361                                 wake_up(&rdev->blocked_wait);
2362                         }
2363                 }
2364                 wake_up(&mddev->sb_wait);
2365                 return;
2366         }
2367
2368         spin_lock(&mddev->lock);
2369
2370         mddev->utime = ktime_get_real_seconds();
2371
2372         if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
2373                 force_change = 1;
2374         if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
2375                 /* just a clean<-> dirty transition, possibly leave spares alone,
2376                  * though if events isn't the right even/odd, we will have to do
2377                  * spares after all
2378                  */
2379                 nospares = 1;
2380         if (force_change)
2381                 nospares = 0;
2382         if (mddev->degraded)
2383                 /* If the array is degraded, then skipping spares is both
2384                  * dangerous and fairly pointless.
2385                  * Dangerous because a device that was removed from the array
2386                  * might have a event_count that still looks up-to-date,
2387                  * so it can be re-added without a resync.
2388                  * Pointless because if there are any spares to skip,
2389                  * then a recovery will happen and soon that array won't
2390                  * be degraded any more and the spare can go back to sleep then.
2391                  */
2392                 nospares = 0;
2393
2394         sync_req = mddev->in_sync;
2395
2396         /* If this is just a dirty<->clean transition, and the array is clean
2397          * and 'events' is odd, we can roll back to the previous clean state */
2398         if (nospares
2399             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
2400             && mddev->can_decrease_events
2401             && mddev->events != 1) {
2402                 mddev->events--;
2403                 mddev->can_decrease_events = 0;
2404         } else {
2405                 /* otherwise we have to go forward and ... */
2406                 mddev->events ++;
2407                 mddev->can_decrease_events = nospares;
2408         }
2409
2410         /*
2411          * This 64-bit counter should never wrap.
2412          * Either we are in around ~1 trillion A.C., assuming
2413          * 1 reboot per second, or we have a bug...
2414          */
2415         WARN_ON(mddev->events == 0);
2416
2417         rdev_for_each(rdev, mddev) {
2418                 if (rdev->badblocks.changed)
2419                         any_badblocks_changed++;
2420                 if (test_bit(Faulty, &rdev->flags))
2421                         set_bit(FaultRecorded, &rdev->flags);
2422         }
2423
2424         sync_sbs(mddev, nospares);
2425         spin_unlock(&mddev->lock);
2426
2427         pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
2428                  mdname(mddev), mddev->in_sync);
2429
2430         if (mddev->queue)
2431                 blk_add_trace_msg(mddev->queue, "md md_update_sb");
2432 rewrite:
2433         bitmap_update_sb(mddev->bitmap);
2434         rdev_for_each(rdev, mddev) {
2435                 char b[BDEVNAME_SIZE];
2436
2437                 if (rdev->sb_loaded != 1)
2438                         continue; /* no noise on spare devices */
2439
2440                 if (!test_bit(Faulty, &rdev->flags)) {
2441                         md_super_write(mddev,rdev,
2442                                        rdev->sb_start, rdev->sb_size,
2443                                        rdev->sb_page);
2444                         pr_debug("md: (write) %s's sb offset: %llu\n",
2445                                  bdevname(rdev->bdev, b),
2446                                  (unsigned long long)rdev->sb_start);
2447                         rdev->sb_events = mddev->events;
2448                         if (rdev->badblocks.size) {
2449                                 md_super_write(mddev, rdev,
2450                                                rdev->badblocks.sector,
2451                                                rdev->badblocks.size << 9,
2452                                                rdev->bb_page);
2453                                 rdev->badblocks.size = 0;
2454                         }
2455
2456                 } else
2457                         pr_debug("md: %s (skipping faulty)\n",
2458                                  bdevname(rdev->bdev, b));
2459
2460                 if (mddev->level == LEVEL_MULTIPATH)
2461                         /* only need to write one superblock... */
2462                         break;
2463         }
2464         if (md_super_wait(mddev) < 0)
2465                 goto rewrite;
2466         /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
2467
2468         if (mddev_is_clustered(mddev) && ret == 0)
2469                 md_cluster_ops->metadata_update_finish(mddev);
2470
2471         if (mddev->in_sync != sync_req ||
2472             !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
2473                                BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
2474                 /* have to write it out again */
2475                 goto repeat;
2476         wake_up(&mddev->sb_wait);
2477         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2478                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2479
2480         rdev_for_each(rdev, mddev) {
2481                 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2482                         clear_bit(Blocked, &rdev->flags);
2483
2484                 if (any_badblocks_changed)
2485                         ack_all_badblocks(&rdev->badblocks);
2486                 clear_bit(BlockedBadBlocks, &rdev->flags);
2487                 wake_up(&rdev->blocked_wait);
2488         }
2489 }
2490 EXPORT_SYMBOL(md_update_sb);
2491
2492 static int add_bound_rdev(struct md_rdev *rdev)
2493 {
2494         struct mddev *mddev = rdev->mddev;
2495         int err = 0;
2496         bool add_journal = test_bit(Journal, &rdev->flags);
2497
2498         if (!mddev->pers->hot_remove_disk || add_journal) {
2499                 /* If there is hot_add_disk but no hot_remove_disk
2500                  * then added disks for geometry changes,
2501                  * and should be added immediately.
2502                  */
2503                 super_types[mddev->major_version].
2504                         validate_super(mddev, rdev);
2505                 if (add_journal)
2506                         mddev_suspend(mddev);
2507                 err = mddev->pers->hot_add_disk(mddev, rdev);
2508                 if (add_journal)
2509                         mddev_resume(mddev);
2510                 if (err) {
2511                         md_kick_rdev_from_array(rdev);
2512                         return err;
2513                 }
2514         }
2515         sysfs_notify_dirent_safe(rdev->sysfs_state);
2516
2517         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2518         if (mddev->degraded)
2519                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
2520         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2521         md_new_event(mddev);
2522         md_wakeup_thread(mddev->thread);
2523         return 0;
2524 }
2525
2526 /* words written to sysfs files may, or may not, be \n terminated.
2527  * We want to accept with case. For this we use cmd_match.
2528  */
2529 static int cmd_match(const char *cmd, const char *str)
2530 {
2531         /* See if cmd, written into a sysfs file, matches
2532          * str.  They must either be the same, or cmd can
2533          * have a trailing newline
2534          */
2535         while (*cmd && *str && *cmd == *str) {
2536                 cmd++;
2537                 str++;
2538         }
2539         if (*cmd == '\n')
2540                 cmd++;
2541         if (*str || *cmd)
2542                 return 0;
2543         return 1;
2544 }
2545
2546 struct rdev_sysfs_entry {
2547         struct attribute attr;
2548         ssize_t (*show)(struct md_rdev *, char *);
2549         ssize_t (*store)(struct md_rdev *, const char *, size_t);
2550 };
2551
2552 static ssize_t
2553 state_show(struct md_rdev *rdev, char *page)
2554 {
2555         char *sep = ",";
2556         size_t len = 0;
2557         unsigned long flags = ACCESS_ONCE(rdev->flags);
2558
2559         if (test_bit(Faulty, &flags) ||
2560             (!test_bit(ExternalBbl, &flags) &&
2561             rdev->badblocks.unacked_exist))
2562                 len += sprintf(page+len, "faulty%s", sep);
2563         if (test_bit(In_sync, &flags))
2564                 len += sprintf(page+len, "in_sync%s", sep);
2565         if (test_bit(Journal, &flags))
2566                 len += sprintf(page+len, "journal%s", sep);
2567         if (test_bit(WriteMostly, &flags))
2568                 len += sprintf(page+len, "write_mostly%s", sep);
2569         if (test_bit(Blocked, &flags) ||
2570             (rdev->badblocks.unacked_exist
2571              && !test_bit(Faulty, &flags)))
2572                 len += sprintf(page+len, "blocked%s", sep);
2573         if (!test_bit(Faulty, &flags) &&
2574             !test_bit(Journal, &flags) &&
2575             !test_bit(In_sync, &flags))
2576                 len += sprintf(page+len, "spare%s", sep);
2577         if (test_bit(WriteErrorSeen, &flags))
2578                 len += sprintf(page+len, "write_error%s", sep);
2579         if (test_bit(WantReplacement, &flags))
2580                 len += sprintf(page+len, "want_replacement%s", sep);
2581         if (test_bit(Replacement, &flags))
2582                 len += sprintf(page+len, "replacement%s", sep);
2583         if (test_bit(ExternalBbl, &flags))
2584                 len += sprintf(page+len, "external_bbl%s", sep);
2585         if (test_bit(FailFast, &flags))
2586                 len += sprintf(page+len, "failfast%s", sep);
2587
2588         if (len)
2589                 len -= strlen(sep);
2590
2591         return len+sprintf(page+len, "\n");
2592 }
2593
2594 static ssize_t
2595 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2596 {
2597         /* can write
2598          *  faulty  - simulates an error
2599          *  remove  - disconnects the device
2600          *  writemostly - sets write_mostly
2601          *  -writemostly - clears write_mostly
2602          *  blocked - sets the Blocked flags
2603          *  -blocked - clears the Blocked and possibly simulates an error
2604          *  insync - sets Insync providing device isn't active
2605          *  -insync - clear Insync for a device with a slot assigned,
2606          *            so that it gets rebuilt based on bitmap
2607          *  write_error - sets WriteErrorSeen
2608          *  -write_error - clears WriteErrorSeen
2609          *  {,-}failfast - set/clear FailFast
2610          */
2611         int err = -EINVAL;
2612         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2613                 md_error(rdev->mddev, rdev);
2614                 if (test_bit(Faulty, &rdev->flags))
2615                         err = 0;
2616                 else
2617                         err = -EBUSY;
2618         } else if (cmd_match(buf, "remove")) {
2619                 if (rdev->mddev->pers) {
2620                         clear_bit(Blocked, &rdev->flags);
2621                         remove_and_add_spares(rdev->mddev, rdev);
2622                 }
2623                 if (rdev->raid_disk >= 0)
2624                         err = -EBUSY;
2625                 else {
2626                         struct mddev *mddev = rdev->mddev;
2627                         err = 0;
2628                         if (mddev_is_clustered(mddev))
2629                                 err = md_cluster_ops->remove_disk(mddev, rdev);
2630
2631                         if (err == 0) {
2632                                 md_kick_rdev_from_array(rdev);
2633                                 if (mddev->pers) {
2634                                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2635                                         md_wakeup_thread(mddev->thread);
2636                                 }
2637                                 md_new_event(mddev);
2638                         }
2639                 }
2640         } else if (cmd_match(buf, "writemostly")) {
2641                 set_bit(WriteMostly, &rdev->flags);
2642                 err = 0;
2643         } else if (cmd_match(buf, "-writemostly")) {
2644                 clear_bit(WriteMostly, &rdev->flags);
2645                 err = 0;
2646         } else if (cmd_match(buf, "blocked")) {
2647                 set_bit(Blocked, &rdev->flags);
2648                 err = 0;
2649         } else if (cmd_match(buf, "-blocked")) {
2650                 if (!test_bit(Faulty, &rdev->flags) &&
2651                     !test_bit(ExternalBbl, &rdev->flags) &&
2652                     rdev->badblocks.unacked_exist) {
2653                         /* metadata handler doesn't understand badblocks,
2654                          * so we need to fail the device
2655                          */
2656                         md_error(rdev->mddev, rdev);
2657                 }
2658                 clear_bit(Blocked, &rdev->flags);
2659                 clear_bit(BlockedBadBlocks, &rdev->flags);
2660                 wake_up(&rdev->blocked_wait);
2661                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2662                 md_wakeup_thread(rdev->mddev->thread);
2663
2664                 err = 0;
2665         } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2666                 set_bit(In_sync, &rdev->flags);
2667                 err = 0;
2668         } else if (cmd_match(buf, "failfast")) {
2669                 set_bit(FailFast, &rdev->flags);
2670                 err = 0;
2671         } else if (cmd_match(buf, "-failfast")) {
2672                 clear_bit(FailFast, &rdev->flags);
2673                 err = 0;
2674         } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
2675                    !test_bit(Journal, &rdev->flags)) {
2676                 if (rdev->mddev->pers == NULL) {
2677                         clear_bit(In_sync, &rdev->flags);
2678                         rdev->saved_raid_disk = rdev->raid_disk;
2679                         rdev->raid_disk = -1;
2680                         err = 0;
2681                 }
2682         } else if (cmd_match(buf, "write_error")) {
2683                 set_bit(WriteErrorSeen, &rdev->flags);
2684                 err = 0;
2685         } else if (cmd_match(buf, "-write_error")) {
2686                 clear_bit(WriteErrorSeen, &rdev->flags);
2687                 err = 0;
2688         } else if (cmd_match(buf, "want_replacement")) {
2689                 /* Any non-spare device that is not a replacement can
2690                  * become want_replacement at any time, but we then need to
2691                  * check if recovery is needed.
2692                  */
2693                 if (rdev->raid_disk >= 0 &&
2694                     !test_bit(Journal, &rdev->flags) &&
2695                     !test_bit(Replacement, &rdev->flags))
2696                         set_bit(WantReplacement, &rdev->flags);
2697                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2698                 md_wakeup_thread(rdev->mddev->thread);
2699                 err = 0;
2700         } else if (cmd_match(buf, "-want_replacement")) {
2701                 /* Clearing 'want_replacement' is always allowed.
2702                  * Once replacements starts it is too late though.
2703                  */
2704                 err = 0;
2705                 clear_bit(WantReplacement, &rdev->flags);
2706         } else if (cmd_match(buf, "replacement")) {
2707                 /* Can only set a device as a replacement when array has not
2708                  * yet been started.  Once running, replacement is automatic
2709                  * from spares, or by assigning 'slot'.
2710                  */
2711                 if (rdev->mddev->pers)
2712                         err = -EBUSY;
2713                 else {
2714                         set_bit(Replacement, &rdev->flags);
2715                         err = 0;
2716                 }
2717         } else if (cmd_match(buf, "-replacement")) {
2718                 /* Similarly, can only clear Replacement before start */
2719                 if (rdev->mddev->pers)
2720                         err = -EBUSY;
2721                 else {
2722                         clear_bit(Replacement, &rdev->flags);
2723                         err = 0;
2724                 }
2725         } else if (cmd_match(buf, "re-add")) {
2726                 if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
2727                         /* clear_bit is performed _after_ all the devices
2728                          * have their local Faulty bit cleared. If any writes
2729                          * happen in the meantime in the local node, they
2730                          * will land in the local bitmap, which will be synced
2731                          * by this node eventually
2732                          */
2733                         if (!mddev_is_clustered(rdev->mddev) ||
2734                             (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
2735                                 clear_bit(Faulty, &rdev->flags);
2736                                 err = add_bound_rdev(rdev);
2737                         }
2738                 } else
2739                         err = -EBUSY;
2740         } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
2741                 set_bit(ExternalBbl, &rdev->flags);
2742                 rdev->badblocks.shift = 0;
2743                 err = 0;
2744         } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
2745                 clear_bit(ExternalBbl, &rdev->flags);
2746                 err = 0;
2747         }
2748         if (!err)
2749                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2750         return err ? err : len;
2751 }
2752 static struct rdev_sysfs_entry rdev_state =
2753 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
2754
2755 static ssize_t
2756 errors_show(struct md_rdev *rdev, char *page)
2757 {
2758         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
2759 }
2760
2761 static ssize_t
2762 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
2763 {
2764         unsigned int n;
2765         int rv;
2766
2767         rv = kstrtouint(buf, 10, &n);
2768         if (rv < 0)
2769                 return rv;
2770         atomic_set(&rdev->corrected_errors, n);
2771         return len;
2772 }
2773 static struct rdev_sysfs_entry rdev_errors =
2774 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
2775
2776 static ssize_t
2777 slot_show(struct md_rdev *rdev, char *page)
2778 {
2779         if (test_bit(Journal, &rdev->flags))
2780                 return sprintf(page, "journal\n");
2781         else if (rdev->raid_disk < 0)
2782                 return sprintf(page, "none\n");
2783         else
2784                 return sprintf(page, "%d\n", rdev->raid_disk);
2785 }
2786
2787 static ssize_t
2788 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
2789 {
2790         int slot;
2791         int err;
2792
2793         if (test_bit(Journal, &rdev->flags))
2794                 return -EBUSY;
2795         if (strncmp(buf, "none", 4)==0)
2796                 slot = -1;
2797         else {
2798                 err = kstrtouint(buf, 10, (unsigned int *)&slot);
2799                 if (err < 0)
2800                         return err;
2801         }
2802         if (rdev->mddev->pers && slot == -1) {
2803                 /* Setting 'slot' on an active array requires also
2804                  * updating the 'rd%d' link, and communicating
2805                  * with the personality with ->hot_*_disk.
2806                  * For now we only support removing
2807                  * failed/spare devices.  This normally happens automatically,
2808                  * but not when the metadata is externally managed.
2809                  */
2810                 if (rdev->raid_disk == -1)
2811                         return -EEXIST;
2812                 /* personality does all needed checks */
2813                 if (rdev->mddev->pers->hot_remove_disk == NULL)
2814                         return -EINVAL;
2815                 clear_bit(Blocked, &rdev->flags);
2816                 remove_and_add_spares(rdev->mddev, rdev);
2817                 if (rdev->raid_disk >= 0)
2818                         return -EBUSY;
2819                 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2820                 md_wakeup_thread(rdev->mddev->thread);
2821         } else if (rdev->mddev->pers) {
2822                 /* Activating a spare .. or possibly reactivating
2823                  * if we ever get bitmaps working here.
2824                  */
2825                 int err;
2826
2827                 if (rdev->raid_disk != -1)
2828                         return -EBUSY;
2829
2830                 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
2831                         return -EBUSY;
2832
2833                 if (rdev->mddev->pers->hot_add_disk == NULL)
2834                         return -EINVAL;
2835
2836                 if (slot >= rdev->mddev->raid_disks &&
2837                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2838                         return -ENOSPC;
2839
2840                 rdev->raid_disk = slot;
2841                 if (test_bit(In_sync, &rdev->flags))
2842                         rdev->saved_raid_disk = slot;
2843                 else
2844                         rdev->saved_raid_disk = -1;
2845                 clear_bit(In_sync, &rdev->flags);
2846                 clear_bit(Bitmap_sync, &rdev->flags);
2847                 err = rdev->mddev->pers->
2848                         hot_add_disk(rdev->mddev, rdev);
2849                 if (err) {
2850                         rdev->raid_disk = -1;
2851                         return err;
2852                 } else
2853                         sysfs_notify_dirent_safe(rdev->sysfs_state);
2854                 if (sysfs_link_rdev(rdev->mddev, rdev))
2855                         /* failure here is OK */;
2856                 /* don't wakeup anyone, leave that to userspace. */
2857         } else {
2858                 if (slot >= rdev->mddev->raid_disks &&
2859                     slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
2860                         return -ENOSPC;
2861                 rdev->raid_disk = slot;
2862                 /* assume it is working */
2863                 clear_bit(Faulty, &rdev->flags);
2864                 clear_bit(WriteMostly, &rdev->flags);
2865                 set_bit(In_sync, &rdev->flags);
2866                 sysfs_notify_dirent_safe(rdev->sysfs_state);
2867         }
2868         return len;
2869 }
2870
2871 static struct rdev_sysfs_entry rdev_slot =
2872 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2873
2874 static ssize_t
2875 offset_show(struct md_rdev *rdev, char *page)
2876 {
2877         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2878 }
2879
2880 static ssize_t
2881 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
2882 {
2883         unsigned long long offset;
2884         if (kstrtoull(buf, 10, &offset) < 0)
2885                 return -EINVAL;
2886         if (rdev->mddev->pers && rdev->raid_disk >= 0)
2887                 return -EBUSY;
2888         if (rdev->sectors && rdev->mddev->external)
2889                 /* Must set offset before size, so overlap checks
2890                  * can be sane */
2891                 return -EBUSY;
2892         rdev->data_offset = offset;
2893         rdev->new_data_offset = offset;
2894         return len;
2895 }
2896
2897 static struct rdev_sysfs_entry rdev_offset =
2898 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2899
2900 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
2901 {
2902         return sprintf(page, "%llu\n",
2903                        (unsigned long long)rdev->new_data_offset);
2904 }
2905
2906 static ssize_t new_offset_store(struct md_rdev *rdev,
2907                                 const char *buf, size_t len)
2908 {
2909         unsigned long long new_offset;
2910         struct mddev *mddev = rdev->mddev;
2911
2912         if (kstrtoull(buf, 10, &new_offset) < 0)
2913                 return -EINVAL;
2914
2915         if (mddev->sync_thread ||
2916             test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
2917                 return -EBUSY;
2918         if (new_offset == rdev->data_offset)
2919                 /* reset is always permitted */
2920                 ;
2921         else if (new_offset > rdev->data_offset) {
2922                 /* must not push array size beyond rdev_sectors */
2923                 if (new_offset - rdev->data_offset
2924                     + mddev->dev_sectors > rdev->sectors)
2925                                 return -E2BIG;
2926         }
2927         /* Metadata worries about other space details. */
2928
2929         /* decreasing the offset is inconsistent with a backwards
2930          * reshape.
2931          */
2932         if (new_offset < rdev->data_offset &&
2933             mddev->reshape_backwards)
2934                 return -EINVAL;
2935         /* Increasing offset is inconsistent with forwards
2936          * reshape.  reshape_direction should be set to
2937          * 'backwards' first.
2938          */
2939         if (new_offset > rdev->data_offset &&
2940             !mddev->reshape_backwards)
2941                 return -EINVAL;
2942
2943         if (mddev->pers && mddev->persistent &&
2944             !super_types[mddev->major_version]
2945             .allow_new_offset(rdev, new_offset))
2946                 return -E2BIG;
2947         rdev->new_data_offset = new_offset;
2948         if (new_offset > rdev->data_offset)
2949                 mddev->reshape_backwards = 1;
2950         else if (new_offset < rdev->data_offset)
2951                 mddev->reshape_backwards = 0;
2952
2953         return len;
2954 }
2955 static struct rdev_sysfs_entry rdev_new_offset =
2956 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
2957
2958 static ssize_t
2959 rdev_size_show(struct md_rdev *rdev, char *page)
2960 {
2961         return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2962 }
2963
2964 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2965 {
2966         /* check if two start/length pairs overlap */
2967         if (s1+l1 <= s2)
2968                 return 0;
2969         if (s2+l2 <= s1)
2970                 return 0;
2971         return 1;
2972 }
2973
2974 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2975 {
2976         unsigned long long blocks;
2977         sector_t new;
2978
2979         if (kstrtoull(buf, 10, &blocks) < 0)
2980                 return -EINVAL;
2981
2982         if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2983                 return -EINVAL; /* sector conversion overflow */
2984
2985         new = blocks * 2;
2986         if (new != blocks * 2)
2987                 return -EINVAL; /* unsigned long long to sector_t overflow */
2988
2989         *sectors = new;
2990         return 0;
2991 }
2992
2993 static ssize_t
2994 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
2995 {
2996         struct mddev *my_mddev = rdev->mddev;
2997         sector_t oldsectors = rdev->sectors;
2998         sector_t sectors;
2999
3000         if (test_bit(Journal, &rdev->flags))
3001                 return -EBUSY;
3002         if (strict_blocks_to_sectors(buf, &sectors) < 0)
3003                 return -EINVAL;
3004         if (rdev->data_offset != rdev->new_data_offset)
3005                 return -EINVAL; /* too confusing */
3006         if (my_mddev->pers && rdev->raid_disk >= 0) {
3007                 if (my_mddev->persistent) {
3008                         sectors = super_types[my_mddev->major_version].
3009                                 rdev_size_change(rdev, sectors);
3010                         if (!sectors)
3011                                 return -EBUSY;
3012                 } else if (!sectors)
3013                         sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
3014                                 rdev->data_offset;
3015                 if (!my_mddev->pers->resize)
3016                         /* Cannot change size for RAID0 or Linear etc */
3017                         return -EINVAL;
3018         }
3019         if (sectors < my_mddev->dev_sectors)
3020                 return -EINVAL; /* component must fit device */
3021
3022         rdev->sectors = sectors;
3023         if (sectors > oldsectors && my_mddev->external) {
3024                 /* Need to check that all other rdevs with the same
3025                  * ->bdev do not overlap.  'rcu' is sufficient to walk
3026                  * the rdev lists safely.
3027                  * This check does not provide a hard guarantee, it
3028                  * just helps avoid dangerous mistakes.
3029                  */
3030                 struct mddev *mddev;
3031                 int overlap = 0;
3032                 struct list_head *tmp;
3033
3034                 rcu_read_lock();
3035                 for_each_mddev(mddev, tmp) {
3036                         struct md_rdev *rdev2;
3037
3038                         rdev_for_each(rdev2, mddev)
3039                                 if (rdev->bdev == rdev2->bdev &&
3040                                     rdev != rdev2 &&
3041                                     overlaps(rdev->data_offset, rdev->sectors,
3042                                              rdev2->data_offset,
3043                                              rdev2->sectors)) {
3044                                         overlap = 1;
3045                                         break;
3046                                 }
3047                         if (overlap) {
3048                                 mddev_put(mddev);
3049                                 break;
3050                         }
3051                 }
3052                 rcu_read_unlock();
3053                 if (overlap) {
3054                         /* Someone else could have slipped in a size
3055                          * change here, but doing so is just silly.
3056                          * We put oldsectors back because we *know* it is
3057                          * safe, and trust userspace not to race with
3058                          * itself
3059                          */
3060                         rdev->sectors = oldsectors;
3061                         return -EBUSY;
3062                 }
3063         }
3064         return len;
3065 }
3066
3067 static struct rdev_sysfs_entry rdev_size =
3068 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
3069
3070 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3071 {
3072         unsigned long long recovery_start = rdev->recovery_offset;
3073
3074         if (test_bit(In_sync, &rdev->flags) ||
3075             recovery_start == MaxSector)
3076                 return sprintf(page, "none\n");
3077
3078         return sprintf(page, "%llu\n", recovery_start);
3079 }
3080
3081 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3082 {
3083         unsigned long long recovery_start;
3084
3085         if (cmd_match(buf, "none"))
3086                 recovery_start = MaxSector;
3087         else if (kstrtoull(buf, 10, &recovery_start))
3088                 return -EINVAL;
3089
3090         if (rdev->mddev->pers &&
3091             rdev->raid_disk >= 0)
3092                 return -EBUSY;
3093
3094         rdev->recovery_offset = recovery_start;
3095         if (recovery_start == MaxSector)
3096                 set_bit(In_sync, &rdev->flags);
3097         else
3098                 clear_bit(In_sync, &rdev->flags);
3099         return len;
3100 }
3101
3102 static struct rdev_sysfs_entry rdev_recovery_start =
3103 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
3104
3105 /* sysfs access to bad-blocks list.
3106  * We present two files.
3107  * 'bad-blocks' lists sector numbers and lengths of ranges that
3108  *    are recorded as bad.  The list is truncated to fit within
3109  *    the one-page limit of sysfs.
3110  *    Writing "sector length" to this file adds an acknowledged
3111  *    bad block list.
3112  * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
3113  *    been acknowledged.  Writing to this file adds bad blocks
3114  *    without acknowledging them.  This is largely for testing.
3115  */
3116 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3117 {
3118         return badblocks_show(&rdev->badblocks, page, 0);
3119 }
3120 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3121 {
3122         int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3123         /* Maybe that ack was all we needed */
3124         if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3125                 wake_up(&rdev->blocked_wait);
3126         return rv;
3127 }
3128 static struct rdev_sysfs_entry rdev_bad_blocks =
3129 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
3130
3131 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3132 {
3133         return badblocks_show(&rdev->badblocks, page, 1);
3134 }
3135 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3136 {
3137         return badblocks_store(&rdev->badblocks, page, len, 1);
3138 }
3139 static struct rdev_sysfs_entry rdev_unack_bad_blocks =
3140 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
3141
3142 static struct attribute *rdev_default_attrs[] = {
3143         &rdev_state.attr,
3144         &rdev_errors.attr,
3145         &rdev_slot.attr,
3146         &rdev_offset.attr,
3147         &rdev_new_offset.attr,
3148         &rdev_size.attr,
3149         &rdev_recovery_start.attr,
3150         &rdev_bad_blocks.attr,
3151         &rdev_unack_bad_blocks.attr,
3152         NULL,
3153 };
3154 static ssize_t
3155 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3156 {
3157         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3158         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3159
3160         if (!entry->show)
3161                 return -EIO;
3162         if (!rdev->mddev)
3163                 return -EBUSY;
3164         return entry->show(rdev, page);
3165 }
3166
3167 static ssize_t
3168 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
3169               const char *page, size_t length)
3170 {
3171         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
3172         struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3173         ssize_t rv;
3174         struct mddev *mddev = rdev->mddev;
3175
3176         if (!entry->store)
3177                 return -EIO;
3178         if (!capable(CAP_SYS_ADMIN))
3179                 return -EACCES;
3180         rv = mddev ? mddev_lock(mddev): -EBUSY;
3181         if (!rv) {
3182                 if (rdev->mddev == NULL)
3183                         rv = -EBUSY;
3184                 else
3185                         rv = entry->store(rdev, page, length);
3186                 mddev_unlock(mddev);
3187         }
3188         return rv;
3189 }
3190
3191 static void rdev_free(struct kobject *ko)
3192 {
3193         struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3194         kfree(rdev);
3195 }
3196 static const struct sysfs_ops rdev_sysfs_ops = {
3197         .show           = rdev_attr_show,
3198         .store          = rdev_attr_store,
3199 };
3200 static struct kobj_type rdev_ktype = {
3201         .release        = rdev_free,
3202         .sysfs_ops      = &rdev_sysfs_ops,
3203         .default_attrs  = rdev_default_attrs,
3204 };
3205
3206 int md_rdev_init(struct md_rdev *rdev)
3207 {
3208         rdev->desc_nr = -1;
3209         rdev->saved_raid_disk = -1;
3210         rdev->raid_disk = -1;
3211         rdev->flags = 0;
3212         rdev->data_offset = 0;
3213         rdev->new_data_offset = 0;
3214         rdev->sb_events = 0;
3215         rdev->last_read_error = 0;
3216         rdev->sb_loaded = 0;
3217         rdev->bb_page = NULL;
3218         atomic_set(&rdev->nr_pending, 0);
3219         atomic_set(&rdev->read_errors, 0);
3220         atomic_set(&rdev->corrected_errors, 0);
3221
3222         INIT_LIST_HEAD(&rdev->same_set);
3223         init_waitqueue_head(&rdev->blocked_wait);
3224
3225         /* Add space to store bad block list.
3226          * This reserves the space even on arrays where it cannot
3227          * be used - I wonder if that matters
3228          */
3229         return badblocks_init(&rdev->badblocks, 0);
3230 }
3231 EXPORT_SYMBOL_GPL(md_rdev_init);
3232 /*
3233  * Import a device. If 'super_format' >= 0, then sanity check the superblock
3234  *
3235  * mark the device faulty if:
3236  *
3237  *   - the device is nonexistent (zero size)
3238  *   - the device has no valid superblock
3239  *
3240  * a faulty rdev _never_ has rdev->sb set.
3241  */
3242 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
3243 {
3244         char b[BDEVNAME_SIZE];
3245         int err;
3246         struct md_rdev *rdev;
3247         sector_t size;
3248
3249         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3250         if (!rdev)
3251                 return ERR_PTR(-ENOMEM);
3252
3253         err = md_rdev_init(rdev);
3254         if (err)
3255                 goto abort_free;
3256         err = alloc_disk_sb(rdev);
3257         if (err)
3258                 goto abort_free;
3259
3260         err = lock_rdev(rdev, newdev, super_format == -2);
3261         if (err)
3262                 goto abort_free;
3263
3264         kobject_init(&rdev->kobj, &rdev_ktype);
3265
3266         size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3267         if (!size) {
3268                 pr_warn("md: %s has zero or unknown size, marking faulty!\n",
3269                         bdevname(rdev->bdev,b));
3270                 err = -EINVAL;
3271                 goto abort_free;
3272         }
3273
3274         if (super_format >= 0) {
3275                 err = super_types[super_format].
3276                         load_super(rdev, NULL, super_minor);
3277                 if (err == -EINVAL) {
3278                         pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
3279                                 bdevname(rdev->bdev,b),
3280                                 super_format, super_minor);
3281                         goto abort_free;
3282                 }
3283                 if (err < 0) {
3284                         pr_warn("md: could not read %s's sb, not importing!\n",
3285                                 bdevname(rdev->bdev,b));
3286                         goto abort_free;
3287                 }
3288         }
3289
3290         return rdev;
3291
3292 abort_free:
3293         if (rdev->bdev)
3294                 unlock_rdev(rdev);
3295         md_rdev_clear(rdev);
3296         kfree(rdev);
3297         return ERR_PTR(err);
3298 }
3299
3300 /*
3301  * Check a full RAID array for plausibility
3302  */
3303
3304 static void analyze_sbs(struct mddev *mddev)
3305 {
3306         int i;
3307         struct md_rdev *rdev, *freshest, *tmp;
3308         char b[BDEVNAME_SIZE];
3309
3310         freshest = NULL;
3311         rdev_for_each_safe(rdev, tmp, mddev)
3312                 switch (super_types[mddev->major_version].
3313                         load_super(rdev, freshest, mddev->minor_version)) {
3314                 case 1:
3315                         freshest = rdev;
3316                         break;
3317                 case 0:
3318                         break;
3319                 default:
3320                         pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
3321                                 bdevname(rdev->bdev,b));
3322                         md_kick_rdev_from_array(rdev);
3323                 }
3324
3325         super_types[mddev->major_version].
3326                 validate_super(mddev, freshest);
3327
3328         i = 0;
3329         rdev_for_each_safe(rdev, tmp, mddev) {
3330                 if (mddev->max_disks &&
3331                     (rdev->desc_nr >= mddev->max_disks ||
3332                      i > mddev->max_disks)) {
3333                         pr_warn("md: %s: %s: only %d devices permitted\n",
3334                                 mdname(mddev), bdevname(rdev->bdev, b),
3335                                 mddev->max_disks);
3336                         md_kick_rdev_from_array(rdev);
3337                         continue;
3338                 }
3339                 if (rdev != freshest) {
3340                         if (super_types[mddev->major_version].
3341                             validate_super(mddev, rdev)) {
3342                                 pr_warn("md: kicking non-fresh %s from array!\n",
3343                                         bdevname(rdev->bdev,b));
3344                                 md_kick_rdev_from_array(rdev);
3345                                 continue;
3346                         }
3347                 }
3348                 if (mddev->level == LEVEL_MULTIPATH) {
3349                         rdev->desc_nr = i++;
3350                         rdev->raid_disk = rdev->desc_nr;
3351                         set_bit(In_sync, &rdev->flags);
3352                 } else if (rdev->raid_disk >=
3353                             (mddev->raid_disks - min(0, mddev->delta_disks)) &&
3354                            !test_bit(Journal, &rdev->flags)) {
3355                         rdev->raid_disk = -1;
3356                         clear_bit(In_sync, &rdev->flags);
3357                 }
3358         }
3359 }
3360
3361 /* Read a fixed-point number.
3362  * Numbers in sysfs attributes should be in "standard" units where
3363  * possible, so time should be in seconds.
3364  * However we internally use a a much smaller unit such as
3365  * milliseconds or jiffies.
3366  * This function takes a decimal number with a possible fractional
3367  * component, and produces an integer which is the result of
3368  * multiplying that number by 10^'scale'.
3369  * all without any floating-point arithmetic.
3370  */
3371 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3372 {
3373         unsigned long result = 0;
3374         long decimals = -1;
3375         while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3376                 if (*cp == '.')
3377                         decimals = 0;
3378                 else if (decimals < scale) {
3379                         unsigned int value;
3380                         value = *cp - '0';
3381                         result = result * 10 + value;
3382                         if (decimals >= 0)
3383                                 decimals++;
3384                 }
3385                 cp++;
3386         }
3387         if (*cp == '\n')
3388                 cp++;
3389         if (*cp)
3390                 return -EINVAL;
3391         if (decimals < 0)
3392                 decimals = 0;
3393         while (decimals < scale) {
3394                 result *= 10;
3395                 decimals ++;
3396         }
3397         *res = result;
3398         return 0;
3399 }
3400
3401 static ssize_t
3402 safe_delay_show(struct mddev *mddev, char *page)
3403 {
3404         int msec = (mddev->safemode_delay*1000)/HZ;
3405         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
3406 }
3407 static ssize_t
3408 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3409 {
3410         unsigned long msec;
3411
3412         if (mddev_is_clustered(mddev)) {
3413                 pr_warn("md: Safemode is disabled for clustered mode\n");
3414                 return -EINVAL;
3415         }
3416
3417         if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
3418                 return -EINVAL;
3419         if (msec == 0)
3420                 mddev->safemode_delay = 0;
3421         else {
3422                 unsigned long old_delay = mddev->safemode_delay;
3423                 unsigned long new_delay = (msec*HZ)/1000;
3424
3425                 if (new_delay == 0)
3426                         new_delay = 1;
3427                 mddev->safemode_delay = new_delay;
3428                 if (new_delay < old_delay || old_delay == 0)
3429                         mod_timer(&mddev->safemode_timer, jiffies+1);
3430         }
3431         return len;
3432 }
3433 static struct md_sysfs_entry md_safe_delay =
3434 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
3435
3436 static ssize_t
3437 level_show(struct mddev *mddev, char *page)
3438 {
3439         struct md_personality *p;
3440         int ret;
3441         spin_lock(&mddev->lock);
3442         p = mddev->pers;
3443         if (p)
3444                 ret = sprintf(page, "%s\n", p->name);
3445         else if (mddev->clevel[0])
3446                 ret = sprintf(page, "%s\n", mddev->clevel);
3447         else if (mddev->level != LEVEL_NONE)
3448                 ret = sprintf(page, "%d\n", mddev->level);
3449         else
3450                 ret = 0;
3451         spin_unlock(&mddev->lock);
3452         return ret;
3453 }
3454
3455 static ssize_t
3456 level_store(struct mddev *mddev, const char *buf, size_t len)
3457 {
3458         char clevel[16];
3459         ssize_t rv;
3460         size_t slen = len;
3461         struct md_personality *pers, *oldpers;
3462         long level;
3463         void *priv, *oldpriv;
3464         struct md_rdev *rdev;
3465
3466         if (slen == 0 || slen >= sizeof(clevel))
3467                 return -EINVAL;
3468
3469         rv = mddev_lock(mddev);
3470         if (rv)
3471                 return rv;
3472
3473         if (mddev->pers == NULL) {
3474                 strncpy(mddev->clevel, buf, slen);
3475                 if (mddev->clevel[slen-1] == '\n')
3476                         slen--;
3477                 mddev->clevel[slen] = 0;
3478                 mddev->level = LEVEL_NONE;
3479                 rv = len;
3480                 goto out_unlock;
3481         }
3482         rv = -EROFS;
3483         if (mddev->ro)
3484                 goto out_unlock;
3485
3486         /* request to change the personality.  Need to ensure:
3487          *  - array is not engaged in resync/recovery/reshape
3488          *  - old personality can be suspended
3489          *  - new personality will access other array.
3490          */
3491
3492         rv = -EBUSY;
3493         if (mddev->sync_thread ||
3494             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3495             mddev->reshape_position != MaxSector ||
3496             mddev->sysfs_active)
3497                 goto out_unlock;
3498
3499         rv = -EINVAL;
3500         if (!mddev->pers->quiesce) {
3501                 pr_warn("md: %s: %s does not support online personality change\n",
3502                         mdname(mddev), mddev->pers->name);
3503                 goto out_unlock;
3504         }
3505
3506         /* Now find the new personality */
3507         strncpy(clevel, buf, slen);
3508         if (clevel[slen-1] == '\n')
3509                 slen--;
3510         clevel[slen] = 0;
3511         if (kstrtol(clevel, 10, &level))
3512                 level = LEVEL_NONE;
3513
3514         if (request_module("md-%s", clevel) != 0)
3515                 request_module("md-level-%s", clevel);
3516         spin_lock(&pers_lock);
3517         pers = find_pers(level, clevel);
3518         if (!pers || !try_module_get(pers->owner)) {
3519                 spin_unlock(&pers_lock);
3520                 pr_warn("md: personality %s not loaded\n", clevel);
3521                 rv = -EINVAL;
3522                 goto out_unlock;
3523         }
3524         spin_unlock(&pers_lock);
3525
3526         if (pers == mddev->pers) {
3527                 /* Nothing to do! */
3528                 module_put(pers->owner);
3529                 rv = len;
3530                 goto out_unlock;
3531         }
3532         if (!pers->takeover) {
3533                 module_put(pers->owner);
3534                 pr_warn("md: %s: %s does not support personality takeover\n",
3535                         mdname(mddev), clevel);
3536                 rv = -EINVAL;
3537                 goto out_unlock;
3538         }
3539
3540         rdev_for_each(rdev, mddev)
3541                 rdev->new_raid_disk = rdev->raid_disk;
3542
3543         /* ->takeover must set new_* and/or delta_disks
3544          * if it succeeds, and may set them when it fails.
3545          */
3546         priv = pers->takeover(mddev);
3547         if (IS_ERR(priv)) {
3548                 mddev->new_level = mddev->level;
3549                 mddev->new_layout = mddev->layout;
3550                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3551                 mddev->raid_disks -= mddev->delta_disks;
3552                 mddev->delta_disks = 0;
3553                 mddev->reshape_backwards = 0;
3554                 module_put(pers->owner);
3555                 pr_warn("md: %s: %s would not accept array\n",
3556                         mdname(mddev), clevel);
3557                 rv = PTR_ERR(priv);
3558                 goto out_unlock;
3559         }
3560
3561         /* Looks like we have a winner */
3562         mddev_suspend(mddev);
3563         mddev_detach(mddev);
3564
3565         spin_lock(&mddev->lock);
3566         oldpers = mddev->pers;
3567         oldpriv = mddev->private;
3568         mddev->pers = pers;
3569         mddev->private = priv;
3570         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
3571         mddev->level = mddev->new_level;
3572         mddev->layout = mddev->new_layout;
3573         mddev->chunk_sectors = mddev->new_chunk_sectors;
3574         mddev->delta_disks = 0;
3575         mddev->reshape_backwards = 0;
3576         mddev->degraded = 0;
3577         spin_unlock(&mddev->lock);
3578
3579         if (oldpers->sync_request == NULL &&
3580             mddev->external) {
3581                 /* We are converting from a no-redundancy array
3582                  * to a redundancy array and metadata is managed
3583                  * externally so we need to be sure that writes
3584                  * won't block due to a need to transition
3585                  *      clean->dirty
3586                  * until external management is started.
3587                  */
3588                 mddev->in_sync = 0;
3589                 mddev->safemode_delay = 0;
3590                 mddev->safemode = 0;
3591         }
3592
3593         oldpers->free(mddev, oldpriv);
3594
3595         if (oldpers->sync_request == NULL &&
3596             pers->sync_request != NULL) {
3597                 /* need to add the md_redundancy_group */
3598                 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
3599                         pr_warn("md: cannot register extra attributes for %s\n",
3600                                 mdname(mddev));
3601                 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
3602         }
3603         if (oldpers->sync_request != NULL &&
3604             pers->sync_request == NULL) {
3605                 /* need to remove the md_redundancy_group */
3606                 if (mddev->to_remove == NULL)
3607                         mddev->to_remove = &md_redundancy_group;
3608         }
3609
3610         module_put(oldpers->owner);
3611
3612         rdev_for_each(rdev, mddev) {
3613                 if (rdev->raid_disk < 0)
3614                         continue;
3615                 if (rdev->new_raid_disk >= mddev->raid_disks)
3616                         rdev->new_raid_disk = -1;
3617                 if (rdev->new_raid_disk == rdev->raid_disk)
3618                         continue;
3619                 sysfs_unlink_rdev(mddev, rdev);
3620         }
3621         rdev_for_each(rdev, mddev) {
3622                 if (rdev->raid_disk < 0)
3623                         continue;
3624                 if (rdev->new_raid_disk == rdev->raid_disk)
3625                         continue;
3626                 rdev->raid_disk = rdev->new_raid_disk;
3627                 if (rdev->raid_disk < 0)
3628                         clear_bit(In_sync, &rdev->flags);
3629                 else {
3630                         if (sysfs_link_rdev(mddev, rdev))
3631                                 pr_warn("md: cannot register rd%d for %s after level change\n",
3632                                         rdev->raid_disk, mdname(mddev));
3633                 }
3634         }
3635
3636         if (pers->sync_request == NULL) {
3637                 /* this is now an array without redundancy, so
3638                  * it must always be in_sync
3639                  */
3640                 mddev->in_sync = 1;
3641                 del_timer_sync(&mddev->safemode_timer);
3642         }
3643         blk_set_stacking_limits(&mddev->queue->limits);
3644         pers->run(mddev);
3645         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3646         mddev_resume(mddev);
3647         if (!mddev->thread)
3648                 md_update_sb(mddev, 1);
3649         sysfs_notify(&mddev->kobj, NULL, "level");
3650         md_new_event(mddev);
3651         rv = len;
3652 out_unlock:
3653         mddev_unlock(mddev);
3654         return rv;
3655 }
3656
3657 static struct md_sysfs_entry md_level =
3658 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
3659
3660 static ssize_t
3661 layout_show(struct mddev *mddev, char *page)
3662 {
3663         /* just a number, not meaningful for all levels */
3664         if (mddev->reshape_position != MaxSector &&
3665             mddev->layout != mddev->new_layout)
3666                 return sprintf(page, "%d (%d)\n",
3667                                mddev->new_layout, mddev->layout);
3668         return sprintf(page, "%d\n", mddev->layout);
3669 }
3670
3671 static ssize_t
3672 layout_store(struct mddev *mddev, const char *buf, size_t len)
3673 {
3674         unsigned int n;
3675         int err;
3676
3677         err = kstrtouint(buf, 10, &n);
3678         if (err < 0)
3679                 return err;
3680         err = mddev_lock(mddev);
3681         if (err)
3682                 return err;
3683
3684         if (mddev->pers) {
3685                 if (mddev->pers->check_reshape == NULL)
3686                         err = -EBUSY;
3687                 else if (mddev->ro)
3688                         err = -EROFS;
3689                 else {
3690                         mddev->new_layout = n;
3691                         err = mddev->pers->check_reshape(mddev);
3692                         if (err)
3693                                 mddev->new_layout = mddev->layout;
3694                 }
3695         } else {
3696                 mddev->new_layout = n;
3697                 if (mddev->reshape_position == MaxSector)
3698                         mddev->layout = n;
3699         }
3700         mddev_unlock(mddev);
3701         return err ?: len;
3702 }
3703 static struct md_sysfs_entry md_layout =
3704 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
3705
3706 static ssize_t
3707 raid_disks_show(struct mddev *mddev, char *page)
3708 {
3709         if (mddev->raid_disks == 0)
3710                 return 0;
3711         if (mddev->reshape_position != MaxSector &&
3712             mddev->delta_disks != 0)
3713                 return sprintf(page, "%d (%d)\n", mddev->raid_disks,
3714                                mddev->raid_disks - mddev->delta_disks);
3715         return sprintf(page, "%d\n", mddev->raid_disks);
3716 }
3717
3718 static int update_raid_disks(struct mddev *mddev, int raid_disks);
3719
3720 static ssize_t
3721 raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
3722 {
3723         unsigned int n;
3724         int err;
3725
3726         err = kstrtouint(buf, 10, &n);
3727         if (err < 0)
3728                 return err;
3729
3730         err = mddev_lock(mddev);
3731         if (err)
3732                 return err;
3733         if (mddev->pers)
3734                 err = update_raid_disks(mddev, n);
3735         else if (mddev->reshape_position != MaxSector) {
3736                 struct md_rdev *rdev;
3737                 int olddisks = mddev->raid_disks - mddev->delta_disks;
3738
3739                 err = -EINVAL;
3740                 rdev_for_each(rdev, mddev) {
3741                         if (olddisks < n &&
3742                             rdev->data_offset < rdev->new_data_offset)
3743                                 goto out_unlock;
3744                         if (olddisks > n &&
3745                             rdev->data_offset > rdev->new_data_offset)
3746                                 goto out_unlock;
3747                 }
3748                 err = 0;
3749                 mddev->delta_disks = n - olddisks;
3750                 mddev->raid_disks = n;
3751                 mddev->reshape_backwards = (mddev->delta_disks < 0);
3752         } else
3753                 mddev->raid_disks = n;
3754 out_unlock:
3755         mddev_unlock(mddev);
3756         return err ? err : len;
3757 }
3758 static struct md_sysfs_entry md_raid_disks =
3759 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
3760
3761 static ssize_t
3762 chunk_size_show(struct mddev *mddev, char *page)
3763 {
3764         if (mddev->reshape_position != MaxSector &&
3765             mddev->chunk_sectors != mddev->new_chunk_sectors)
3766                 return sprintf(page, "%d (%d)\n",
3767                                mddev->new_chunk_sectors << 9,
3768                                mddev->chunk_sectors << 9);
3769         return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
3770 }
3771
3772 static ssize_t
3773 chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
3774 {
3775         unsigned long n;
3776         int err;
3777
3778         err = kstrtoul(buf, 10, &n);
3779         if (err < 0)
3780                 return err;
3781
3782         err = mddev_lock(mddev);
3783         if (err)
3784                 return err;
3785         if (mddev->pers) {
3786                 if (mddev->pers->check_reshape == NULL)
3787                         err = -EBUSY;
3788                 else if (mddev->ro)
3789                         err = -EROFS;
3790                 else {
3791                         mddev->new_chunk_sectors = n >> 9;
3792                         err = mddev->pers->check_reshape(mddev);
3793                         if (err)
3794                                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3795                 }
3796         } else {
3797                 mddev->new_chunk_sectors = n >> 9;
3798                 if (mddev->reshape_position == MaxSector)
3799                         mddev->chunk_sectors = n >> 9;
3800         }
3801         mddev_unlock(mddev);
3802         return err ?: len;
3803 }
3804 static struct md_sysfs_entry md_chunk_size =
3805 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
3806
3807 static ssize_t
3808 resync_start_show(struct mddev *mddev, char *page)
3809 {
3810         if (mddev->recovery_cp == MaxSector)
3811                 return sprintf(page, "none\n");
3812         return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
3813 }
3814
3815 static ssize_t
3816 resync_start_store(struct mddev *mddev, const char *buf, size_t len)
3817 {
3818         unsigned long long n;
3819         int err;
3820
3821         if (cmd_match(buf, "none"))
3822                 n = MaxSector;
3823         else {
3824                 err = kstrtoull(buf, 10, &n);
3825                 if (err < 0)
3826                         return err;
3827                 if (n != (sector_t)n)
3828                         return -EINVAL;
3829         }
3830
3831         err = mddev_lock(mddev);
3832         if (err)
3833                 return err;
3834         if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
3835                 err = -EBUSY;
3836
3837         if (!err) {
3838                 mddev->recovery_cp = n;
3839                 if (mddev->pers)
3840                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
3841         }
3842         mddev_unlock(mddev);
3843         return err ?: len;
3844 }
3845 static struct md_sysfs_entry md_resync_start =
3846 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
3847                 resync_start_show, resync_start_store);
3848
3849 /*
3850  * The array state can be:
3851  *
3852  * clear
3853  *     No devices, no size, no level
3854  *     Equivalent to STOP_ARRAY ioctl
3855  * inactive
3856  *     May have some settings, but array is not active
3857  *        all IO results in error
3858  *     When written, doesn't tear down array, but just stops it
3859  * suspended (not supported yet)
3860  *     All IO requests will block. The array can be reconfigured.
3861  *     Writing this, if accepted, will block until array is quiescent
3862  * readonly
3863  *     no resync can happen.  no superblocks get written.
3864  *     write requests fail
3865  * read-auto
3866  *     like readonly, but behaves like 'clean' on a write request.
3867  *
3868  * clean - no pending writes, but otherwise active.
3869  *     When written to inactive array, starts without resync
3870  *     If a write request arrives then
3871  *       if metadata is known, mark 'dirty' and switch to 'active'.
3872  *       if not known, block and switch to write-pending
3873  *     If written to an active array that has pending writes, then fails.
3874  * active
3875  *     fully active: IO and resync can be happening.
3876  *     When written to inactive array, starts with resync
3877  *
3878  * write-pending
3879  *     clean, but writes are blocked waiting for 'active' to be written.
3880  *
3881  * active-idle
3882  *     like active, but no writes have been seen for a while (100msec).
3883  *
3884  */
3885 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
3886                    write_pending, active_idle, bad_word};
3887 static char *array_states[] = {
3888         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
3889         "write-pending", "active-idle", NULL };
3890
3891 static int match_word(const char *word, char **list)
3892 {
3893         int n;
3894         for (n=0; list[n]; n++)
3895                 if (cmd_match(word, list[n]))
3896                         break;
3897         return n;
3898 }
3899
3900 static ssize_t
3901 array_state_show(struct mddev *mddev, char *page)
3902 {
3903         enum array_state st = inactive;
3904
3905         if (mddev->pers)
3906                 switch(mddev->ro) {
3907                 case 1:
3908                         st = readonly;
3909                         break;
3910                 case 2:
3911                         st = read_auto;
3912                         break;
3913                 case 0:
3914                         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
3915                                 st = write_pending;
3916                         else if (mddev->in_sync)
3917                                 st = clean;
3918                         else if (mddev->safemode)
3919                                 st = active_idle;
3920                         else
3921                                 st = active;
3922                 }
3923         else {
3924                 if (list_empty(&mddev->disks) &&
3925                     mddev->raid_disks == 0 &&
3926                     mddev->dev_sectors == 0)
3927                         st = clear;
3928                 else
3929                         st = inactive;
3930         }
3931         return sprintf(page, "%s\n", array_states[st]);
3932 }
3933
3934 static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
3935 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
3936 static int do_md_run(struct mddev *mddev);
3937 static int restart_array(struct mddev *mddev);
3938
3939 static ssize_t
3940 array_state_store(struct mddev *mddev, const char *buf, size_t len)
3941 {
3942         int err;
3943         enum array_state st = match_word(buf, array_states);
3944
3945         if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
3946                 /* don't take reconfig_mutex when toggling between
3947                  * clean and active
3948                  */
3949                 spin_lock(&mddev->lock);
3950                 if (st == active) {
3951                         restart_array(mddev);
3952                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
3953                         md_wakeup_thread(mddev->thread);
3954                         wake_up(&mddev->sb_wait);
3955                         err = 0;
3956                 } else /* st == clean */ {
3957                         restart_array(mddev);
3958                         if (atomic_read(&mddev->writes_pending) == 0) {
3959                                 if (mddev->in_sync == 0) {
3960                                         mddev->in_sync = 1;
3961                                         if (mddev->safemode == 1)
3962                                                 mddev->safemode = 0;
3963                                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
3964                                 }
3965                                 err = 0;
3966                         } else
3967                                 err = -EBUSY;
3968                 }
3969                 if (!err)
3970                         sysfs_notify_dirent_safe(mddev->sysfs_state);
3971                 spin_unlock(&mddev->lock);
3972                 return err ?: len;
3973         }
3974         err = mddev_lock(mddev);
3975         if (err)
3976                 return err;
3977         err = -EINVAL;
3978         switch(st) {
3979         case bad_word:
3980                 break;
3981         case clear:
3982                 /* stopping an active array */
3983                 err = do_md_stop(mddev, 0, NULL);
3984                 break;
3985         case inactive:
3986                 /* stopping an active array */
3987                 if (mddev->pers)
3988                         err = do_md_stop(mddev, 2, NULL);
3989                 else
3990                         err = 0; /* already inactive */
3991                 break;
3992         case suspended:
3993                 break; /* not supported yet */
3994         case readonly:
3995                 if (mddev->pers)
3996                         err = md_set_readonly(mddev, NULL);
3997                 else {
3998                         mddev->ro = 1;
3999                         set_disk_ro(mddev->gendisk, 1);
4000                         err = do_md_run(mddev);
4001                 }
4002                 break;
4003         case read_auto:
4004                 if (mddev->pers) {
4005                         if (mddev->ro == 0)
4006                                 err = md_set_readonly(mddev, NULL);
4007                         else if (mddev->ro == 1)
4008                                 err = restart_array(mddev);
4009                         if (err == 0) {
4010                                 mddev->ro = 2;
4011                                 set_disk_ro(mddev->gendisk, 0);
4012                         }
4013                 } else {
4014                         mddev->ro = 2;
4015                         err = do_md_run(mddev);
4016                 }
4017                 break;
4018         case clean:
4019                 if (mddev->pers) {
4020                         err = restart_array(mddev);
4021                         if (err)
4022                                 break;
4023                         spin_lock(&mddev->lock);
4024                         if (atomic_read(&mddev->writes_pending) == 0) {
4025                                 if (mddev->in_sync == 0) {
4026                                         mddev->in_sync = 1;
4027                                         if (mddev->safemode == 1)
4028                                                 mddev->safemode = 0;
4029                                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
4030                                 }
4031                                 err = 0;
4032                         } else
4033                                 err = -EBUSY;
4034                         spin_unlock(&mddev->lock);
4035                 } else
4036                         err = -EINVAL;
4037                 break;
4038         case active:
4039                 if (mddev->pers) {
4040                         err = restart_array(mddev);
4041                         if (err)
4042                                 break;
4043                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
4044                         wake_up(&mddev->sb_wait);
4045                         err = 0;
4046                 } else {
4047                         mddev->ro = 0;
4048                         set_disk_ro(mddev->gendisk, 0);
4049                         err = do_md_run(mddev);
4050                 }
4051                 break;
4052         case write_pending:
4053         case active_idle:
4054                 /* these cannot be set */
4055                 break;
4056         }
4057
4058         if (!err) {
4059                 if (mddev->hold_active == UNTIL_IOCTL)
4060                         mddev->hold_active = 0;
4061                 sysfs_notify_dirent_safe(mddev->sysfs_state);
4062         }
4063         mddev_unlock(mddev);
4064         return err ?: len;
4065 }
4066 static struct md_sysfs_entry md_array_state =
4067 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
4068
4069 static ssize_t
4070 max_corrected_read_errors_show(struct mddev *mddev, char *page) {
4071         return sprintf(page, "%d\n",
4072                        atomic_read(&mddev->max_corr_read_errors));
4073 }
4074
4075 static ssize_t
4076 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
4077 {
4078         unsigned int n;
4079         int rv;
4080
4081         rv = kstrtouint(buf, 10, &n);
4082         if (rv < 0)
4083                 return rv;
4084         atomic_set(&mddev->max_corr_read_errors, n);
4085         return len;
4086 }
4087
4088 static struct md_sysfs_entry max_corr_read_errors =
4089 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
4090         max_corrected_read_errors_store);
4091
4092 static ssize_t
4093 null_show(struct mddev *mddev, char *page)
4094 {
4095         return -EINVAL;
4096 }
4097
4098 static ssize_t
4099 new_dev_store(struct mddev *mddev, const char *buf, size_t len)
4100 {
4101         /* buf must be %d:%d\n? giving major and minor numbers */
4102         /* The new device is added to the array.
4103          * If the array has a persistent superblock, we read the
4104          * superblock to initialise info and check validity.
4105          * Otherwise, only checking done is that in bind_rdev_to_array,
4106          * which mainly checks size.
4107          */
4108         char *e;
4109         int major = simple_strtoul(buf, &e, 10);
4110         int minor;
4111         dev_t dev;
4112         struct md_rdev *rdev;
4113         int err;
4114
4115         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
4116                 return -EINVAL;
4117         minor = simple_strtoul(e+1, &e, 10);
4118         if (*e && *e != '\n')
4119                 return -EINVAL;
4120         dev = MKDEV(major, minor);
4121         if (major != MAJOR(dev) ||
4122             minor != MINOR(dev))
4123                 return -EOVERFLOW;
4124
4125         flush_workqueue(md_misc_wq);
4126
4127         err = mddev_lock(mddev);
4128         if (err)
4129                 return err;
4130         if (mddev->persistent) {
4131                 rdev = md_import_device(dev, mddev->major_version,
4132                                         mddev->minor_version);
4133                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4134                         struct md_rdev *rdev0
4135                                 = list_entry(mddev->disks.next,
4136                                              struct md_rdev, same_set);
4137                         err = super_types[mddev->major_version]
4138                                 .load_super(rdev, rdev0, mddev->minor_version);
4139                         if (err < 0)
4140                                 goto out;
4141                 }
4142         } else if (mddev->external)
4143                 rdev = md_import_device(dev, -2, -1);
4144         else
4145                 rdev = md_import_device(dev, -1, -1);
4146
4147         if (IS_ERR(rdev)) {
4148                 mddev_unlock(mddev);
4149                 return PTR_ERR(rdev);
4150         }
4151         err = bind_rdev_to_array(rdev, mddev);
4152  out:
4153         if (err)
4154                 export_rdev(rdev);
4155         mddev_unlock(mddev);
4156         return err ? err : len;
4157 }
4158
4159 static struct md_sysfs_entry md_new_device =
4160 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
4161
4162 static ssize_t
4163 bitmap_store(struct mddev *mddev, const char *buf, size_t len)
4164 {
4165         char *end;
4166         unsigned long chunk, end_chunk;
4167         int err;
4168
4169         err = mddev_lock(mddev);
4170         if (err)
4171                 return err;
4172         if (!mddev->bitmap)
4173                 goto out;
4174         /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
4175         while (*buf) {
4176                 chunk = end_chunk = simple_strtoul(buf, &end, 0);
4177                 if (buf == end) break;
4178                 if (*end == '-') { /* range */
4179                         buf = end + 1;
4180                         end_chunk = simple_strtoul(buf, &end, 0);
4181                         if (buf == end) break;
4182                 }
4183                 if (*end && !isspace(*end)) break;
4184                 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
4185                 buf = skip_spaces(end);
4186         }
4187         bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
4188 out:
4189         mddev_unlock(mddev);
4190         return len;
4191 }
4192
4193 static struct md_sysfs_entry md_bitmap =
4194 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
4195
4196 static ssize_t
4197 size_show(struct mddev *mddev, char *page)
4198 {
4199         return sprintf(page, "%llu\n",
4200                 (unsigned long long)mddev->dev_sectors / 2);
4201 }
4202
4203 static int update_size(struct mddev *mddev, sector_t num_sectors);
4204
4205 static ssize_t
4206 size_store(struct mddev *mddev, const char *buf, size_t len)
4207 {
4208         /* If array is inactive, we can reduce the component size, but
4209          * not increase it (except from 0).
4210          * If array is active, we can try an on-line resize
4211          */
4212         sector_t sectors;
4213         int err = strict_blocks_to_sectors(buf, &sectors);
4214
4215         if (err < 0)
4216                 return err;
4217         err = mddev_lock(mddev);
4218         if (err)
4219                 return err;
4220         if (mddev->pers) {
4221                 err = update_size(mddev, sectors);
4222                 if (err == 0)
4223                         md_update_sb(mddev, 1);
4224         } else {
4225                 if (mddev->dev_sectors == 0 ||
4226                     mddev->dev_sectors > sectors)
4227                         mddev->dev_sectors = sectors;
4228                 else
4229                         err = -ENOSPC;
4230         }
4231         mddev_unlock(mddev);
4232         return err ? err : len;
4233 }
4234
4235 static struct md_sysfs_entry md_size =
4236 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4237
4238 /* Metadata version.
4239  * This is one of
4240  *   'none' for arrays with no metadata (good luck...)
4241  *   'external' for arrays with externally managed metadata,
4242  * or N.M for internally known formats
4243  */
4244 static ssize_t
4245 metadata_show(struct mddev *mddev, char *page)
4246 {
4247         if (mddev->persistent)
4248                 return sprintf(page, "%d.%d\n",
4249                                mddev->major_version, mddev->minor_version);
4250         else if (mddev->external)
4251                 return sprintf(page, "external:%s\n", mddev->metadata_type);
4252         else
4253                 return sprintf(page, "none\n");
4254 }
4255
4256 static ssize_t
4257 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4258 {
4259         int major, minor;
4260         char *e;
4261         int err;
4262         /* Changing the details of 'external' metadata is
4263          * always permitted.  Otherwise there must be
4264          * no devices attached to the array.
4265          */
4266
4267         err = mddev_lock(mddev);
4268         if (err)
4269                 return err;
4270         err = -EBUSY;
4271         if (mddev->external && strncmp(buf, "external:", 9) == 0)
4272                 ;
4273         else if (!list_empty(&mddev->disks))
4274                 goto out_unlock;
4275
4276         err = 0;
4277         if (cmd_match(buf, "none")) {
4278                 mddev->persistent = 0;
4279                 mddev->external = 0;
4280                 mddev->major_version = 0;
4281                 mddev->minor_version = 90;
4282                 goto out_unlock;
4283         }
4284         if (strncmp(buf, "external:", 9) == 0) {
4285                 size_t namelen = len-9;
4286                 if (namelen >= sizeof(mddev->metadata_type))
4287                         namelen = sizeof(mddev->metadata_type)-1;
4288                 strncpy(mddev->metadata_type, buf+9, namelen);
4289                 mddev->metadata_type[namelen] = 0;
4290                 if (namelen && mddev->metadata_type[namelen-1] == '\n')
4291                         mddev->metadata_type[--namelen] = 0;
4292                 mddev->persistent = 0;
4293                 mddev->external = 1;
4294                 mddev->major_version = 0;
4295                 mddev->minor_version = 90;
4296                 goto out_unlock;
4297         }
4298         major = simple_strtoul(buf, &e, 10);
4299         err = -EINVAL;
4300         if (e==buf || *e != '.')
4301                 goto out_unlock;
4302         buf = e+1;
4303         minor = simple_strtoul(buf, &e, 10);
4304         if (e==buf || (*e && *e != '\n') )
4305                 goto out_unlock;
4306         err = -ENOENT;
4307         if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
4308                 goto out_unlock;
4309         mddev->major_version = major;
4310         mddev->minor_version = minor;
4311         mddev->persistent = 1;
4312         mddev->external = 0;
4313         err = 0;
4314 out_unlock:
4315         mddev_unlock(mddev);
4316         return err ?: len;
4317 }
4318
4319 static struct md_sysfs_entry md_metadata =
4320 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
4321
4322 static ssize_t
4323 action_show(struct mddev *mddev, char *page)
4324 {
4325         char *type = "idle";
4326         unsigned long recovery = mddev->recovery;
4327         if (test_bit(MD_RECOVERY_FROZEN, &recovery))
4328                 type = "frozen";
4329         else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
4330             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
4331                 if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
4332                         type = "reshape";
4333                 else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
4334                         if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
4335                                 type = "resync";
4336                         else if (test_bit(MD_RECOVERY_CHECK, &recovery))
4337                                 type = "check";
4338                         else
4339                                 type = "repair";
4340                 } else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
4341                         type = "recover";
4342                 else if (mddev->reshape_position != MaxSector)
4343                         type = "reshape";
4344         }
4345         return sprintf(page, "%s\n", type);
4346 }
4347
4348 static ssize_t
4349 action_store(struct mddev *mddev, const char *page, size_t len)
4350 {
4351         if (!mddev->pers || !mddev->pers->sync_request)
4352                 return -EINVAL;
4353
4354
4355         if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
4356                 if (cmd_match(page, "frozen"))
4357                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4358                 else
4359                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4360                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4361                     mddev_lock(mddev) == 0) {
4362                         flush_workqueue(md_misc_wq);
4363                         if (mddev->sync_thread) {
4364                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4365                                 md_reap_sync_thread(mddev);
4366                         }
4367                         mddev_unlock(mddev);
4368                 }
4369         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4370                 return -EBUSY;
4371         else if (cmd_match(page, "resync"))
4372                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4373         else if (cmd_match(page, "recover")) {
4374                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4375                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
4376         } else if (cmd_match(page, "reshape")) {
4377                 int err;
4378                 if (mddev->pers->start_reshape == NULL)
4379                         return -EINVAL;
4380                 err = mddev_lock(mddev);
4381                 if (!err) {
4382                         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4383                                 err =  -EBUSY;
4384                         else {
4385                                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4386                                 err = mddev->pers->start_reshape(mddev);
4387                         }
4388                         mddev_unlock(mddev);
4389                 }
4390                 if (err)
4391                         return err;
4392                 sysfs_notify(&mddev->kobj, NULL, "degraded");
4393         } else {
4394                 if (cmd_match(page, "check"))
4395                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4396                 else if (!cmd_match(page, "repair"))
4397                         return -EINVAL;
4398                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4399                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
4400                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4401         }
4402         if (mddev->ro == 2) {
4403                 /* A write to sync_action is enough to justify
4404                  * canceling read-auto mode
4405                  */
4406                 mddev->ro = 0;
4407                 md_wakeup_thread(mddev->sync_thread);
4408         }
4409         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4410         md_wakeup_thread(mddev->thread);
4411         sysfs_notify_dirent_safe(mddev->sysfs_action);
4412         return len;
4413 }
4414
4415 static struct md_sysfs_entry md_scan_mode =
4416 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
4417
4418 static ssize_t
4419 last_sync_action_show(struct mddev *mddev, char *page)
4420 {
4421         return sprintf(page, "%s\n", mddev->last_sync_action);
4422 }
4423
4424 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);
4425
4426 static ssize_t
4427 mismatch_cnt_show(struct mddev *mddev, char *page)
4428 {
4429         return sprintf(page, "%llu\n",
4430                        (unsigned long long)
4431                        atomic64_read(&mddev->resync_mismatches));
4432 }
4433
4434 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);
4435
4436 static ssize_t
4437 sync_min_show(struct mddev *mddev, char *page)
4438 {
4439         return sprintf(page, "%d (%s)\n", speed_min(mddev),
4440                        mddev->sync_speed_min ? "local": "system");
4441 }
4442
4443 static ssize_t
4444 sync_min_store(struct mddev *mddev, const char *buf, size_t len)
4445 {
4446         unsigned int min;
4447         int rv;
4448
4449         if (strncmp(buf, "system", 6)==0) {
4450                 min = 0;
4451         } else {
4452                 rv = kstrtouint(buf, 10, &min);
4453                 if (rv < 0)
4454                         return rv;
4455                 if (min == 0)
4456                         return -EINVAL;
4457         }
4458         mddev->sync_speed_min = min;
4459         return len;
4460 }
4461
4462 static struct md_sysfs_entry md_sync_min =
4463 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
4464
4465 static ssize_t
4466 sync_max_show(struct mddev *mddev, char *page)
4467 {
4468         return sprintf(page, "%d (%s)\n", speed_max(mddev),
4469                        mddev->sync_speed_max ? "local": "system");
4470 }
4471
4472 static ssize_t
4473 sync_max_store(struct mddev *mddev, const char *buf, size_t len)
4474 {
4475         unsigned int max;
4476         int rv;
4477
4478         if (strncmp(buf, "system", 6)==0) {
4479                 max = 0;
4480         } else {
4481                 rv = kstrtouint(buf, 10, &max);
4482                 if (rv < 0)
4483                         return rv;
4484                 if (max == 0)
4485                         return -EINVAL;
4486         }
4487         mddev->sync_speed_max = max;
4488         return len;
4489 }
4490
4491 static struct md_sysfs_entry md_sync_max =
4492 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
4493
4494 static ssize_t
4495 degraded_show(struct mddev *mddev, char *page)
4496 {
4497         return sprintf(page, "%d\n", mddev->degraded);
4498 }
4499 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);
4500
4501 static ssize_t
4502 sync_force_parallel_show(struct mddev *mddev, char *page)
4503 {
4504         return sprintf(page, "%d\n", mddev->parallel_resync);
4505 }
4506
4507 static ssize_t
4508 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
4509 {
4510         long n;
4511
4512         if (kstrtol(buf, 10, &n))
4513                 return -EINVAL;
4514
4515         if (n != 0 && n != 1)
4516                 return -EINVAL;
4517
4518         mddev->parallel_resync = n;
4519
4520         if (mddev->sync_thread)
4521                 wake_up(&resync_wait);
4522
4523         return len;
4524 }
4525
4526 /* force parallel resync, even with shared block devices */
4527 static struct md_sysfs_entry md_sync_force_parallel =
4528 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
4529        sync_force_parallel_show, sync_force_parallel_store);
4530
4531 static ssize_t
4532 sync_speed_show(struct mddev *mddev, char *page)
4533 {
4534         unsigned long resync, dt, db;
4535         if (mddev->curr_resync == 0)
4536                 return sprintf(page, "none\n");
4537         resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
4538         dt = (jiffies - mddev->resync_mark) / HZ;
4539         if (!dt) dt++;
4540         db = resync - mddev->resync_mark_cnt;
4541         return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
4542 }
4543
4544 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);
4545
4546 static ssize_t
4547 sync_completed_show(struct mddev *mddev, char *page)
4548 {
4549         unsigned long long max_sectors, resync;
4550
4551         if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4552                 return sprintf(page, "none\n");
4553
4554         if (mddev->curr_resync == 1 ||
4555             mddev->curr_resync == 2)
4556                 return sprintf(page, "delayed\n");
4557
4558         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
4559             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4560                 max_sectors = mddev->resync_max_sectors;
4561         else
4562                 max_sectors = mddev->dev_sectors;
4563
4564         resync = mddev->curr_resync_completed;
4565         return sprintf(page, "%llu / %llu\n", resync, max_sectors);
4566 }
4567
4568 static struct md_sysfs_entry md_sync_completed =
4569         __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);
4570
4571 static ssize_t
4572 min_sync_show(struct mddev *mddev, char *page)
4573 {
4574         return sprintf(page, "%llu\n",
4575                        (unsigned long long)mddev->resync_min);
4576 }
4577 static ssize_t
4578 min_sync_store(struct mddev *mddev, const char *buf, size_t len)
4579 {
4580         unsigned long long min;
4581         int err;
4582
4583         if (kstrtoull(buf, 10, &min))
4584                 return -EINVAL;
4585
4586         spin_lock(&mddev->lock);
4587         err = -EINVAL;
4588         if (min > mddev->resync_max)
4589                 goto out_unlock;
4590
4591         err = -EBUSY;
4592         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4593                 goto out_unlock;
4594
4595         /* Round down to multiple of 4K for safety */
4596         mddev->resync_min = round_down(min, 8);
4597         err = 0;
4598
4599 out_unlock:
4600         spin_unlock(&mddev->lock);
4601         return err ?: len;
4602 }
4603
4604 static struct md_sysfs_entry md_min_sync =
4605 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);
4606
4607 static ssize_t
4608 max_sync_show(struct mddev *mddev, char *page)
4609 {
4610         if (mddev->resync_max == MaxSector)
4611                 return sprintf(page, "max\n");
4612         else
4613                 return sprintf(page, "%llu\n",
4614                                (unsigned long long)mddev->resync_max);
4615 }
4616 static ssize_t
4617 max_sync_store(struct mddev *mddev, const char *buf, size_t len)
4618 {
4619         int err;
4620         spin_lock(&mddev->lock);
4621         if (strncmp(buf, "max", 3) == 0)
4622                 mddev->resync_max = MaxSector;
4623         else {
4624                 unsigned long long max;
4625                 int chunk;
4626
4627                 err = -EINVAL;
4628                 if (kstrtoull(buf, 10, &max))
4629                         goto out_unlock;
4630                 if (max < mddev->resync_min)
4631                         goto out_unlock;
4632
4633                 err = -EBUSY;
4634                 if (max < mddev->resync_max &&
4635                     mddev->ro == 0 &&
4636                     test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4637                         goto out_unlock;
4638
4639                 /* Must be a multiple of chunk_size */
4640                 chunk = mddev->chunk_sectors;
4641                 if (chunk) {
4642                         sector_t temp = max;
4643
4644                         err = -EINVAL;
4645                         if (sector_div(temp, chunk))
4646                                 goto out_unlock;
4647                 }
4648                 mddev->resync_max = max;
4649         }
4650         wake_up(&mddev->recovery_wait);
4651         err = 0;
4652 out_unlock:
4653         spin_unlock(&mddev->lock);
4654         return err ?: len;
4655 }
4656
4657 static struct md_sysfs_entry md_max_sync =
4658 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
4659
4660 static ssize_t
4661 suspend_lo_show(struct mddev *mddev, char *page)
4662 {
4663         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
4664 }
4665
4666 static ssize_t
4667 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
4668 {
4669         unsigned long long old, new;
4670         int err;
4671
4672         err = kstrtoull(buf, 10, &new);
4673         if (err < 0)
4674                 return err;
4675         if (new != (sector_t)new)
4676                 return -EINVAL;
4677
4678         err = mddev_lock(mddev);
4679         if (err)
4680                 return err;
4681         err = -EINVAL;
4682         if (mddev->pers == NULL ||
4683             mddev->pers->quiesce == NULL)
4684                 goto unlock;
4685         old = mddev->suspend_lo;
4686         mddev->suspend_lo = new;
4687         if (new >= old)
4688                 /* Shrinking suspended region */
4689                 mddev->pers->quiesce(mddev, 2);
4690         else {
4691                 /* Expanding suspended region - need to wait */
4692                 mddev->pers->quiesce(mddev, 1);
4693                 mddev->pers->quiesce(mddev, 0);
4694         }
4695         err = 0;
4696 unlock:
4697         mddev_unlock(mddev);
4698         return err ?: len;
4699 }
4700 static struct md_sysfs_entry md_suspend_lo =
4701 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
4702
4703 static ssize_t
4704 suspend_hi_show(struct mddev *mddev, char *page)
4705 {
4706         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
4707 }
4708
4709 static ssize_t
4710 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
4711 {
4712         unsigned long long old, new;
4713         int err;
4714
4715         err = kstrtoull(buf, 10, &new);
4716         if (err < 0)
4717                 return err;
4718         if (new != (sector_t)new)
4719                 return -EINVAL;
4720
4721         err = mddev_lock(mddev);
4722         if (err)
4723                 return err;
4724         err = -EINVAL;
4725         if (mddev->pers == NULL ||
4726             mddev->pers->quiesce == NULL)
4727                 goto unlock;
4728         old = mddev->suspend_hi;
4729         mddev->suspend_hi = new;
4730         if (new <= old)
4731                 /* Shrinking suspended region */
4732                 mddev->pers->quiesce(mddev, 2);
4733         else {
4734                 /* Expanding suspended region - need to wait */
4735                 mddev->pers->quiesce(mddev, 1);
4736                 mddev->pers->quiesce(mddev, 0);
4737         }
4738         err = 0;
4739 unlock:
4740         mddev_unlock(mddev);
4741         return err ?: len;
4742 }
4743 static struct md_sysfs_entry md_suspend_hi =
4744 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
4745
4746 static ssize_t
4747 reshape_position_show(struct mddev *mddev, char *page)
4748 {
4749         if (mddev->reshape_position != MaxSector)
4750                 return sprintf(page, "%llu\n",
4751                                (unsigned long long)mddev->reshape_position);
4752         strcpy(page, "none\n");
4753         return 5;
4754 }
4755
4756 static ssize_t
4757 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
4758 {
4759         struct md_rdev *rdev;
4760         unsigned long long new;
4761         int err;
4762
4763         err = kstrtoull(buf, 10, &new);
4764         if (err < 0)
4765                 return err;
4766         if (new != (sector_t)new)
4767                 return -EINVAL;
4768         err = mddev_lock(mddev);
4769         if (err)
4770                 return err;
4771         err = -EBUSY;
4772         if (mddev->pers)
4773                 goto unlock;
4774         mddev->reshape_position = new;
4775         mddev->delta_disks = 0;
4776         mddev->reshape_backwards = 0;
4777         mddev->new_level = mddev->level;
4778         mddev->new_layout = mddev->layout;
4779         mddev->new_chunk_sectors = mddev->chunk_sectors;
4780         rdev_for_each(rdev, mddev)
4781                 rdev->new_data_offset = rdev->data_offset;
4782         err = 0;
4783 unlock:
4784         mddev_unlock(mddev);
4785         return err ?: len;
4786 }
4787
4788 static struct md_sysfs_entry md_reshape_position =
4789 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
4790        reshape_position_store);
4791
4792 static ssize_t
4793 reshape_direction_show(struct mddev *mddev, char *page)
4794 {
4795         return sprintf(page, "%s\n",
4796                        mddev->reshape_backwards ? "backwards" : "forwards");
4797 }
4798
4799 static ssize_t
4800 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
4801 {
4802         int backwards = 0;
4803         int err;
4804
4805         if (cmd_match(buf, "forwards"))
4806                 backwards = 0;
4807         else if (cmd_match(buf, "backwards"))
4808                 backwards = 1;
4809         else
4810                 return -EINVAL;
4811         if (mddev->reshape_backwards == backwards)
4812                 return len;
4813
4814         err = mddev_lock(mddev);
4815         if (err)
4816                 return err;
4817         /* check if we are allowed to change */
4818         if (mddev->delta_disks)
4819                 err = -EBUSY;
4820         else if (mddev->persistent &&
4821             mddev->major_version == 0)
4822                 err =  -EINVAL;
4823         else
4824                 mddev->reshape_backwards = backwards;
4825         mddev_unlock(mddev);
4826         return err ?: len;
4827 }
4828
4829 static struct md_sysfs_entry md_reshape_direction =
4830 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
4831        reshape_direction_store);
4832
4833 static ssize_t
4834 array_size_show(struct mddev *mddev, char *page)
4835 {
4836         if (mddev->external_size)
4837                 return sprintf(page, "%llu\n",
4838                                (unsigned long long)mddev->array_sectors/2);
4839         else
4840                 return sprintf(page, "default\n");
4841 }
4842
4843 static ssize_t
4844 array_size_store(struct mddev *mddev, const char *buf, size_t len)
4845 {
4846         sector_t sectors;
4847         int err;
4848
4849         err = mddev_lock(mddev);
4850         if (err)
4851                 return err;
4852
4853         /* cluster raid doesn't support change array_sectors */
4854         if (mddev_is_clustered(mddev))
4855                 return -EINVAL;
4856
4857         if (strncmp(buf, "default", 7) == 0) {
4858                 if (mddev->pers)
4859                         sectors = mddev->pers->size(mddev, 0, 0);
4860                 else
4861                         sectors = mddev->array_sectors;
4862
4863                 mddev->external_size = 0;
4864         } else {
4865                 if (strict_blocks_to_sectors(buf, &sectors) < 0)
4866                         err = -EINVAL;
4867                 else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
4868                         err = -E2BIG;
4869                 else
4870                         mddev->external_size = 1;
4871         }
4872
4873         if (!err) {
4874                 mddev->array_sectors = sectors;
4875                 if (mddev->pers) {
4876                         set_capacity(mddev->gendisk, mddev->array_sectors);
4877                         revalidate_disk(mddev->gendisk);
4878                 }
4879         }
4880         mddev_unlock(mddev);
4881         return err ?: len;
4882 }
4883
4884 static struct md_sysfs_entry md_array_size =
4885 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
4886        array_size_store);
4887
4888 static struct attribute *md_default_attrs[] = {
4889         &md_level.attr,
4890         &md_layout.attr,
4891         &md_raid_disks.attr,
4892         &md_chunk_size.attr,
4893         &md_size.attr,
4894         &md_resync_start.attr,
4895         &md_metadata.attr,
4896         &md_new_device.attr,
4897         &md_safe_delay.attr,
4898         &md_array_state.attr,
4899         &md_reshape_position.attr,
4900         &md_reshape_direction.attr,
4901         &md_array_size.attr,
4902         &max_corr_read_errors.attr,
4903         NULL,
4904 };
4905
4906 static struct attribute *md_redundancy_attrs[] = {
4907         &md_scan_mode.attr,
4908         &md_last_scan_mode.attr,
4909         &md_mismatches.attr,
4910         &md_sync_min.attr,
4911         &md_sync_max.attr,
4912         &md_sync_speed.attr,
4913         &md_sync_force_parallel.attr,
4914         &md_sync_completed.attr,
4915         &md_min_sync.attr,
4916         &md_max_sync.attr,
4917         &md_suspend_lo.attr,
4918         &md_suspend_hi.attr,
4919         &md_bitmap.attr,
4920         &md_degraded.attr,
4921         NULL,
4922 };
4923 static struct attribute_group md_redundancy_group = {
4924         .name = NULL,
4925         .attrs = md_redundancy_attrs,
4926 };
4927
4928 static ssize_t
4929 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
4930 {
4931         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4932         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4933         ssize_t rv;
4934
4935         if (!entry->show)
4936                 return -EIO;
4937         spin_lock(&all_mddevs_lock);
4938         if (list_empty(&mddev->all_mddevs)) {
4939                 spin_unlock(&all_mddevs_lock);
4940                 return -EBUSY;
4941         }
4942         mddev_get(mddev);
4943         spin_unlock(&all_mddevs_lock);
4944
4945         rv = entry->show(mddev, page);
4946         mddev_put(mddev);
4947         return rv;
4948 }
4949
4950 static ssize_t
4951 md_attr_store(struct kobject *kobj, struct attribute *attr,
4952               const char *page, size_t length)
4953 {
4954         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
4955         struct mddev *mddev = container_of(kobj, struct mddev, kobj);
4956         ssize_t rv;
4957
4958         if (!entry->store)
4959                 return -EIO;
4960         if (!capable(CAP_SYS_ADMIN))
4961                 return -EACCES;
4962         spin_lock(&all_mddevs_lock);
4963         if (list_empty(&mddev->all_mddevs)) {
4964                 spin_unlock(&all_mddevs_lock);
4965                 return -EBUSY;
4966         }
4967         mddev_get(mddev);
4968         spin_unlock(&all_mddevs_lock);
4969         rv = entry->store(mddev, page, length);
4970         mddev_put(mddev);
4971         return rv;
4972 }
4973
4974 static void md_free(struct kobject *ko)
4975 {
4976         struct mddev *mddev = container_of(ko, struct mddev, kobj);
4977
4978         if (mddev->sysfs_state)
4979                 sysfs_put(mddev->sysfs_state);
4980
4981         if (mddev->queue)
4982                 blk_cleanup_queue(mddev->queue);
4983         if (mddev->gendisk) {
4984                 del_gendisk(mddev->gendisk);
4985                 put_disk(mddev->gendisk);
4986         }
4987
4988         kfree(mddev);
4989 }
4990
4991 static const struct sysfs_ops md_sysfs_ops = {
4992         .show   = md_attr_show,
4993         .store  = md_attr_store,
4994 };
4995 static struct kobj_type md_ktype = {
4996         .release        = md_free,
4997         .sysfs_ops      = &md_sysfs_ops,
4998         .default_attrs  = md_default_attrs,
4999 };
5000
5001 int mdp_major = 0;
5002
5003 static void mddev_delayed_delete(struct work_struct *ws)
5004 {
5005         struct mddev *mddev = container_of(ws, struct mddev, del_work);
5006
5007         sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
5008         kobject_del(&mddev->kobj);
5009         kobject_put(&mddev->kobj);
5010 }
5011
5012 static int md_alloc(dev_t dev, char *name)
5013 {
5014         static DEFINE_MUTEX(disks_mutex);
5015         struct mddev *mddev = mddev_find(dev);
5016         struct gendisk *disk;
5017         int partitioned;
5018         int shift;
5019         int unit;
5020         int error;
5021
5022         if (!mddev)
5023                 return -ENODEV;
5024
5025         partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
5026         shift = partitioned ? MdpMinorShift : 0;
5027         unit = MINOR(mddev->unit) >> shift;
5028
5029         /* wait for any previous instance of this device to be
5030          * completely removed (mddev_delayed_delete).
5031          */
5032         flush_workqueue(md_misc_wq);
5033
5034         mutex_lock(&disks_mutex);
5035         error = -EEXIST;
5036         if (mddev->gendisk)
5037                 goto abort;
5038
5039         if (name) {
5040                 /* Need to ensure that 'name' is not a duplicate.
5041                  */
5042                 struct mddev *mddev2;
5043                 spin_lock(&all_mddevs_lock);
5044
5045                 list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
5046                         if (mddev2->gendisk &&
5047                             strcmp(mddev2->gendisk->disk_name, name) == 0) {
5048                                 spin_unlock(&all_mddevs_lock);
5049                                 goto abort;
5050                         }
5051                 spin_unlock(&all_mddevs_lock);
5052         }
5053
5054         error = -ENOMEM;
5055         mddev->queue = blk_alloc_queue(GFP_KERNEL);
5056         if (!mddev->queue)
5057                 goto abort;
5058         mddev->queue->queuedata = mddev;
5059
5060         blk_queue_make_request(mddev->queue, md_make_request);
5061         blk_set_stacking_limits(&mddev->queue->limits);
5062
5063         disk = alloc_disk(1 << shift);
5064         if (!disk) {
5065                 blk_cleanup_queue(mddev->queue);
5066                 mddev->queue = NULL;
5067                 goto abort;
5068         }
5069         disk->major = MAJOR(mddev->unit);
5070         disk->first_minor = unit << shift;
5071         if (name)
5072                 strcpy(disk->disk_name, name);
5073         else if (partitioned)
5074                 sprintf(disk->disk_name, "md_d%d", unit);
5075         else
5076                 sprintf(disk->disk_name, "md%d", unit);
5077         disk->fops = &md_fops;
5078         disk->private_data = mddev;
5079         disk->queue = mddev->queue;
5080         blk_queue_write_cache(mddev->queue, true, true);
5081         /* Allow extended partitions.  This makes the
5082          * 'mdp' device redundant, but we can't really
5083          * remove it now.
5084          */
5085         disk->flags |= GENHD_FL_EXT_DEVT;
5086         mddev->gendisk = disk;
5087         /* As soon as we call add_disk(), another thread could get
5088          * through to md_open, so make sure it doesn't get too far
5089          */
5090         mutex_lock(&mddev->open_mutex);
5091         add_disk(disk);
5092
5093         error = kobject_init_and_add(&mddev->kobj, &md_ktype,
5094                                      &disk_to_dev(disk)->kobj, "%s", "md");
5095         if (error) {
5096                 /* This isn't possible, but as kobject_init_and_add is marked
5097                  * __must_check, we must do something with the result
5098                  */
5099                 pr_debug("md: cannot register %s/md - name in use\n",
5100                          disk->disk_name);
5101                 error = 0;
5102         }
5103         if (mddev->kobj.sd &&
5104             sysfs_create_group(&mddev->kobj, &md_bitmap_group))
5105                 pr_debug("pointless warning\n");
5106         mutex_unlock(&mddev->open_mutex);
5107  abort:
5108         mutex_unlock(&disks_mutex);
5109         if (!error && mddev->kobj.sd) {
5110                 kobject_uevent(&mddev->kobj, KOBJ_ADD);
5111                 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
5112         }
5113         mddev_put(mddev);
5114         return error;
5115 }
5116
5117 static struct kobject *md_probe(dev_t dev, int *part, void *data)
5118 {
5119         md_alloc(dev, NULL);
5120         return NULL;
5121 }
5122
5123 static int add_named_array(const char *val, struct kernel_param *kp)
5124 {
5125         /* val must be "md_*" where * is not all digits.
5126          * We allocate an array with a large free minor number, and
5127          * set the name to val.  val must not already be an active name.
5128          */
5129         int len = strlen(val);
5130         char buf[DISK_NAME_LEN];
5131
5132         while (len && val[len-1] == '\n')
5133                 len--;
5134         if (len >= DISK_NAME_LEN)
5135                 return -E2BIG;
5136         strlcpy(buf, val, len+1);
5137         if (strncmp(buf, "md_", 3) != 0)
5138                 return -EINVAL;
5139         return md_alloc(0, buf);
5140 }
5141
5142 static void md_safemode_timeout(unsigned long data)
5143 {
5144         struct mddev *mddev = (struct mddev *) data;
5145
5146         if (!atomic_read(&mddev->writes_pending)) {
5147                 mddev->safemode = 1;
5148                 if (mddev->external)
5149                         sysfs_notify_dirent_safe(mddev->sysfs_state);
5150         }
5151         md_wakeup_thread(mddev->thread);
5152 }
5153
5154 static int start_dirty_degraded;
5155
5156 int md_run(struct mddev *mddev)
5157 {
5158         int err;
5159         struct md_rdev *rdev;
5160         struct md_personality *pers;
5161
5162         if (list_empty(&mddev->disks))
5163                 /* cannot run an array with no devices.. */
5164                 return -EINVAL;
5165
5166         if (mddev->pers)
5167                 return -EBUSY;
5168         /* Cannot run until previous stop completes properly */
5169         if (mddev->sysfs_active)
5170                 return -EBUSY;
5171
5172         /*
5173          * Analyze all RAID superblock(s)
5174          */
5175         if (!mddev->raid_disks) {
5176                 if (!mddev->persistent)
5177                         return -EINVAL;
5178                 analyze_sbs(mddev);
5179         }
5180
5181         if (mddev->level != LEVEL_NONE)
5182                 request_module("md-level-%d", mddev->level);
5183         else if (mddev->clevel[0])
5184                 request_module("md-%s", mddev->clevel);
5185
5186         /*
5187          * Drop all container device buffers, from now on
5188          * the only valid external interface is through the md
5189          * device.
5190          */
5191         rdev_for_each(rdev, mddev) {
5192                 if (test_bit(Faulty, &rdev->flags))
5193                         continue;
5194                 sync_blockdev(rdev->bdev);
5195                 invalidate_bdev(rdev->bdev);
5196
5197                 /* perform some consistency tests on the device.
5198                  * We don't want the data to overlap the metadata,
5199                  * Internal Bitmap issues have been handled elsewhere.
5200                  */
5201                 if (rdev->meta_bdev) {
5202                         /* Nothing to check */;
5203                 } else if (rdev->data_offset < rdev->sb_start) {
5204                         if (mddev->dev_sectors &&
5205                             rdev->data_offset + mddev->dev_sectors
5206                             > rdev->sb_start) {
5207                                 pr_warn("md: %s: data overlaps metadata\n",
5208                                         mdname(mddev));
5209                                 return -EINVAL;
5210                         }
5211                 } else {
5212                         if (rdev->sb_start + rdev->sb_size/512
5213                             > rdev->data_offset) {
5214                                 pr_warn("md: %s: metadata overlaps data\n",
5215                                         mdname(mddev));
5216                                 return -EINVAL;
5217                         }
5218                 }
5219                 sysfs_notify_dirent_safe(rdev->sysfs_state);
5220         }
5221
5222         if (mddev->bio_set == NULL) {
5223                 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
5224                 if (!mddev->bio_set)
5225                         return -ENOMEM;
5226         }
5227
5228         spin_lock(&pers_lock);
5229         pers = find_pers(mddev->level, mddev->clevel);
5230         if (!pers || !try_module_get(pers->owner)) {
5231                 spin_unlock(&pers_lock);
5232                 if (mddev->level != LEVEL_NONE)
5233                         pr_warn("md: personality for level %d is not loaded!\n",
5234                                 mddev->level);
5235                 else
5236                         pr_warn("md: personality for level %s is not loaded!\n",
5237                                 mddev->clevel);
5238                 return -EINVAL;
5239         }
5240         spin_unlock(&pers_lock);
5241         if (mddev->level != pers->level) {
5242                 mddev->level = pers->level;
5243                 mddev->new_level = pers->level;
5244         }
5245         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
5246
5247         if (mddev->reshape_position != MaxSector &&
5248             pers->start_reshape == NULL) {
5249                 /* This personality cannot handle reshaping... */
5250                 module_put(pers->owner);
5251                 return -EINVAL;
5252         }
5253
5254         if (pers->sync_request) {
5255                 /* Warn if this is a potentially silly
5256                  * configuration.
5257                  */
5258                 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
5259                 struct md_rdev *rdev2;
5260                 int warned = 0;
5261
5262                 rdev_for_each(rdev, mddev)
5263                         rdev_for_each(rdev2, mddev) {
5264                                 if (rdev < rdev2 &&
5265                                     rdev->bdev->bd_contains ==
5266                                     rdev2->bdev->bd_contains) {
5267                                         pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
5268                                                 mdname(mddev),
5269                                                 bdevname(rdev->bdev,b),
5270                                                 bdevname(rdev2->bdev,b2));
5271                                         warned = 1;
5272                                 }
5273                         }
5274
5275                 if (warned)
5276                         pr_warn("True protection against single-disk failure might be compromised.\n");
5277         }
5278
5279         mddev->recovery = 0;
5280         /* may be over-ridden by personality */
5281         mddev->resync_max_sectors = mddev->dev_sectors;
5282
5283         mddev->ok_start_degraded = start_dirty_degraded;
5284
5285         if (start_readonly && mddev->ro == 0)
5286                 mddev->ro = 2; /* read-only, but switch on first write */
5287
5288         /*
5289          * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
5290          * up mddev->thread. It is important to initialize critical
5291          * resources for mddev->thread BEFORE calling pers->run().
5292          */
5293         err = pers->run(mddev);
5294         if (err)
5295                 pr_warn("md: pers->run() failed ...\n");
5296         else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
5297                 WARN_ONCE(!mddev->external_size,
5298                           "%s: default size too small, but 'external_size' not in effect?\n",
5299                           __func__);
5300                 pr_warn("md: invalid array_size %llu > default size %llu\n",
5301                         (unsigned long long)mddev->array_sectors / 2,
5302                         (unsigned long long)pers->size(mddev, 0, 0) / 2);
5303                 err = -EINVAL;
5304         }
5305         if (err == 0 && pers->sync_request &&
5306             (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
5307                 struct bitmap *bitmap;
5308
5309                 bitmap = bitmap_create(mddev, -1);
5310                 if (IS_ERR(bitmap)) {
5311                         err = PTR_ERR(bitmap);
5312                         pr_warn("%s: failed to create bitmap (%d)\n",
5313                                 mdname(mddev), err);
5314                 } else
5315                         mddev->bitmap = bitmap;
5316
5317         }
5318         if (err) {
5319                 mddev_detach(mddev);
5320                 if (mddev->private)
5321                         pers->free(mddev, mddev->private);
5322                 mddev->private = NULL;
5323                 module_put(pers->owner);
5324                 bitmap_destroy(mddev);
5325                 return err;
5326         }
5327         if (mddev->queue) {
5328                 bool nonrot = true;
5329
5330                 rdev_for_each(rdev, mddev) {
5331                         if (rdev->raid_disk >= 0 &&
5332                             !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
5333                                 nonrot = false;
5334                                 break;
5335                         }
5336                 }
5337                 if (mddev->degraded)
5338                         nonrot = false;
5339                 if (nonrot)
5340                         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5341                 else
5342                         queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
5343                 mddev->queue->backing_dev_info->congested_data = mddev;
5344                 mddev->queue->backing_dev_info->congested_fn = md_congested;
5345         }
5346         if (pers->sync_request) {
5347                 if (mddev->kobj.sd &&
5348                     sysfs_create_group(&mddev->kobj, &md_redundancy_group))
5349                         pr_warn("md: cannot register extra attributes for %s\n",
5350                                 mdname(mddev));
5351                 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
5352         } else if (mddev->ro == 2) /* auto-readonly not meaningful */
5353                 mddev->ro = 0;
5354
5355         atomic_set(&mddev->writes_pending,0);
5356         atomic_set(&mddev->max_corr_read_errors,
5357                    MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
5358         mddev->safemode = 0;
5359         if (mddev_is_clustered(mddev))
5360                 mddev->safemode_delay = 0;
5361         else
5362                 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
5363         mddev->in_sync = 1;
5364         smp_wmb();
5365         spin_lock(&mddev->lock);
5366         mddev->pers = pers;
5367         spin_unlock(&mddev->lock);
5368         rdev_for_each(rdev, mddev)
5369                 if (rdev->raid_disk >= 0)
5370                         if (sysfs_link_rdev(mddev, rdev))
5371                                 /* failure here is OK */;
5372
5373         if (mddev->degraded && !mddev->ro)
5374                 /* This ensures that recovering status is reported immediately
5375                  * via sysfs - until a lack of spares is confirmed.
5376                  */
5377                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5378         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5379
5380         if (mddev->sb_flags)
5381                 md_update_sb(mddev, 0);
5382
5383         md_new_event(mddev);
5384         sysfs_notify_dirent_safe(mddev->sysfs_state);
5385         sysfs_notify_dirent_safe(mddev->sysfs_action);
5386         sysfs_notify(&mddev->kobj, NULL, "degraded");
5387         return 0;
5388 }
5389 EXPORT_SYMBOL_GPL(md_run);
5390
5391 static int do_md_run(struct mddev *mddev)
5392 {
5393         int err;
5394
5395         err = md_run(mddev);
5396         if (err)
5397                 goto out;
5398         err = bitmap_load(mddev);
5399         if (err) {
5400                 bitmap_destroy(mddev);
5401                 goto out;
5402         }
5403
5404         if (mddev_is_clustered(mddev))
5405                 md_allow_write(mddev);
5406
5407         md_wakeup_thread(mddev->thread);
5408         md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
5409
5410         set_capacity(mddev->gendisk, mddev->array_sectors);
5411         revalidate_disk(mddev->gendisk);
5412         mddev->changed = 1;
5413         kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
5414 out:
5415         return err;
5416 }
5417
5418 static int restart_array(struct mddev *mddev)
5419 {
5420         struct gendisk *disk = mddev->gendisk;
5421
5422         /* Complain if it has no devices */
5423         if (list_empty(&mddev->disks))
5424                 return -ENXIO;
5425         if (!mddev->pers)
5426                 return -EINVAL;
5427         if (!mddev->ro)
5428                 return -EBUSY;
5429         if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
5430                 struct md_rdev *rdev;
5431                 bool has_journal = false;
5432
5433                 rcu_read_lock();
5434                 rdev_for_each_rcu(rdev, mddev) {
5435                         if (test_bit(Journal, &rdev->flags) &&
5436                             !test_bit(Faulty, &rdev->flags)) {
5437                                 has_journal = true;
5438                                 break;
5439                         }
5440                 }
5441                 rcu_read_unlock();
5442
5443                 /* Don't restart rw with journal missing/faulty */
5444                 if (!has_journal)
5445                         return -EINVAL;
5446         }
5447
5448         mddev->safemode = 0;
5449         mddev->ro = 0;
5450         set_disk_ro(disk, 0);
5451         pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
5452         /* Kick recovery or resync if necessary */
5453         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5454         md_wakeup_thread(mddev->thread);
5455         md_wakeup_thread(mddev->sync_thread);
5456         sysfs_notify_dirent_safe(mddev->sysfs_state);
5457         return 0;
5458 }
5459
5460 static void md_clean(struct mddev *mddev)
5461 {
5462         mddev->array_sectors = 0;
5463         mddev->external_size = 0;
5464         mddev->dev_sectors = 0;
5465         mddev->raid_disks = 0;
5466         mddev->recovery_cp = 0;
5467         mddev->resync_min = 0;
5468         mddev->resync_max = MaxSector;
5469         mddev->reshape_position = MaxSector;
5470         mddev->external = 0;
5471         mddev->persistent = 0;
5472         mddev->level = LEVEL_NONE;
5473         mddev->clevel[0] = 0;
5474         mddev->flags = 0;
5475         mddev->sb_flags = 0;
5476         mddev->ro = 0;
5477         mddev->metadata_type[0] = 0;
5478         mddev->chunk_sectors = 0;
5479         mddev->ctime = mddev->utime = 0;
5480         mddev->layout = 0;
5481         mddev->max_disks = 0;
5482         mddev->events = 0;
5483         mddev->can_decrease_events = 0;
5484         mddev->delta_disks = 0;
5485         mddev->reshape_backwards = 0;
5486         mddev->new_level = LEVEL_NONE;
5487         mddev->new_layout = 0;
5488         mddev->new_chunk_sectors = 0;
5489         mddev->curr_resync = 0;
5490         atomic64_set(&mddev->resync_mismatches, 0);
5491         mddev->suspend_lo = mddev->suspend_hi = 0;
5492         mddev->sync_speed_min = mddev->sync_speed_max = 0;
5493         mddev->recovery = 0;
5494         mddev->in_sync = 0;
5495         mddev->changed = 0;
5496         mddev->degraded = 0;
5497         mddev->safemode = 0;
5498         mddev->private = NULL;
5499         mddev->cluster_info = NULL;
5500         mddev->bitmap_info.offset = 0;
5501         mddev->bitmap_info.default_offset = 0;
5502         mddev->bitmap_info.default_space = 0;
5503         mddev->bitmap_info.chunksize = 0;
5504         mddev->bitmap_info.daemon_sleep = 0;
5505         mddev->bitmap_info.max_write_behind = 0;
5506         mddev->bitmap_info.nodes = 0;
5507 }
5508
5509 static void __md_stop_writes(struct mddev *mddev)
5510 {
5511         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5512         flush_workqueue(md_misc_wq);
5513         if (mddev->sync_thread) {
5514                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5515                 md_reap_sync_thread(mddev);
5516         }
5517
5518         del_timer_sync(&mddev->safemode_timer);
5519
5520         if (mddev->pers && mddev->pers->quiesce) {
5521                 mddev->pers->quiesce(mddev, 1);
5522                 mddev->pers->quiesce(mddev, 0);
5523         }
5524         bitmap_flush(mddev);
5525
5526         if (mddev->ro == 0 &&
5527             ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
5528              mddev->sb_flags)) {
5529                 /* mark array as shutdown cleanly */
5530                 if (!mddev_is_clustered(mddev))
5531                         mddev->in_sync = 1;
5532                 md_update_sb(mddev, 1);
5533         }
5534 }
5535
5536 void md_stop_writes(struct mddev *mddev)
5537 {
5538         mddev_lock_nointr(mddev);
5539         __md_stop_writes(mddev);
5540         mddev_unlock(mddev);
5541 }
5542 EXPORT_SYMBOL_GPL(md_stop_writes);
5543
5544 static void mddev_detach(struct mddev *mddev)
5545 {
5546         struct bitmap *bitmap = mddev->bitmap;
5547         /* wait for behind writes to complete */
5548         if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
5549                 pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
5550                          mdname(mddev));
5551                 /* need to kick something here to make sure I/O goes? */
5552                 wait_event(bitmap->behind_wait,
5553                            atomic_read(&bitmap->behind_writes) == 0);
5554         }
5555         if (mddev->pers && mddev->pers->quiesce) {
5556                 mddev->pers->quiesce(mddev, 1);
5557                 mddev->pers->quiesce(mddev, 0);
5558         }
5559         md_unregister_thread(&mddev->thread);
5560         if (mddev->queue)
5561                 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
5562 }
5563
5564 static void __md_stop(struct mddev *mddev)
5565 {
5566         struct md_personality *pers = mddev->pers;
5567         mddev_detach(mddev);
5568         /* Ensure ->event_work is done */
5569         flush_workqueue(md_misc_wq);
5570         spin_lock(&mddev->lock);
5571         mddev->pers = NULL;
5572         spin_unlock(&mddev->lock);
5573         pers->free(mddev, mddev->private);
5574         mddev->private = NULL;
5575         if (pers->sync_request && mddev->to_remove == NULL)
5576                 mddev->to_remove = &md_redundancy_group;
5577         module_put(pers->owner);
5578         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5579 }
5580
5581 void md_stop(struct mddev *mddev)
5582 {
5583         /* stop the array and free an attached data structures.
5584          * This is called from dm-raid
5585          */
5586         __md_stop(mddev);
5587         bitmap_destroy(mddev);
5588         if (mddev->bio_set)
5589                 bioset_free(mddev->bio_set);
5590 }
5591
5592 EXPORT_SYMBOL_GPL(md_stop);
5593
5594 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5595 {
5596         int err = 0;
5597         int did_freeze = 0;
5598
5599         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5600                 did_freeze = 1;
5601                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5602                 md_wakeup_thread(mddev->thread);
5603         }
5604         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5605                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5606         if (mddev->sync_thread)
5607                 /* Thread might be blocked waiting for metadata update
5608                  * which will now never happen */
5609                 wake_up_process(mddev->sync_thread->tsk);
5610
5611         if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
5612                 return -EBUSY;
5613         mddev_unlock(mddev);
5614         wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
5615                                           &mddev->recovery));
5616         wait_event(mddev->sb_wait,
5617                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
5618         mddev_lock_nointr(mddev);
5619
5620         mutex_lock(&mddev->open_mutex);
5621         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5622             mddev->sync_thread ||
5623             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5624                 pr_warn("md: %s still in use.\n",mdname(mddev));
5625                 if (did_freeze) {
5626                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5627                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5628                         md_wakeup_thread(mddev->thread);
5629                 }
5630                 err = -EBUSY;
5631                 goto out;
5632         }
5633         if (mddev->pers) {
5634                 __md_stop_writes(mddev);
5635
5636                 err  = -ENXIO;
5637                 if (mddev->ro==1)
5638                         goto out;
5639                 mddev->ro = 1;
5640                 set_disk_ro(mddev->gendisk, 1);
5641                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5642                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5643                 md_wakeup_thread(mddev->thread);
5644                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5645                 err = 0;
5646         }
5647 out:
5648         mutex_unlock(&mddev->open_mutex);
5649         return err;
5650 }
5651
5652 /* mode:
5653  *   0 - completely stop and dis-assemble array
5654  *   2 - stop but do not disassemble array
5655  */
5656 static int do_md_stop(struct mddev *mddev, int mode,
5657                       struct block_device *bdev)
5658 {
5659         struct gendisk *disk = mddev->gendisk;
5660         struct md_rdev *rdev;
5661         int did_freeze = 0;
5662
5663         if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
5664                 did_freeze = 1;
5665                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5666                 md_wakeup_thread(mddev->thread);
5667         }
5668         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
5669                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5670         if (mddev->sync_thread)
5671                 /* Thread might be blocked waiting for metadata update
5672                  * which will now never happen */
5673                 wake_up_process(mddev->sync_thread->tsk);
5674
5675         mddev_unlock(mddev);
5676         wait_event(resync_wait, (mddev->sync_thread == NULL &&
5677                                  !test_bit(MD_RECOVERY_RUNNING,
5678                                            &mddev->recovery)));
5679         mddev_lock_nointr(mddev);
5680
5681         mutex_lock(&mddev->open_mutex);
5682         if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
5683             mddev->sysfs_active ||
5684             mddev->sync_thread ||
5685             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
5686                 pr_warn("md: %s still in use.\n",mdname(mddev));
5687                 mutex_unlock(&mddev->open_mutex);
5688                 if (did_freeze) {
5689                         clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5690                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5691                         md_wakeup_thread(mddev->thread);
5692                 }
5693                 return -EBUSY;
5694         }
5695         if (mddev->pers) {
5696                 if (mddev->ro)
5697                         set_disk_ro(disk, 0);
5698
5699                 __md_stop_writes(mddev);
5700                 __md_stop(mddev);
5701                 mddev->queue->backing_dev_info->congested_fn = NULL;
5702
5703                 /* tell userspace to handle 'inactive' */
5704                 sysfs_notify_dirent_safe(mddev->sysfs_state);
5705
5706                 rdev_for_each(rdev, mddev)
5707                         if (rdev->raid_disk >= 0)
5708                                 sysfs_unlink_rdev(mddev, rdev);
5709
5710                 set_capacity(disk, 0);
5711                 mutex_unlock(&mddev->open_mutex);
5712                 mddev->changed = 1;
5713                 revalidate_disk(disk);
5714
5715                 if (mddev->ro)
5716                         mddev->ro = 0;
5717         } else
5718                 mutex_unlock(&mddev->open_mutex);
5719         /*
5720          * Free resources if final stop
5721          */
5722         if (mode == 0) {
5723                 pr_info("md: %s stopped.\n", mdname(mddev));
5724
5725                 bitmap_destroy(mddev);
5726                 if (mddev->bitmap_info.file) {
5727                         struct file *f = mddev->bitmap_info.file;
5728                         spin_lock(&mddev->lock);
5729                         mddev->bitmap_info.file = NULL;
5730                         spin_unlock(&mddev->lock);
5731                         fput(f);
5732                 }
5733                 mddev->bitmap_info.offset = 0;
5734
5735                 export_array(mddev);
5736
5737                 md_clean(mddev);
5738                 if (mddev->hold_active == UNTIL_STOP)
5739                         mddev->hold_active = 0;
5740         }
5741         md_new_event(mddev);
5742         sysfs_notify_dirent_safe(mddev->sysfs_state);
5743         return 0;
5744 }
5745
5746 #ifndef MODULE
5747 static void autorun_array(struct mddev *mddev)
5748 {
5749         struct md_rdev *rdev;
5750         int err;
5751
5752         if (list_empty(&mddev->disks))
5753                 return;
5754
5755         pr_info("md: running: ");
5756
5757         rdev_for_each(rdev, mddev) {
5758                 char b[BDEVNAME_SIZE];
5759                 pr_cont("<%s>", bdevname(rdev->bdev,b));
5760         }
5761         pr_cont("\n");
5762
5763         err = do_md_run(mddev);
5764         if (err) {
5765                 pr_warn("md: do_md_run() returned %d\n", err);
5766                 do_md_stop(mddev, 0, NULL);
5767         }
5768 }
5769
5770 /*
5771  * lets try to run arrays based on all disks that have arrived
5772  * until now. (those are in pending_raid_disks)
5773  *
5774  * the method: pick the first pending disk, collect all disks with
5775  * the same UUID, remove all from the pending list and put them into
5776  * the 'same_array' list. Then order this list based on superblock
5777  * update time (freshest comes first), kick out 'old' disks and
5778  * compare superblocks. If everything's fine then run it.
5779  *
5780  * If "unit" is allocated, then bump its reference count
5781  */
5782 static void autorun_devices(int part)
5783 {
5784         struct md_rdev *rdev0, *rdev, *tmp;
5785         struct mddev *mddev;
5786         char b[BDEVNAME_SIZE];
5787
5788         pr_info("md: autorun ...\n");
5789         while (!list_empty(&pending_raid_disks)) {
5790                 int unit;
5791                 dev_t dev;
5792                 LIST_HEAD(candidates);
5793                 rdev0 = list_entry(pending_raid_disks.next,
5794                                          struct md_rdev, same_set);
5795
5796                 pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev,b));
5797                 INIT_LIST_HEAD(&candidates);
5798                 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
5799                         if (super_90_load(rdev, rdev0, 0) >= 0) {
5800                                 pr_debug("md:  adding %s ...\n",
5801                                          bdevname(rdev->bdev,b));
5802                                 list_move(&rdev->same_set, &candidates);
5803                         }
5804                 /*
5805                  * now we have a set of devices, with all of them having
5806                  * mostly sane superblocks. It's time to allocate the
5807                  * mddev.
5808                  */
5809                 if (part) {
5810                         dev = MKDEV(mdp_major,
5811                                     rdev0->preferred_minor << MdpMinorShift);
5812                         unit = MINOR(dev) >> MdpMinorShift;
5813                 } else {
5814                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
5815                         unit = MINOR(dev);
5816                 }
5817                 if (rdev0->preferred_minor != unit) {
5818                         pr_warn("md: unit number in %s is bad: %d\n",
5819                                 bdevname(rdev0->bdev, b), rdev0->preferred_minor);
5820                         break;
5821                 }
5822
5823                 md_probe(dev, NULL, NULL);
5824                 mddev = mddev_find(dev);
5825                 if (!mddev || !mddev->gendisk) {
5826                         if (mddev)
5827                                 mddev_put(mddev);
5828                         break;
5829                 }
5830                 if (mddev_lock(mddev))
5831                         pr_warn("md: %s locked, cannot run\n", mdname(mddev));
5832                 else if (mddev->raid_disks || mddev->major_version
5833                          || !list_empty(&mddev->disks)) {
5834                         pr_warn("md: %s already running, cannot run %s\n",
5835                                 mdname(mddev), bdevname(rdev0->bdev,b));
5836                         mddev_unlock(mddev);
5837                 } else {
5838                         pr_debug("md: created %s\n", mdname(mddev));
5839                         mddev->persistent = 1;
5840                         rdev_for_each_list(rdev, tmp, &candidates) {
5841                                 list_del_init(&rdev->same_set);
5842                                 if (bind_rdev_to_array(rdev, mddev))
5843                                         export_rdev(rdev);
5844                         }
5845                         autorun_array(mddev);
5846                         mddev_unlock(mddev);
5847                 }
5848                 /* on success, candidates will be empty, on error
5849                  * it won't...
5850                  */
5851                 rdev_for_each_list(rdev, tmp, &candidates) {
5852                         list_del_init(&rdev->same_set);
5853                         export_rdev(rdev);
5854                 }
5855                 mddev_put(mddev);
5856         }
5857         pr_info("md: ... autorun DONE.\n");
5858 }
5859 #endif /* !MODULE */
5860
5861 static int get_version(void __user *arg)
5862 {
5863         mdu_version_t ver;
5864
5865         ver.major = MD_MAJOR_VERSION;
5866         ver.minor = MD_MINOR_VERSION;
5867         ver.patchlevel = MD_PATCHLEVEL_VERSION;
5868
5869         if (copy_to_user(arg, &ver, sizeof(ver)))
5870                 return -EFAULT;
5871
5872         return 0;
5873 }
5874
5875 static int get_array_info(struct mddev *mddev, void __user *arg)
5876 {
5877         mdu_array_info_t info;
5878         int nr,working,insync,failed,spare;
5879         struct md_rdev *rdev;
5880
5881         nr = working = insync = failed = spare = 0;
5882         rcu_read_lock();
5883         rdev_for_each_rcu(rdev, mddev) {
5884                 nr++;
5885                 if (test_bit(Faulty, &rdev->flags))
5886                         failed++;
5887                 else {
5888                         working++;
5889                         if (test_bit(In_sync, &rdev->flags))
5890                                 insync++;
5891                         else if (test_bit(Journal, &rdev->flags))
5892                                 /* TODO: add journal count to md_u.h */
5893                                 ;
5894                         else
5895                                 spare++;
5896                 }
5897         }
5898         rcu_read_unlock();
5899
5900         info.major_version = mddev->major_version;
5901         info.minor_version = mddev->minor_version;
5902         info.patch_version = MD_PATCHLEVEL_VERSION;
5903         info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
5904         info.level         = mddev->level;
5905         info.size          = mddev->dev_sectors / 2;
5906         if (info.size != mddev->dev_sectors / 2) /* overflow */
5907                 info.size = -1;
5908         info.nr_disks      = nr;
5909         info.raid_disks    = mddev->raid_disks;
5910         info.md_minor      = mddev->md_minor;
5911         info.not_persistent= !mddev->persistent;
5912
5913         info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
5914         info.state         = 0;
5915         if (mddev->in_sync)
5916                 info.state = (1<<MD_SB_CLEAN);
5917         if (mddev->bitmap && mddev->bitmap_info.offset)
5918                 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5919         if (mddev_is_clustered(mddev))
5920                 info.state |= (1<<MD_SB_CLUSTERED);
5921         info.active_disks  = insync;
5922         info.working_disks = working;
5923         info.failed_disks  = failed;
5924         info.spare_disks   = spare;
5925
5926         info.layout        = mddev->layout;
5927         info.chunk_size    = mddev->chunk_sectors << 9;
5928
5929         if (copy_to_user(arg, &info, sizeof(info)))
5930                 return -EFAULT;
5931
5932         return 0;
5933 }
5934
5935 static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5936 {
5937         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
5938         char *ptr;
5939         int err;
5940
5941         file = kzalloc(sizeof(*file), GFP_NOIO);
5942         if (!file)
5943                 return -ENOMEM;
5944
5945         err = 0;
5946         spin_lock(&mddev->lock);
5947         /* bitmap enabled */
5948         if (mddev->bitmap_info.file) {
5949                 ptr = file_path(mddev->bitmap_info.file, file->pathname,
5950                                 sizeof(file->pathname));
5951                 if (IS_ERR(ptr))
5952                         err = PTR_ERR(ptr);
5953                 else
5954                         memmove(file->pathname, ptr,
5955                                 sizeof(file->pathname)-(ptr-file->pathname));
5956         }
5957         spin_unlock(&mddev->lock);
5958
5959         if (err == 0 &&
5960             copy_to_user(arg, file, sizeof(*file)))
5961                 err = -EFAULT;
5962
5963         kfree(file);
5964         return err;
5965 }
5966
5967 static int get_disk_info(struct mddev *mddev, void __user * arg)
5968 {
5969         mdu_disk_info_t info;
5970         struct md_rdev *rdev;
5971
5972         if (copy_from_user(&info, arg, sizeof(info)))
5973                 return -EFAULT;
5974
5975         rcu_read_lock();
5976         rdev = md_find_rdev_nr_rcu(mddev, info.number);
5977         if (rdev) {
5978                 info.major = MAJOR(rdev->bdev->bd_dev);
5979                 info.minor = MINOR(rdev->bdev->bd_dev);
5980                 info.raid_disk = rdev->raid_disk;
5981                 info.state = 0;
5982                 if (test_bit(Faulty, &rdev->flags))
5983                         info.state |= (1<<MD_DISK_FAULTY);
5984                 else if (test_bit(In_sync, &rdev->flags)) {
5985                         info.state |= (1<<MD_DISK_ACTIVE);
5986                         info.state |= (1<<MD_DISK_SYNC);
5987                 }
5988                 if (test_bit(Journal, &rdev->flags))
5989                         info.state |= (1<<MD_DISK_JOURNAL);
5990                 if (test_bit(WriteMostly, &rdev->flags))
5991                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
5992                 if (test_bit(FailFast, &rdev->flags))
5993                         info.state |= (1<<MD_DISK_FAILFAST);
5994         } else {
5995                 info.major = info.minor = 0;
5996                 info.raid_disk = -1;
5997                 info.state = (1<<MD_DISK_REMOVED);
5998         }
5999         rcu_read_unlock();
6000
6001         if (copy_to_user(arg, &info, sizeof(info)))
6002                 return -EFAULT;
6003
6004         return 0;
6005 }
6006
6007 static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
6008 {
6009         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
6010         struct md_rdev *rdev;
6011         dev_t dev = MKDEV(info->major,info->minor);
6012
6013         if (mddev_is_clustered(mddev) &&
6014                 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
6015                 pr_warn("%s: Cannot add to clustered mddev.\n",
6016                         mdname(mddev));
6017                 return -EINVAL;
6018         }
6019
6020         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
6021                 return -EOVERFLOW;
6022
6023         if (!mddev->raid_disks) {
6024                 int err;
6025                 /* expecting a device which has a superblock */
6026                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6027                 if (IS_ERR(rdev)) {
6028                         pr_warn("md: md_import_device returned %ld\n",
6029                                 PTR_ERR(rdev));
6030                         return PTR_ERR(rdev);
6031                 }
6032                 if (!list_empty(&mddev->disks)) {
6033                         struct md_rdev *rdev0
6034                                 = list_entry(mddev->disks.next,
6035                                              struct md_rdev, same_set);
6036                         err = super_types[mddev->major_version]
6037                                 .load_super(rdev, rdev0, mddev->minor_version);
6038                         if (err < 0) {
6039                                 pr_warn("md: %s has different UUID to %s\n",
6040                                         bdevname(rdev->bdev,b),
6041                                         bdevname(rdev0->bdev,b2));
6042                                 export_rdev(rdev);
6043                                 return -EINVAL;
6044                         }
6045                 }
6046                 err = bind_rdev_to_array(rdev, mddev);
6047                 if (err)
6048                         export_rdev(rdev);
6049                 return err;
6050         }
6051
6052         /*
6053          * add_new_disk can be used once the array is assembled
6054          * to add "hot spares".  They must already have a superblock
6055          * written
6056          */
6057         if (mddev->pers) {
6058                 int err;
6059                 if (!mddev->pers->hot_add_disk) {
6060                         pr_warn("%s: personality does not support diskops!\n",
6061                                 mdname(mddev));
6062                         return -EINVAL;
6063                 }
6064                 if (mddev->persistent)
6065                         rdev = md_import_device(dev, mddev->major_version,
6066                                                 mddev->minor_version);
6067                 else
6068                         rdev = md_import_device(dev, -1, -1);
6069                 if (IS_ERR(rdev)) {
6070                         pr_warn("md: md_import_device returned %ld\n",
6071                                 PTR_ERR(rdev));
6072                         return PTR_ERR(rdev);
6073                 }
6074                 /* set saved_raid_disk if appropriate */
6075                 if (!mddev->persistent) {
6076                         if (info->state & (1<<MD_DISK_SYNC)  &&
6077                             info->raid_disk < mddev->raid_disks) {
6078                                 rdev->raid_disk = info->raid_disk;
6079                                 set_bit(In_sync, &rdev->flags);
6080                                 clear_bit(Bitmap_sync, &rdev->flags);
6081                         } else
6082                                 rdev->raid_disk = -1;
6083                         rdev->saved_raid_disk = rdev->raid_disk;
6084                 } else
6085                         super_types[mddev->major_version].
6086                                 validate_super(mddev, rdev);
6087                 if ((info->state & (1<<MD_DISK_SYNC)) &&
6088                      rdev->raid_disk != info->raid_disk) {
6089                         /* This was a hot-add request, but events doesn't
6090                          * match, so reject it.
6091                          */
6092                         export_rdev(rdev);
6093                         return -EINVAL;
6094                 }
6095
6096                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6097                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6098                         set_bit(WriteMostly, &rdev->flags);
6099                 else
6100                         clear_bit(WriteMostly, &rdev->flags);
6101                 if (info->state & (1<<MD_DISK_FAILFAST))
6102                         set_bit(FailFast, &rdev->flags);
6103                 else
6104                         clear_bit(FailFast, &rdev->flags);
6105
6106                 if (info->state & (1<<MD_DISK_JOURNAL)) {
6107                         struct md_rdev *rdev2;
6108                         bool has_journal = false;
6109
6110                         /* make sure no existing journal disk */
6111                         rdev_for_each(rdev2, mddev) {
6112                                 if (test_bit(Journal, &rdev2->flags)) {
6113                                         has_journal = true;
6114                                         break;
6115                                 }
6116                         }
6117                         if (has_journal) {
6118                                 export_rdev(rdev);
6119                                 return -EBUSY;
6120                         }
6121                         set_bit(Journal, &rdev->flags);
6122                 }
6123                 /*
6124                  * check whether the device shows up in other nodes
6125                  */
6126                 if (mddev_is_clustered(mddev)) {
6127                         if (info->state & (1 << MD_DISK_CANDIDATE))
6128                                 set_bit(Candidate, &rdev->flags);
6129                         else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
6130                                 /* --add initiated by this node */
6131                                 err = md_cluster_ops->add_new_disk(mddev, rdev);
6132                                 if (err) {
6133                                         export_rdev(rdev);
6134                                         return err;
6135                                 }
6136                         }
6137                 }
6138
6139                 rdev->raid_disk = -1;
6140                 err = bind_rdev_to_array(rdev, mddev);
6141
6142                 if (err)
6143                         export_rdev(rdev);
6144
6145                 if (mddev_is_clustered(mddev)) {
6146                         if (info->state & (1 << MD_DISK_CANDIDATE)) {
6147                                 if (!err) {
6148                                         err = md_cluster_ops->new_disk_ack(mddev,
6149                                                 err == 0);
6150                                         if (err)
6151                                                 md_kick_rdev_from_array(rdev);
6152                                 }
6153                         } else {
6154                                 if (err)
6155                                         md_cluster_ops->add_new_disk_cancel(mddev);
6156                                 else
6157                                         err = add_bound_rdev(rdev);
6158                         }
6159
6160                 } else if (!err)
6161                         err = add_bound_rdev(rdev);
6162
6163                 return err;
6164         }
6165
6166         /* otherwise, add_new_disk is only allowed
6167          * for major_version==0 superblocks
6168          */
6169         if (mddev->major_version != 0) {
6170                 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
6171                 return -EINVAL;
6172         }
6173
6174         if (!(info->state & (1<<MD_DISK_FAULTY))) {
6175                 int err;
6176                 rdev = md_import_device(dev, -1, 0);
6177                 if (IS_ERR(rdev)) {
6178                         pr_warn("md: error, md_import_device() returned %ld\n",
6179                                 PTR_ERR(rdev));
6180                         return PTR_ERR(rdev);
6181                 }
6182                 rdev->desc_nr = info->number;
6183                 if (info->raid_disk < mddev->raid_disks)
6184                         rdev->raid_disk = info->raid_disk;
6185                 else
6186                         rdev->raid_disk = -1;
6187
6188                 if (rdev->raid_disk < mddev->raid_disks)
6189                         if (info->state & (1<<MD_DISK_SYNC))
6190                                 set_bit(In_sync, &rdev->flags);
6191
6192                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
6193                         set_bit(WriteMostly, &rdev->flags);
6194                 if (info->state & (1<<MD_DISK_FAILFAST))
6195                         set_bit(FailFast, &rdev->flags);
6196
6197                 if (!mddev->persistent) {
6198                         pr_debug("md: nonpersistent superblock ...\n");
6199                         rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6200                 } else
6201                         rdev->sb_start = calc_dev_sboffset(rdev);
6202                 rdev->sectors = rdev->sb_start;
6203
6204                 err = bind_rdev_to_array(rdev, mddev);
6205                 if (err) {
6206                         export_rdev(rdev);
6207                         return err;
6208                 }
6209         }
6210
6211         return 0;
6212 }
6213
6214 static int hot_remove_disk(struct mddev *mddev, dev_t dev)
6215 {
6216         char b[BDEVNAME_SIZE];
6217         struct md_rdev *rdev;
6218
6219         rdev = find_rdev(mddev, dev);
6220         if (!rdev)
6221                 return -ENXIO;
6222
6223         if (rdev->raid_disk < 0)
6224                 goto kick_rdev;
6225
6226         clear_bit(Blocked, &rdev->flags);
6227         remove_and_add_spares(mddev, rdev);
6228
6229         if (rdev->raid_disk >= 0)
6230                 goto busy;
6231
6232 kick_rdev:
6233         if (mddev_is_clustered(mddev))
6234                 md_cluster_ops->remove_disk(mddev, rdev);
6235
6236         md_kick_rdev_from_array(rdev);
6237         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6238         if (mddev->thread)
6239                 md_wakeup_thread(mddev->thread);
6240         else
6241                 md_update_sb(mddev, 1);
6242         md_new_event(mddev);
6243
6244         return 0;
6245 busy:
6246         pr_debug("md: cannot remove active disk %s from %s ...\n",
6247                  bdevname(rdev->bdev,b), mdname(mddev));
6248         return -EBUSY;
6249 }
6250
6251 static int hot_add_disk(struct mddev *mddev, dev_t dev)
6252 {
6253         char b[BDEVNAME_SIZE];
6254         int err;
6255         struct md_rdev *rdev;
6256
6257         if (!mddev->pers)
6258                 return -ENODEV;
6259
6260         if (mddev->major_version != 0) {
6261                 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
6262                         mdname(mddev));
6263                 return -EINVAL;
6264         }
6265         if (!mddev->pers->hot_add_disk) {
6266                 pr_warn("%s: personality does not support diskops!\n",
6267                         mdname(mddev));
6268                 return -EINVAL;
6269         }
6270
6271         rdev = md_import_device(dev, -1, 0);
6272         if (IS_ERR(rdev)) {
6273                 pr_warn("md: error, md_import_device() returned %ld\n",
6274                         PTR_ERR(rdev));
6275                 return -EINVAL;
6276         }
6277
6278         if (mddev->persistent)
6279                 rdev->sb_start = calc_dev_sboffset(rdev);
6280         else
6281                 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6282
6283         rdev->sectors = rdev->sb_start;
6284
6285         if (test_bit(Faulty, &rdev->flags)) {
6286                 pr_warn("md: can not hot-add faulty %s disk to %s!\n",
6287                         bdevname(rdev->bdev,b), mdname(mddev));
6288                 err = -EINVAL;
6289                 goto abort_export;
6290         }
6291
6292         clear_bit(In_sync, &rdev->flags);
6293         rdev->desc_nr = -1;
6294         rdev->saved_raid_disk = -1;
6295         err = bind_rdev_to_array(rdev, mddev);
6296         if (err)
6297                 goto abort_export;
6298
6299         /*
6300          * The rest should better be atomic, we can have disk failures
6301          * noticed in interrupt contexts ...
6302          */
6303
6304         rdev->raid_disk = -1;
6305
6306         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6307         if (!mddev->thread)
6308                 md_update_sb(mddev, 1);
6309         /*
6310          * Kick recovery, maybe this spare has to be added to the
6311          * array immediately.
6312          */
6313         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
6314         md_wakeup_thread(mddev->thread);
6315         md_new_event(mddev);
6316         return 0;
6317
6318 abort_export:
6319         export_rdev(rdev);
6320         return err;
6321 }
6322
6323 static int set_bitmap_file(struct mddev *mddev, int fd)
6324 {
6325         int err = 0;
6326
6327         if (mddev->pers) {
6328                 if (!mddev->pers->quiesce || !mddev->thread)
6329                         return -EBUSY;
6330                 if (mddev->recovery || mddev->sync_thread)
6331                         return -EBUSY;
6332                 /* we should be able to change the bitmap.. */
6333         }
6334
6335         if (fd >= 0) {
6336                 struct inode *inode;
6337                 struct file *f;
6338
6339                 if (mddev->bitmap || mddev->bitmap_info.file)
6340                         return -EEXIST; /* cannot add when bitmap is present */
6341                 f = fget(fd);
6342
6343                 if (f == NULL) {
6344                         pr_warn("%s: error: failed to get bitmap file\n",
6345                                 mdname(mddev));
6346                         return -EBADF;
6347                 }
6348
6349                 inode = f->f_mapping->host;
6350                 if (!S_ISREG(inode->i_mode)) {
6351                         pr_warn("%s: error: bitmap file must be a regular file\n",
6352                                 mdname(mddev));
6353                         err = -EBADF;
6354                 } else if (!(f->f_mode & FMODE_WRITE)) {
6355                         pr_warn("%s: error: bitmap file must open for write\n",
6356                                 mdname(mddev));
6357                         err = -EBADF;
6358                 } else if (atomic_read(&inode->i_writecount) != 1) {
6359                         pr_warn("%s: error: bitmap file is already in use\n",
6360                                 mdname(mddev));
6361                         err = -EBUSY;
6362                 }
6363                 if (err) {
6364                         fput(f);
6365                         return err;
6366                 }
6367                 mddev->bitmap_info.file = f;
6368                 mddev->bitmap_info.offset = 0; /* file overrides offset */
6369         } else if (mddev->bitmap == NULL)
6370                 return -ENOENT; /* cannot remove what isn't there */
6371         err = 0;
6372         if (mddev->pers) {
6373                 mddev->pers->quiesce(mddev, 1);
6374                 if (fd >= 0) {
6375                         struct bitmap *bitmap;
6376
6377                         bitmap = bitmap_create(mddev, -1);
6378                         if (!IS_ERR(bitmap)) {
6379                                 mddev->bitmap = bitmap;
6380                                 err = bitmap_load(mddev);
6381                         } else
6382                                 err = PTR_ERR(bitmap);
6383                 }
6384                 if (fd < 0 || err) {
6385                         bitmap_destroy(mddev);
6386                         fd = -1; /* make sure to put the file */
6387                 }
6388                 mddev->pers->quiesce(mddev, 0);
6389         }
6390         if (fd < 0) {
6391                 struct file *f = mddev->bitmap_info.file;
6392                 if (f) {
6393                         spin_lock(&mddev->lock);
6394                         mddev->bitmap_info.file = NULL;
6395                         spin_unlock(&mddev->lock);
6396                         fput(f);
6397                 }
6398         }
6399
6400         return err;
6401 }
6402
6403 /*
6404  * set_array_info is used two different ways
6405  * The original usage is when creating a new array.
6406  * In this usage, raid_disks is > 0 and it together with
6407  *  level, size, not_persistent,layout,chunksize determine the
6408  *  shape of the array.
6409  *  This will always create an array with a type-0.90.0 superblock.
6410  * The newer usage is when assembling an array.
6411  *  In this case raid_disks will be 0, and the major_version field is
6412  *  use to determine which style super-blocks are to be found on the devices.
6413  *  The minor and patch _version numbers are also kept incase the
6414  *  super_block handler wishes to interpret them.
6415  */
6416 static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
6417 {
6418
6419         if (info->raid_disks == 0) {
6420                 /* just setting version number for superblock loading */
6421                 if (info->major_version < 0 ||
6422                     info->major_version >= ARRAY_SIZE(super_types) ||
6423                     super_types[info->major_version].name == NULL) {
6424                         /* maybe try to auto-load a module? */
6425                         pr_warn("md: superblock version %d not known\n",
6426                                 info->major_version);
6427                         return -EINVAL;
6428                 }
6429                 mddev->major_version = info->major_version;
6430                 mddev->minor_version = info->minor_version;
6431                 mddev->patch_version = info->patch_version;
6432                 mddev->persistent = !info->not_persistent;
6433                 /* ensure mddev_put doesn't delete this now that there
6434                  * is some minimal configuration.
6435                  */
6436                 mddev->ctime         = ktime_get_real_seconds();
6437                 return 0;
6438         }
6439         mddev->major_version = MD_MAJOR_VERSION;
6440         mddev->minor_version = MD_MINOR_VERSION;
6441         mddev->patch_version = MD_PATCHLEVEL_VERSION;
6442         mddev->ctime         = ktime_get_real_seconds();
6443
6444         mddev->level         = info->level;
6445         mddev->clevel[0]     = 0;
6446         mddev->dev_sectors   = 2 * (sector_t)info->size;
6447         mddev->raid_disks    = info->raid_disks;
6448         /* don't set md_minor, it is determined by which /dev/md* was
6449          * openned
6450          */
6451         if (info->state & (1<<MD_SB_CLEAN))
6452                 mddev->recovery_cp = MaxSector;
6453         else
6454                 mddev->recovery_cp = 0;
6455         mddev->persistent    = ! info->not_persistent;
6456         mddev->external      = 0;
6457
6458         mddev->layout        = info->layout;
6459         mddev->chunk_sectors = info->chunk_size >> 9;
6460
6461         mddev->max_disks     = MD_SB_DISKS;
6462
6463         if (mddev->persistent) {
6464                 mddev->flags         = 0;
6465                 mddev->sb_flags         = 0;
6466         }
6467         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6468
6469         mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
6470         mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
6471         mddev->bitmap_info.offset = 0;
6472
6473         mddev->reshape_position = MaxSector;
6474
6475         /*
6476          * Generate a 128 bit UUID
6477          */
6478         get_random_bytes(mddev->uuid, 16);
6479
6480         mddev->new_level = mddev->level;
6481         mddev->new_chunk_sectors = mddev->chunk_sectors;
6482         mddev->new_layout = mddev->layout;
6483         mddev->delta_disks = 0;
6484         mddev->reshape_backwards = 0;
6485
6486         return 0;
6487 }
6488
6489 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
6490 {
6491         WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
6492
6493         if (mddev->external_size)
6494                 return;
6495
6496         mddev->array_sectors = array_sectors;
6497 }
6498 EXPORT_SYMBOL(md_set_array_sectors);
6499
6500 static int update_size(struct mddev *mddev, sector_t num_sectors)
6501 {
6502         struct md_rdev *rdev;
6503         int rv;
6504         int fit = (num_sectors == 0);
6505
6506         /* cluster raid doesn't support update size */
6507         if (mddev_is_clustered(mddev))
6508                 return -EINVAL;
6509
6510         if (mddev->pers->resize == NULL)
6511                 return -EINVAL;
6512         /* The "num_sectors" is the number of sectors of each device that
6513          * is used.  This can only make sense for arrays with redundancy.
6514          * linear and raid0 always use whatever space is available. We can only
6515          * consider changing this number if no resync or reconstruction is
6516          * happening, and if the new size is acceptable. It must fit before the
6517          * sb_start or, if that is <data_offset, it must fit before the size
6518          * of each device.  If num_sectors is zero, we find the largest size
6519          * that fits.
6520          */
6521         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6522             mddev->sync_thread)
6523                 return -EBUSY;
6524         if (mddev->ro)
6525                 return -EROFS;
6526
6527         rdev_for_each(rdev, mddev) {
6528                 sector_t avail = rdev->sectors;
6529
6530                 if (fit && (num_sectors == 0 || num_sectors > avail))
6531                         num_sectors = avail;
6532                 if (avail < num_sectors)
6533                         return -ENOSPC;
6534         }
6535         rv = mddev->pers->resize(mddev, num_sectors);
6536         if (!rv)
6537                 revalidate_disk(mddev->gendisk);
6538         return rv;
6539 }
6540
6541 static int update_raid_disks(struct mddev *mddev, int raid_disks)
6542 {
6543         int rv;
6544         struct md_rdev *rdev;
6545         /* change the number of raid disks */
6546         if (mddev->pers->check_reshape == NULL)
6547                 return -EINVAL;
6548         if (mddev->ro)
6549                 return -EROFS;
6550         if (raid_disks <= 0 ||
6551             (mddev->max_disks && raid_disks >= mddev->max_disks))
6552                 return -EINVAL;
6553         if (mddev->sync_thread ||
6554             test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6555             mddev->reshape_position != MaxSector)
6556                 return -EBUSY;
6557
6558         rdev_for_each(rdev, mddev) {
6559                 if (mddev->raid_disks < raid_disks &&
6560                     rdev->data_offset < rdev->new_data_offset)
6561                         return -EINVAL;
6562                 if (mddev->raid_disks > raid_disks &&
6563                     rdev->data_offset > rdev->new_data_offset)
6564                         return -EINVAL;
6565         }
6566
6567         mddev->delta_disks = raid_disks - mddev->raid_disks;
6568         if (mddev->delta_disks < 0)
6569                 mddev->reshape_backwards = 1;
6570         else if (mddev->delta_disks > 0)
6571                 mddev->reshape_backwards = 0;
6572
6573         rv = mddev->pers->check_reshape(mddev);
6574         if (rv < 0) {
6575                 mddev->delta_disks = 0;
6576                 mddev->reshape_backwards = 0;
6577         }
6578         return rv;
6579 }
6580
6581 /*
6582  * update_array_info is used to change the configuration of an
6583  * on-line array.
6584  * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size
6585  * fields in the info are checked against the array.
6586  * Any differences that cannot be handled will cause an error.
6587  * Normally, only one change can be managed at a time.
6588  */
6589 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
6590 {
6591         int rv = 0;
6592         int cnt = 0;
6593         int state = 0;
6594
6595         /* calculate expected state,ignoring low bits */
6596         if (mddev->bitmap && mddev->bitmap_info.offset)
6597                 state |= (1 << MD_SB_BITMAP_PRESENT);
6598
6599         if (mddev->major_version != info->major_version ||
6600             mddev->minor_version != info->minor_version ||
6601 /*          mddev->patch_version != info->patch_version || */
6602             mddev->ctime         != info->ctime         ||
6603             mddev->level         != info->level         ||
6604 /*          mddev->layout        != info->layout        || */
6605             mddev->persistent    != !info->not_persistent ||
6606             mddev->chunk_sectors != info->chunk_size >> 9 ||
6607             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
6608             ((state^info->state) & 0xfffffe00)
6609                 )
6610                 return -EINVAL;
6611         /* Check there is only one change */
6612         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6613                 cnt++;
6614         if (mddev->raid_disks != info->raid_disks)
6615                 cnt++;
6616         if (mddev->layout != info->layout)
6617                 cnt++;
6618         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
6619                 cnt++;
6620         if (cnt == 0)
6621                 return 0;
6622         if (cnt > 1)
6623                 return -EINVAL;
6624
6625         if (mddev->layout != info->layout) {
6626                 /* Change layout
6627                  * we don't need to do anything at the md level, the
6628                  * personality will take care of it all.
6629                  */
6630                 if (mddev->pers->check_reshape == NULL)
6631                         return -EINVAL;
6632                 else {
6633                         mddev->new_layout = info->layout;
6634                         rv = mddev->pers->check_reshape(mddev);
6635                         if (rv)
6636                                 mddev->new_layout = mddev->layout;
6637                         return rv;
6638                 }
6639         }
6640         if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
6641                 rv = update_size(mddev, (sector_t)info->size * 2);
6642
6643         if (mddev->raid_disks    != info->raid_disks)
6644                 rv = update_raid_disks(mddev, info->raid_disks);
6645
6646         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
6647                 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
6648                         rv = -EINVAL;
6649                         goto err;
6650                 }
6651                 if (mddev->recovery || mddev->sync_thread) {
6652                         rv = -EBUSY;
6653                         goto err;
6654                 }
6655                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
6656                         struct bitmap *bitmap;
6657                         /* add the bitmap */
6658                         if (mddev->bitmap) {
6659                                 rv = -EEXIST;
6660                                 goto err;
6661                         }
6662                         if (mddev->bitmap_info.default_offset == 0) {
6663                                 rv = -EINVAL;
6664                                 goto err;
6665                         }
6666                         mddev->bitmap_info.offset =
6667                                 mddev->bitmap_info.default_offset;
6668                         mddev->bitmap_info.space =
6669                                 mddev->bitmap_info.default_space;
6670                         mddev->pers->quiesce(mddev, 1);
6671                         bitmap = bitmap_create(mddev, -1);
6672                         if (!IS_ERR(bitmap)) {
6673                                 mddev->bitmap = bitmap;
6674                                 rv = bitmap_load(mddev);
6675                         } else
6676                                 rv = PTR_ERR(bitmap);
6677                         if (rv)
6678                                 bitmap_destroy(mddev);
6679                         mddev->pers->quiesce(mddev, 0);
6680                 } else {
6681                         /* remove the bitmap */
6682                         if (!mddev->bitmap) {
6683                                 rv = -ENOENT;
6684                                 goto err;
6685                         }
6686                         if (mddev->bitmap->storage.file) {
6687                                 rv = -EINVAL;
6688                                 goto err;
6689                         }
6690                         if (mddev->bitmap_info.nodes) {
6691                                 /* hold PW on all the bitmap lock */
6692                                 if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
6693                                         pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
6694                                         rv = -EPERM;
6695                                         md_cluster_ops->unlock_all_bitmaps(mddev);
6696                                         goto err;
6697                                 }
6698
6699                                 mddev->bitmap_info.nodes = 0;
6700                                 md_cluster_ops->leave(mddev);
6701                         }
6702                         mddev->pers->quiesce(mddev, 1);
6703                         bitmap_destroy(mddev);
6704                         mddev->pers->quiesce(mddev, 0);
6705                         mddev->bitmap_info.offset = 0;
6706                 }
6707         }
6708         md_update_sb(mddev, 1);
6709         return rv;
6710 err:
6711         return rv;
6712 }
6713
6714 static int set_disk_faulty(struct mddev *mddev, dev_t dev)
6715 {
6716         struct md_rdev *rdev;
6717         int err = 0;
6718
6719         if (mddev->pers == NULL)
6720                 return -ENODEV;
6721
6722         rcu_read_lock();
6723         rdev = find_rdev_rcu(mddev, dev);
6724         if (!rdev)
6725                 err =  -ENODEV;
6726         else {
6727                 md_error(mddev, rdev);
6728                 if (!test_bit(Faulty, &rdev->flags))
6729                         err = -EBUSY;
6730         }
6731         rcu_read_unlock();
6732         return err;
6733 }
6734
6735 /*
6736  * We have a problem here : there is no easy way to give a CHS
6737  * virtual geometry. We currently pretend that we have a 2 heads
6738  * 4 sectors (with a BIG number of cylinders...). This drives
6739  * dosfs just mad... ;-)
6740  */
6741 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
6742 {
6743         struct mddev *mddev = bdev->bd_disk->private_data;
6744
6745         geo->heads = 2;
6746         geo->sectors = 4;
6747         geo->cylinders = mddev->array_sectors / 8;
6748         return 0;
6749 }
6750
6751 static inline bool md_ioctl_valid(unsigned int cmd)
6752 {
6753         switch (cmd) {
6754         case ADD_NEW_DISK:
6755         case BLKROSET:
6756         case GET_ARRAY_INFO:
6757         case GET_BITMAP_FILE:
6758         case GET_DISK_INFO:
6759         case HOT_ADD_DISK:
6760         case HOT_REMOVE_DISK:
6761         case RAID_AUTORUN:
6762         case RAID_VERSION:
6763         case RESTART_ARRAY_RW:
6764         case RUN_ARRAY:
6765         case SET_ARRAY_INFO:
6766         case SET_BITMAP_FILE:
6767         case SET_DISK_FAULTY:
6768         case STOP_ARRAY:
6769         case STOP_ARRAY_RO:
6770         case CLUSTERED_DISK_NACK:
6771                 return true;
6772         default:
6773                 return false;
6774         }
6775 }
6776
6777 static int md_ioctl(struct block_device *bdev, fmode_t mode,
6778                         unsigned int cmd, unsigned long arg)
6779 {
6780         int err = 0;
6781         void __user *argp = (void __user *)arg;
6782         struct mddev *mddev = NULL;
6783         int ro;
6784
6785         if (!md_ioctl_valid(cmd))
6786                 return -ENOTTY;
6787
6788         switch (cmd) {
6789         case RAID_VERSION:
6790         case GET_ARRAY_INFO:
6791         case GET_DISK_INFO:
6792                 break;
6793         default:
6794                 if (!capable(CAP_SYS_ADMIN))
6795                         return -EACCES;
6796         }
6797
6798         /*
6799          * Commands dealing with the RAID driver but not any
6800          * particular array:
6801          */
6802         switch (cmd) {
6803         case RAID_VERSION:
6804                 err = get_version(argp);
6805                 goto out;
6806
6807 #ifndef MODULE
6808         case RAID_AUTORUN:
6809                 err = 0;
6810                 autostart_arrays(arg);
6811                 goto out;
6812 #endif
6813         default:;
6814         }
6815
6816         /*
6817          * Commands creating/starting a new array:
6818          */
6819
6820         mddev = bdev->bd_disk->private_data;
6821
6822         if (!mddev) {
6823                 BUG();
6824                 goto out;
6825         }
6826
6827         /* Some actions do not requires the mutex */
6828         switch (cmd) {
6829         case GET_ARRAY_INFO:
6830                 if (!mddev->raid_disks && !mddev->external)
6831                         err = -ENODEV;
6832                 else
6833                         err = get_array_info(mddev, argp);
6834                 goto out;
6835
6836         case GET_DISK_INFO:
6837                 if (!mddev->raid_disks && !mddev->external)
6838                         err = -ENODEV;
6839                 else
6840                         err = get_disk_info(mddev, argp);
6841                 goto out;
6842
6843         case SET_DISK_FAULTY:
6844                 err = set_disk_faulty(mddev, new_decode_dev(arg));
6845                 goto out;
6846
6847         case GET_BITMAP_FILE:
6848                 err = get_bitmap_file(mddev, argp);
6849                 goto out;
6850
6851         }
6852
6853         if (cmd == ADD_NEW_DISK)
6854                 /* need to ensure md_delayed_delete() has completed */
6855                 flush_workqueue(md_misc_wq);
6856
6857         if (cmd == HOT_REMOVE_DISK)
6858                 /* need to ensure recovery thread has run */
6859                 wait_event_interruptible_timeout(mddev->sb_wait,
6860                                                  !test_bit(MD_RECOVERY_NEEDED,
6861                                                            &mddev->recovery),
6862                                                  msecs_to_jiffies(5000));
6863         if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
6864                 /* Need to flush page cache, and ensure no-one else opens
6865                  * and writes
6866                  */
6867                 mutex_lock(&mddev->open_mutex);
6868                 if (mddev->pers && atomic_read(&mddev->openers) > 1) {
6869                         mutex_unlock(&mddev->open_mutex);
6870                         err = -EBUSY;
6871                         goto out;
6872                 }
6873                 set_bit(MD_CLOSING, &mddev->flags);
6874                 mutex_unlock(&mddev->open_mutex);
6875                 sync_blockdev(bdev);
6876         }
6877         err = mddev_lock(mddev);
6878         if (err) {
6879                 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
6880                          err, cmd);
6881                 goto out;
6882         }
6883
6884         if (cmd == SET_ARRAY_INFO) {
6885                 mdu_array_info_t info;
6886                 if (!arg)
6887                         memset(&info, 0, sizeof(info));
6888                 else if (copy_from_user(&info, argp, sizeof(info))) {
6889                         err = -EFAULT;
6890                         goto unlock;
6891                 }
6892                 if (mddev->pers) {
6893                         err = update_array_info(mddev, &info);
6894                         if (err) {
6895                                 pr_warn("md: couldn't update array info. %d\n", err);
6896                                 goto unlock;
6897                         }
6898                         goto unlock;
6899                 }
6900                 if (!list_empty(&mddev->disks)) {
6901                         pr_warn("md: array %s already has disks!\n", mdname(mddev));
6902                         err = -EBUSY;
6903                         goto unlock;
6904                 }
6905                 if (mddev->raid_disks) {
6906                         pr_warn("md: array %s already initialised!\n", mdname(mddev));
6907                         err = -EBUSY;
6908                         goto unlock;
6909                 }
6910                 err = set_array_info(mddev, &info);
6911                 if (err) {
6912                         pr_warn("md: couldn't set array info. %d\n", err);
6913                         goto unlock;
6914                 }
6915                 goto unlock;
6916         }
6917
6918         /*
6919          * Commands querying/configuring an existing array:
6920          */
6921         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
6922          * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
6923         if ((!mddev->raid_disks && !mddev->external)
6924             && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
6925             && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
6926             && cmd != GET_BITMAP_FILE) {
6927                 err = -ENODEV;
6928                 goto unlock;
6929         }
6930
6931         /*
6932          * Commands even a read-only array can execute:
6933          */
6934         switch (cmd) {
6935         case RESTART_ARRAY_RW:
6936                 err = restart_array(mddev);
6937                 goto unlock;
6938
6939         case STOP_ARRAY:
6940                 err = do_md_stop(mddev, 0, bdev);
6941                 goto unlock;
6942
6943         case STOP_ARRAY_RO:
6944                 err = md_set_readonly(mddev, bdev);
6945                 goto unlock;
6946
6947         case HOT_REMOVE_DISK:
6948                 err = hot_remove_disk(mddev, new_decode_dev(arg));
6949                 goto unlock;
6950
6951         case ADD_NEW_DISK:
6952                 /* We can support ADD_NEW_DISK on read-only arrays
6953                  * only if we are re-adding a preexisting device.
6954                  * So require mddev->pers and MD_DISK_SYNC.
6955                  */
6956                 if (mddev->pers) {
6957                         mdu_disk_info_t info;
6958                         if (copy_from_user(&info, argp, sizeof(info)))
6959                                 err = -EFAULT;
6960                         else if (!(info.state & (1<<MD_DISK_SYNC)))
6961                                 /* Need to clear read-only for this */
6962                                 break;
6963                         else
6964                                 err = add_new_disk(mddev, &info);
6965                         goto unlock;
6966                 }
6967                 break;
6968
6969         case BLKROSET:
6970                 if (get_user(ro, (int __user *)(arg))) {
6971                         err = -EFAULT;
6972                         goto unlock;
6973                 }
6974                 err = -EINVAL;
6975
6976                 /* if the bdev is going readonly the value of mddev->ro
6977                  * does not matter, no writes are coming
6978                  */
6979                 if (ro)
6980                         goto unlock;
6981
6982                 /* are we are already prepared for writes? */
6983                 if (mddev->ro != 1)
6984                         goto unlock;
6985
6986                 /* transitioning to readauto need only happen for
6987                  * arrays that call md_write_start
6988                  */
6989                 if (mddev->pers) {
6990                         err = restart_array(mddev);
6991                         if (err == 0) {
6992                                 mddev->ro = 2;
6993                                 set_disk_ro(mddev->gendisk, 0);
6994                         }
6995                 }
6996                 goto unlock;
6997         }
6998
6999         /*
7000          * The remaining ioctls are changing the state of the
7001          * superblock, so we do not allow them on read-only arrays.
7002          */
7003         if (mddev->ro && mddev->pers) {
7004                 if (mddev->ro == 2) {
7005                         mddev->ro = 0;
7006                         sysfs_notify_dirent_safe(mddev->sysfs_state);
7007                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7008                         /* mddev_unlock will wake thread */
7009                         /* If a device failed while we were read-only, we
7010                          * need to make sure the metadata is updated now.
7011                          */
7012                         if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
7013                                 mddev_unlock(mddev);
7014                                 wait_event(mddev->sb_wait,
7015                                            !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
7016                                            !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7017                                 mddev_lock_nointr(mddev);
7018                         }
7019                 } else {
7020                         err = -EROFS;
7021                         goto unlock;
7022                 }
7023         }
7024
7025         switch (cmd) {
7026         case ADD_NEW_DISK:
7027         {
7028                 mdu_disk_info_t info;
7029                 if (copy_from_user(&info, argp, sizeof(info)))
7030                         err = -EFAULT;
7031                 else
7032                         err = add_new_disk(mddev, &info);
7033                 goto unlock;
7034         }
7035
7036         case CLUSTERED_DISK_NACK:
7037                 if (mddev_is_clustered(mddev))
7038                         md_cluster_ops->new_disk_ack(mddev, false);
7039                 else
7040                         err = -EINVAL;
7041                 goto unlock;
7042
7043         case HOT_ADD_DISK:
7044                 err = hot_add_disk(mddev, new_decode_dev(arg));
7045                 goto unlock;
7046
7047         case RUN_ARRAY:
7048                 err = do_md_run(mddev);
7049                 goto unlock;
7050
7051         case SET_BITMAP_FILE:
7052                 err = set_bitmap_file(mddev, (int)arg);
7053                 goto unlock;
7054
7055         default:
7056                 err = -EINVAL;
7057                 goto unlock;
7058         }
7059
7060 unlock:
7061         if (mddev->hold_active == UNTIL_IOCTL &&
7062             err != -EINVAL)
7063                 mddev->hold_active = 0;
7064         mddev_unlock(mddev);
7065 out:
7066         return err;
7067 }
7068 #ifdef CONFIG_COMPAT
7069 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
7070                     unsigned int cmd, unsigned long arg)
7071 {
7072         switch (cmd) {
7073         case HOT_REMOVE_DISK:
7074         case HOT_ADD_DISK:
7075         case SET_DISK_FAULTY:
7076         case SET_BITMAP_FILE:
7077                 /* These take in integer arg, do not convert */
7078                 break;
7079         default:
7080                 arg = (unsigned long)compat_ptr(arg);
7081                 break;
7082         }
7083
7084         return md_ioctl(bdev, mode, cmd, arg);
7085 }
7086 #endif /* CONFIG_COMPAT */
7087
7088 static int md_open(struct block_device *bdev, fmode_t mode)
7089 {
7090         /*
7091          * Succeed if we can lock the mddev, which confirms that
7092          * it isn't being stopped right now.
7093          */
7094         struct mddev *mddev = mddev_find(bdev->bd_dev);
7095         int err;
7096
7097         if (!mddev)
7098                 return -ENODEV;
7099
7100         if (mddev->gendisk != bdev->bd_disk) {
7101                 /* we are racing with mddev_put which is discarding this
7102                  * bd_disk.
7103                  */
7104                 mddev_put(mddev);
7105                 /* Wait until bdev->bd_disk is definitely gone */
7106                 flush_workqueue(md_misc_wq);
7107                 /* Then retry the open from the top */
7108                 return -ERESTARTSYS;
7109         }
7110         BUG_ON(mddev != bdev->bd_disk->private_data);
7111
7112         if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
7113                 goto out;
7114
7115         if (test_bit(MD_CLOSING, &mddev->flags)) {
7116                 mutex_unlock(&mddev->open_mutex);
7117                 err = -ENODEV;
7118                 goto out;
7119         }
7120
7121         err = 0;
7122         atomic_inc(&mddev->openers);
7123         mutex_unlock(&mddev->open_mutex);
7124
7125         check_disk_change(bdev);
7126  out:
7127         if (err)
7128                 mddev_put(mddev);
7129         return err;
7130 }
7131
7132 static void md_release(struct gendisk *disk, fmode_t mode)
7133 {
7134         struct mddev *mddev = disk->private_data;
7135
7136         BUG_ON(!mddev);
7137         atomic_dec(&mddev->openers);
7138         mddev_put(mddev);
7139 }
7140
7141 static int md_media_changed(struct gendisk *disk)
7142 {
7143         struct mddev *mddev = disk->private_data;
7144
7145         return mddev->changed;
7146 }
7147
7148 static int md_revalidate(struct gendisk *disk)
7149 {
7150         struct mddev *mddev = disk->private_data;
7151
7152         mddev->changed = 0;
7153         return 0;
7154 }
7155 static const struct block_device_operations md_fops =
7156 {
7157         .owner          = THIS_MODULE,
7158         .open           = md_open,
7159         .release        = md_release,
7160         .ioctl          = md_ioctl,
7161 #ifdef CONFIG_COMPAT
7162         .compat_ioctl   = md_compat_ioctl,
7163 #endif
7164         .getgeo         = md_getgeo,
7165         .media_changed  = md_media_changed,
7166         .revalidate_disk= md_revalidate,
7167 };
7168
7169 static int md_thread(void *arg)
7170 {
7171         struct md_thread *thread = arg;
7172
7173         /*
7174          * md_thread is a 'system-thread', it's priority should be very
7175          * high. We avoid resource deadlocks individually in each
7176          * raid personality. (RAID5 does preallocation) We also use RR and
7177          * the very same RT priority as kswapd, thus we will never get
7178          * into a priority inversion deadlock.
7179          *
7180          * we definitely have to have equal or higher priority than
7181          * bdflush, otherwise bdflush will deadlock if there are too
7182          * many dirty RAID5 blocks.
7183          */
7184
7185         allow_signal(SIGKILL);
7186         while (!kthread_should_stop()) {
7187
7188                 /* We need to wait INTERRUPTIBLE so that
7189                  * we don't add to the load-average.
7190                  * That means we need to be sure no signals are
7191                  * pending
7192                  */
7193                 if (signal_pending(current))
7194                         flush_signals(current);
7195
7196                 wait_event_interruptible_timeout
7197                         (thread->wqueue,
7198                          test_bit(THREAD_WAKEUP, &thread->flags)
7199                          || kthread_should_stop() || kthread_should_park(),
7200                          thread->timeout);
7201
7202                 clear_bit(THREAD_WAKEUP, &thread->flags);
7203                 if (kthread_should_park())
7204                         kthread_parkme();
7205                 if (!kthread_should_stop())
7206                         thread->run(thread);
7207         }
7208
7209         return 0;
7210 }
7211
7212 void md_wakeup_thread(struct md_thread *thread)
7213 {
7214         if (thread) {
7215                 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
7216                 set_bit(THREAD_WAKEUP, &thread->flags);
7217                 wake_up(&thread->wqueue);
7218         }
7219 }
7220 EXPORT_SYMBOL(md_wakeup_thread);
7221
7222 struct md_thread *md_register_thread(void (*run) (struct md_thread *),
7223                 struct mddev *mddev, const char *name)
7224 {
7225         struct md_thread *thread;
7226
7227         thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
7228         if (!thread)
7229                 return NULL;
7230
7231         init_waitqueue_head(&thread->wqueue);
7232
7233         thread->run = run;
7234         thread->mddev = mddev;
7235         thread->timeout = MAX_SCHEDULE_TIMEOUT;
7236         thread->tsk = kthread_run(md_thread, thread,
7237                                   "%s_%s",
7238                                   mdname(thread->mddev),
7239                                   name);
7240         if (IS_ERR(thread->tsk)) {
7241                 kfree(thread);
7242                 return NULL;
7243         }
7244         return thread;
7245 }
7246 EXPORT_SYMBOL(md_register_thread);
7247
7248 void md_unregister_thread(struct md_thread **threadp)
7249 {
7250         struct md_thread *thread = *threadp;
7251         if (!thread)
7252                 return;
7253         pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
7254         /* Locking ensures that mddev_unlock does not wake_up a
7255          * non-existent thread
7256          */
7257         spin_lock(&pers_lock);
7258         *threadp = NULL;
7259         spin_unlock(&pers_lock);
7260
7261         kthread_stop(thread->tsk);
7262         kfree(thread);
7263 }
7264 EXPORT_SYMBOL(md_unregister_thread);
7265
7266 void md_error(struct mddev *mddev, struct md_rdev *rdev)
7267 {
7268         if (!rdev || test_bit(Faulty, &rdev->flags))
7269                 return;
7270
7271         if (!mddev->pers || !mddev->pers->error_handler)
7272                 return;
7273         mddev->pers->error_handler(mddev,rdev);
7274         if (mddev->degraded)
7275                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
7276         sysfs_notify_dirent_safe(rdev->sysfs_state);
7277         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7278         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7279         md_wakeup_thread(mddev->thread);
7280         if (mddev->event_work.func)
7281                 queue_work(md_misc_wq, &mddev->event_work);
7282         md_new_event(mddev);
7283 }
7284 EXPORT_SYMBOL(md_error);
7285
7286 /* seq_file implementation /proc/mdstat */
7287
7288 static void status_unused(struct seq_file *seq)
7289 {
7290         int i = 0;
7291         struct md_rdev *rdev;
7292
7293         seq_printf(seq, "unused devices: ");
7294
7295         list_for_each_entry(rdev, &pending_raid_disks, same_set) {
7296                 char b[BDEVNAME_SIZE];
7297                 i++;
7298                 seq_printf(seq, "%s ",
7299                               bdevname(rdev->bdev,b));
7300         }
7301         if (!i)
7302                 seq_printf(seq, "<none>");
7303
7304         seq_printf(seq, "\n");
7305 }
7306
7307 static int status_resync(struct seq_file *seq, struct mddev *mddev)
7308 {
7309         sector_t max_sectors, resync, res;
7310         unsigned long dt, db;
7311         sector_t rt;
7312         int scale;
7313         unsigned int per_milli;
7314
7315         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7316             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7317                 max_sectors = mddev->resync_max_sectors;
7318         else
7319                 max_sectors = mddev->dev_sectors;
7320
7321         resync = mddev->curr_resync;
7322         if (resync <= 3) {
7323                 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7324                         /* Still cleaning up */
7325                         resync = max_sectors;
7326         } else
7327                 resync -= atomic_read(&mddev->recovery_active);
7328
7329         if (resync == 0) {
7330                 if (mddev->recovery_cp < MaxSector) {
7331                         seq_printf(seq, "\tresync=PENDING");
7332                         return 1;
7333                 }
7334                 return 0;
7335         }
7336         if (resync < 3) {
7337                 seq_printf(seq, "\tresync=DELAYED");
7338                 return 1;
7339         }
7340
7341         WARN_ON(max_sectors == 0);
7342         /* Pick 'scale' such that (resync>>scale)*1000 will fit
7343          * in a sector_t, and (max_sectors>>scale) will fit in a
7344          * u32, as those are the requirements for sector_div.
7345          * Thus 'scale' must be at least 10
7346          */
7347         scale = 10;
7348         if (sizeof(sector_t) > sizeof(unsigned long)) {
7349                 while ( max_sectors/2 > (1ULL<<(scale+32)))
7350                         scale++;
7351         }
7352         res = (resync>>scale)*1000;
7353         sector_div(res, (u32)((max_sectors>>scale)+1));
7354
7355         per_milli = res;
7356         {
7357                 int i, x = per_milli/50, y = 20-x;
7358                 seq_printf(seq, "[");
7359                 for (i = 0; i < x; i++)
7360                         seq_printf(seq, "=");
7361                 seq_printf(seq, ">");
7362                 for (i = 0; i < y; i++)
7363                         seq_printf(seq, ".");
7364                 seq_printf(seq, "] ");
7365         }
7366         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
7367                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
7368                     "reshape" :
7369                     (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)?
7370                      "check" :
7371                      (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
7372                       "resync" : "recovery"))),
7373                    per_milli/10, per_milli % 10,
7374                    (unsigned long long) resync/2,
7375                    (unsigned long long) max_sectors/2);
7376
7377         /*
7378          * dt: time from mark until now
7379          * db: blocks written from mark until now
7380          * rt: remaining time
7381          *
7382          * rt is a sector_t, so could be 32bit or 64bit.
7383          * So we divide before multiply in case it is 32bit and close
7384          * to the limit.
7385          * We scale the divisor (db) by 32 to avoid losing precision
7386          * near the end of resync when the number of remaining sectors
7387          * is close to 'db'.
7388          * We then divide rt by 32 after multiplying by db to compensate.
7389          * The '+1' avoids division by zero if db is very small.
7390          */
7391         dt = ((jiffies - mddev->resync_mark) / HZ);
7392         if (!dt) dt++;
7393         db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
7394                 - mddev->resync_mark_cnt;
7395
7396         rt = max_sectors - resync;    /* number of remaining sectors */
7397         sector_div(rt, db/32+1);
7398         rt *= dt;
7399         rt >>= 5;
7400
7401         seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
7402                    ((unsigned long)rt % 60)/6);
7403
7404         seq_printf(seq, " speed=%ldK/sec", db/2/dt);
7405         return 1;
7406 }
7407
7408 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
7409 {
7410         struct list_head *tmp;
7411         loff_t l = *pos;
7412         struct mddev *mddev;
7413
7414         if (l >= 0x10000)
7415                 return NULL;
7416         if (!l--)
7417                 /* header */
7418                 return (void*)1;
7419
7420         spin_lock(&all_mddevs_lock);
7421         list_for_each(tmp,&all_mddevs)
7422                 if (!l--) {
7423                         mddev = list_entry(tmp, struct mddev, all_mddevs);
7424                         mddev_get(mddev);
7425                         spin_unlock(&all_mddevs_lock);
7426                         return mddev;
7427                 }
7428         spin_unlock(&all_mddevs_lock);
7429         if (!l--)
7430                 return (void*)2;/* tail */
7431         return NULL;
7432 }
7433
7434 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
7435 {
7436         struct list_head *tmp;
7437         struct mddev *next_mddev, *mddev = v;
7438
7439         ++*pos;
7440         if (v == (void*)2)
7441                 return NULL;
7442
7443         spin_lock(&all_mddevs_lock);
7444         if (v == (void*)1)
7445                 tmp = all_mddevs.next;
7446         else
7447                 tmp = mddev->all_mddevs.next;
7448         if (tmp != &all_mddevs)
7449                 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs));
7450         else {
7451                 next_mddev = (void*)2;
7452                 *pos = 0x10000;
7453         }
7454         spin_unlock(&all_mddevs_lock);
7455
7456         if (v != (void*)1)
7457                 mddev_put(mddev);
7458         return next_mddev;
7459
7460 }
7461
7462 static void md_seq_stop(struct seq_file *seq, void *v)
7463 {
7464         struct mddev *mddev = v;
7465
7466         if (mddev && v != (void*)1 && v != (void*)2)
7467                 mddev_put(mddev);
7468 }
7469
7470 static int md_seq_show(struct seq_file *seq, void *v)
7471 {
7472         struct mddev *mddev = v;
7473         sector_t sectors;
7474         struct md_rdev *rdev;
7475
7476         if (v == (void*)1) {
7477                 struct md_personality *pers;
7478                 seq_printf(seq, "Personalities : ");
7479                 spin_lock(&pers_lock);
7480                 list_for_each_entry(pers, &pers_list, list)
7481                         seq_printf(seq, "[%s] ", pers->name);
7482
7483                 spin_unlock(&pers_lock);
7484                 seq_printf(seq, "\n");
7485                 seq->poll_event = atomic_read(&md_event_count);
7486                 return 0;
7487         }
7488         if (v == (void*)2) {
7489                 status_unused(seq);
7490                 return 0;
7491         }
7492
7493         spin_lock(&mddev->lock);
7494         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
7495                 seq_printf(seq, "%s : %sactive", mdname(mddev),
7496                                                 mddev->pers ? "" : "in");
7497                 if (mddev->pers) {
7498                         if (mddev->ro==1)
7499                                 seq_printf(seq, " (read-only)");
7500                         if (mddev->ro==2)
7501                                 seq_printf(seq, " (auto-read-only)");
7502                         seq_printf(seq, " %s", mddev->pers->name);
7503                 }
7504
7505                 sectors = 0;
7506                 rcu_read_lock();
7507                 rdev_for_each_rcu(rdev, mddev) {
7508                         char b[BDEVNAME_SIZE];
7509                         seq_printf(seq, " %s[%d]",
7510                                 bdevname(rdev->bdev,b), rdev->desc_nr);
7511                         if (test_bit(WriteMostly, &rdev->flags))
7512                                 seq_printf(seq, "(W)");
7513                         if (test_bit(Journal, &rdev->flags))
7514                                 seq_printf(seq, "(J)");
7515                         if (test_bit(Faulty, &rdev->flags)) {
7516                                 seq_printf(seq, "(F)");
7517                                 continue;
7518                         }
7519                         if (rdev->raid_disk < 0)
7520                                 seq_printf(seq, "(S)"); /* spare */
7521                         if (test_bit(Replacement, &rdev->flags))
7522                                 seq_printf(seq, "(R)");
7523                         sectors += rdev->sectors;
7524                 }
7525                 rcu_read_unlock();
7526
7527                 if (!list_empty(&mddev->disks)) {
7528                         if (mddev->pers)
7529                                 seq_printf(seq, "\n      %llu blocks",
7530                                            (unsigned long long)
7531                                            mddev->array_sectors / 2);
7532                         else
7533                                 seq_printf(seq, "\n      %llu blocks",
7534                                            (unsigned long long)sectors / 2);
7535                 }
7536                 if (mddev->persistent) {
7537                         if (mddev->major_version != 0 ||
7538                             mddev->minor_version != 90) {
7539                                 seq_printf(seq," super %d.%d",
7540                                            mddev->major_version,
7541                                            mddev->minor_version);
7542                         }
7543                 } else if (mddev->external)
7544                         seq_printf(seq, " super external:%s",
7545                                    mddev->metadata_type);
7546                 else
7547                         seq_printf(seq, " super non-persistent");
7548
7549                 if (mddev->pers) {
7550                         mddev->pers->status(seq, mddev);
7551                         seq_printf(seq, "\n      ");
7552                         if (mddev->pers->sync_request) {
7553                                 if (status_resync(seq, mddev))
7554                                         seq_printf(seq, "\n      ");
7555                         }
7556                 } else
7557                         seq_printf(seq, "\n       ");
7558
7559                 bitmap_status(seq, mddev->bitmap);
7560
7561                 seq_printf(seq, "\n");
7562         }
7563         spin_unlock(&mddev->lock);
7564
7565         return 0;
7566 }
7567
7568 static const struct seq_operations md_seq_ops = {
7569         .start  = md_seq_start,
7570         .next   = md_seq_next,
7571         .stop   = md_seq_stop,
7572         .show   = md_seq_show,
7573 };
7574
7575 static int md_seq_open(struct inode *inode, struct file *file)
7576 {
7577         struct seq_file *seq;
7578         int error;
7579
7580         error = seq_open(file, &md_seq_ops);
7581         if (error)
7582                 return error;
7583
7584         seq = file->private_data;
7585         seq->poll_event = atomic_read(&md_event_count);
7586         return error;
7587 }
7588
7589 static int md_unloading;
7590 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
7591 {
7592         struct seq_file *seq = filp->private_data;
7593         int mask;
7594
7595         if (md_unloading)
7596                 return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
7597         poll_wait(filp, &md_event_waiters, wait);
7598
7599         /* always allow read */
7600         mask = POLLIN | POLLRDNORM;
7601
7602         if (seq->poll_event != atomic_read(&md_event_count))
7603                 mask |= POLLERR | POLLPRI;
7604         return mask;
7605 }
7606
7607 static const struct file_operations md_seq_fops = {
7608         .owner          = THIS_MODULE,
7609         .open           = md_seq_open,
7610         .read           = seq_read,
7611         .llseek         = seq_lseek,
7612         .release        = seq_release_private,
7613         .poll           = mdstat_poll,
7614 };
7615
7616 int register_md_personality(struct md_personality *p)
7617 {
7618         pr_debug("md: %s personality registered for level %d\n",
7619                  p->name, p->level);
7620         spin_lock(&pers_lock);
7621         list_add_tail(&p->list, &pers_list);
7622         spin_unlock(&pers_lock);
7623         return 0;
7624 }
7625 EXPORT_SYMBOL(register_md_personality);
7626
7627 int unregister_md_personality(struct md_personality *p)
7628 {
7629         pr_debug("md: %s personality unregistered\n", p->name);
7630         spin_lock(&pers_lock);
7631         list_del_init(&p->list);
7632         spin_unlock(&pers_lock);
7633         return 0;
7634 }
7635 EXPORT_SYMBOL(unregister_md_personality);
7636
7637 int register_md_cluster_operations(struct md_cluster_operations *ops,
7638                                    struct module *module)
7639 {
7640         int ret = 0;
7641         spin_lock(&pers_lock);
7642         if (md_cluster_ops != NULL)
7643                 ret = -EALREADY;
7644         else {
7645                 md_cluster_ops = ops;
7646                 md_cluster_mod = module;
7647         }
7648         spin_unlock(&pers_lock);
7649         return ret;
7650 }
7651 EXPORT_SYMBOL(register_md_cluster_operations);
7652
7653 int unregister_md_cluster_operations(void)
7654 {
7655         spin_lock(&pers_lock);
7656         md_cluster_ops = NULL;
7657         spin_unlock(&pers_lock);
7658         return 0;
7659 }
7660 EXPORT_SYMBOL(unregister_md_cluster_operations);
7661
7662 int md_setup_cluster(struct mddev *mddev, int nodes)
7663 {
7664         if (!md_cluster_ops)
7665                 request_module("md-cluster");
7666         spin_lock(&pers_lock);
7667         /* ensure module won't be unloaded */
7668         if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
7669                 pr_warn("can't find md-cluster module or get it's reference.\n");
7670                 spin_unlock(&pers_lock);
7671                 return -ENOENT;
7672         }
7673         spin_unlock(&pers_lock);
7674
7675         return md_cluster_ops->join(mddev, nodes);
7676 }
7677
7678 void md_cluster_stop(struct mddev *mddev)
7679 {
7680         if (!md_cluster_ops)
7681                 return;
7682         md_cluster_ops->leave(mddev);
7683         module_put(md_cluster_mod);
7684 }
7685
7686 static int is_mddev_idle(struct mddev *mddev, int init)
7687 {
7688         struct md_rdev *rdev;
7689         int idle;
7690         int curr_events;
7691
7692         idle = 1;
7693         rcu_read_lock();
7694         rdev_for_each_rcu(rdev, mddev) {
7695                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
7696                 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
7697                               (int)part_stat_read(&disk->part0, sectors[1]) -
7698                               atomic_read(&disk->sync_io);
7699                 /* sync IO will cause sync_io to increase before the disk_stats
7700                  * as sync_io is counted when a request starts, and
7701                  * disk_stats is counted when it completes.
7702                  * So resync activity will cause curr_events to be smaller than
7703                  * when there was no such activity.
7704                  * non-sync IO will cause disk_stat to increase without
7705                  * increasing sync_io so curr_events will (eventually)
7706                  * be larger than it was before.  Once it becomes
7707                  * substantially larger, the test below will cause
7708                  * the array to appear non-idle, and resync will slow
7709                  * down.
7710                  * If there is a lot of outstanding resync activity when
7711                  * we set last_event to curr_events, then all that activity
7712                  * completing might cause the array to appear non-idle
7713                  * and resync will be slowed down even though there might
7714                  * not have been non-resync activity.  This will only
7715                  * happen once though.  'last_events' will soon reflect
7716                  * the state where there is little or no outstanding
7717                  * resync requests, and further resync activity will
7718                  * always make curr_events less than last_events.
7719                  *
7720                  */
7721                 if (init || curr_events - rdev->last_events > 64) {
7722                         rdev->last_events = curr_events;
7723                         idle = 0;
7724                 }
7725         }
7726         rcu_read_unlock();
7727         return idle;
7728 }
7729
7730 void md_done_sync(struct mddev *mddev, int blocks, int ok)
7731 {
7732         /* another "blocks" (512byte) blocks have been synced */
7733         atomic_sub(blocks, &mddev->recovery_active);
7734         wake_up(&mddev->recovery_wait);
7735         if (!ok) {
7736                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7737                 set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
7738                 md_wakeup_thread(mddev->thread);
7739                 // stop recovery, signal do_sync ....
7740         }
7741 }
7742 EXPORT_SYMBOL(md_done_sync);
7743
7744 /* md_write_start(mddev, bi)
7745  * If we need to update some array metadata (e.g. 'active' flag
7746  * in superblock) before writing, schedule a superblock update
7747  * and wait for it to complete.
7748  */
7749 void md_write_start(struct mddev *mddev, struct bio *bi)
7750 {
7751         int did_change = 0;
7752         if (bio_data_dir(bi) != WRITE)
7753                 return;
7754
7755         BUG_ON(mddev->ro == 1);
7756         if (mddev->ro == 2) {
7757                 /* need to switch to read/write */
7758                 mddev->ro = 0;
7759                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7760                 md_wakeup_thread(mddev->thread);
7761                 md_wakeup_thread(mddev->sync_thread);
7762                 did_change = 1;
7763         }
7764         atomic_inc(&mddev->writes_pending);
7765         if (mddev->safemode == 1)
7766                 mddev->safemode = 0;
7767         if (mddev->in_sync) {
7768                 spin_lock(&mddev->lock);
7769                 if (mddev->in_sync) {
7770                         mddev->in_sync = 0;
7771                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
7772                         set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
7773                         md_wakeup_thread(mddev->thread);
7774                         did_change = 1;
7775                 }
7776                 spin_unlock(&mddev->lock);
7777         }
7778         if (did_change)
7779                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7780         wait_event(mddev->sb_wait,
7781                    !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
7782 }
7783 EXPORT_SYMBOL(md_write_start);
7784
7785 void md_write_end(struct mddev *mddev)
7786 {
7787         if (atomic_dec_and_test(&mddev->writes_pending)) {
7788                 if (mddev->safemode == 2)
7789                         md_wakeup_thread(mddev->thread);
7790                 else if (mddev->safemode_delay)
7791                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
7792         }
7793 }
7794 EXPORT_SYMBOL(md_write_end);
7795
7796 /* md_allow_write(mddev)
7797  * Calling this ensures that the array is marked 'active' so that writes
7798  * may proceed without blocking.  It is important to call this before
7799  * attempting a GFP_KERNEL allocation while holding the mddev lock.
7800  * Must be called with mddev_lock held.
7801  *
7802  * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock
7803  * is dropped, so return -EAGAIN after notifying userspace.
7804  */
7805 int md_allow_write(struct mddev *mddev)
7806 {
7807         if (!mddev->pers)
7808                 return 0;
7809         if (mddev->ro)
7810                 return 0;
7811         if (!mddev->pers->sync_request)
7812                 return 0;
7813
7814         spin_lock(&mddev->lock);
7815         if (mddev->in_sync) {
7816                 mddev->in_sync = 0;
7817                 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
7818                 set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
7819                 if (mddev->safemode_delay &&
7820                     mddev->safemode == 0)
7821                         mddev->safemode = 1;
7822                 spin_unlock(&mddev->lock);
7823                 md_update_sb(mddev, 0);
7824                 sysfs_notify_dirent_safe(mddev->sysfs_state);
7825         } else
7826                 spin_unlock(&mddev->lock);
7827
7828         if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
7829                 return -EAGAIN;
7830         else
7831                 return 0;
7832 }
7833 EXPORT_SYMBOL_GPL(md_allow_write);
7834
7835 #define SYNC_MARKS      10
7836 #define SYNC_MARK_STEP  (3*HZ)
7837 #define UPDATE_FREQUENCY (5*60*HZ)
7838 void md_do_sync(struct md_thread *thread)
7839 {
7840         struct mddev *mddev = thread->mddev;
7841         struct mddev *mddev2;
7842         unsigned int currspeed = 0,
7843                  window;
7844         sector_t max_sectors,j, io_sectors, recovery_done;
7845         unsigned long mark[SYNC_MARKS];
7846         unsigned long update_time;
7847         sector_t mark_cnt[SYNC_MARKS];
7848         int last_mark,m;
7849         struct list_head *tmp;
7850         sector_t last_check;
7851         int skipped = 0;
7852         struct md_rdev *rdev;
7853         char *desc, *action = NULL;
7854         struct blk_plug plug;
7855         int ret;
7856
7857         /* just incase thread restarts... */
7858         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7859                 return;
7860         if (mddev->ro) {/* never try to sync a read-only array */
7861                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7862                 return;
7863         }
7864
7865         if (mddev_is_clustered(mddev)) {
7866                 ret = md_cluster_ops->resync_start(mddev);
7867                 if (ret)
7868                         goto skip;
7869
7870                 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
7871                 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
7872                         test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
7873                         test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
7874                      && ((unsigned long long)mddev->curr_resync_completed
7875                          < (unsigned long long)mddev->resync_max_sectors))
7876                         goto skip;
7877         }
7878
7879         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7880                 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
7881                         desc = "data-check";
7882                         action = "check";
7883                 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7884                         desc = "requested-resync";
7885                         action = "repair";
7886                 } else
7887                         desc = "resync";
7888         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7889                 desc = "reshape";
7890         else
7891                 desc = "recovery";
7892
7893         mddev->last_sync_action = action ?: desc;
7894
7895         /* we overload curr_resync somewhat here.
7896          * 0 == not engaged in resync at all
7897          * 2 == checking that there is no conflict with another sync
7898          * 1 == like 2, but have yielded to allow conflicting resync to
7899          *              commense
7900          * other == active in resync - this many blocks
7901          *
7902          * Before starting a resync we must have set curr_resync to
7903          * 2, and then checked that every "conflicting" array has curr_resync
7904          * less than ours.  When we find one that is the same or higher
7905          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
7906          * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7907          * This will mean we have to start checking from the beginning again.
7908          *
7909          */
7910
7911         do {
7912                 int mddev2_minor = -1;
7913                 mddev->curr_resync = 2;
7914
7915         try_again:
7916                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7917                         goto skip;
7918                 for_each_mddev(mddev2, tmp) {
7919                         if (mddev2 == mddev)
7920                                 continue;
7921                         if (!mddev->parallel_resync
7922                         &&  mddev2->curr_resync
7923                         &&  match_mddev_units(mddev, mddev2)) {
7924                                 DEFINE_WAIT(wq);
7925                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
7926                                         /* arbitrarily yield */
7927                                         mddev->curr_resync = 1;
7928                                         wake_up(&resync_wait);
7929                                 }
7930                                 if (mddev > mddev2 && mddev->curr_resync == 1)
7931                                         /* no need to wait here, we can wait the next
7932                                          * time 'round when curr_resync == 2
7933                                          */
7934                                         continue;
7935                                 /* We need to wait 'interruptible' so as not to
7936                                  * contribute to the load average, and not to
7937                                  * be caught by 'softlockup'
7938                                  */
7939                                 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
7940                                 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7941                                     mddev2->curr_resync >= mddev->curr_resync) {
7942                                         if (mddev2_minor != mddev2->md_minor) {
7943                                                 mddev2_minor = mddev2->md_minor;
7944                                                 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
7945                                                         desc, mdname(mddev),
7946                                                         mdname(mddev2));
7947                                         }
7948                                         mddev_put(mddev2);
7949                                         if (signal_pending(current))
7950                                                 flush_signals(current);
7951                                         schedule();
7952                                         finish_wait(&resync_wait, &wq);
7953                                         goto try_again;
7954                                 }
7955                                 finish_wait(&resync_wait, &wq);
7956                         }
7957                 }
7958         } while (mddev->curr_resync < 2);
7959
7960         j = 0;
7961         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7962                 /* resync follows the size requested by the personality,
7963                  * which defaults to physical size, but can be virtual size
7964                  */
7965                 max_sectors = mddev->resync_max_sectors;
7966                 atomic64_set(&mddev->resync_mismatches, 0);
7967                 /* we don't use the checkpoint if there's a bitmap */
7968                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7969                         j = mddev->resync_min;
7970                 else if (!mddev->bitmap)
7971                         j = mddev->recovery_cp;
7972
7973         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7974                 max_sectors = mddev->resync_max_sectors;
7975         else {
7976                 /* recovery follows the physical size of devices */
7977                 max_sectors = mddev->dev_sectors;
7978                 j = MaxSector;
7979                 rcu_read_lock();
7980                 rdev_for_each_rcu(rdev, mddev)
7981                         if (rdev->raid_disk >= 0 &&
7982                             !test_bit(Journal, &rdev->flags) &&
7983                             !test_bit(Faulty, &rdev->flags) &&
7984                             !test_bit(In_sync, &rdev->flags) &&
7985                             rdev->recovery_offset < j)
7986                                 j = rdev->recovery_offset;
7987                 rcu_read_unlock();
7988
7989                 /* If there is a bitmap, we need to make sure all
7990                  * writes that started before we added a spare
7991                  * complete before we start doing a recovery.
7992                  * Otherwise the write might complete and (via
7993                  * bitmap_endwrite) set a bit in the bitmap after the
7994                  * recovery has checked that bit and skipped that
7995                  * region.
7996                  */
7997                 if (mddev->bitmap) {
7998                         mddev->pers->quiesce(mddev, 1);
7999                         mddev->pers->quiesce(mddev, 0);
8000                 }
8001         }
8002
8003         pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
8004         pr_debug("md: minimum _guaranteed_  speed: %d KB/sec/disk.\n", speed_min(mddev));
8005         pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
8006                  speed_max(mddev), desc);
8007
8008         is_mddev_idle(mddev, 1); /* this initializes IO event counters */
8009
8010         io_sectors = 0;
8011         for (m = 0; m < SYNC_MARKS; m++) {
8012                 mark[m] = jiffies;
8013                 mark_cnt[m] = io_sectors;
8014         }
8015         last_mark = 0;
8016         mddev->resync_mark = mark[last_mark];
8017         mddev->resync_mark_cnt = mark_cnt[last_mark];
8018
8019         /*
8020          * Tune reconstruction:
8021          */
8022         window = 32*(PAGE_SIZE/512);
8023         pr_debug("md: using %dk window, over a total of %lluk.\n",
8024                  window/2, (unsigned long long)max_sectors/2);
8025
8026         atomic_set(&mddev->recovery_active, 0);
8027         last_check = 0;
8028
8029         if (j>2) {
8030                 pr_debug("md: resuming %s of %s from checkpoint.\n",
8031                          desc, mdname(mddev));
8032                 mddev->curr_resync = j;
8033         } else
8034                 mddev->curr_resync = 3; /* no longer delayed */
8035         mddev->curr_resync_completed = j;
8036         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8037         md_new_event(mddev);
8038         update_time = jiffies;
8039
8040         blk_start_plug(&plug);
8041         while (j < max_sectors) {
8042                 sector_t sectors;
8043
8044                 skipped = 0;
8045
8046                 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8047                     ((mddev->curr_resync > mddev->curr_resync_completed &&
8048                       (mddev->curr_resync - mddev->curr_resync_completed)
8049                       > (max_sectors >> 4)) ||
8050                      time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
8051                      (j - mddev->curr_resync_completed)*2
8052                      >= mddev->resync_max - mddev->curr_resync_completed ||
8053                      mddev->curr_resync_completed > mddev->resync_max
8054                             )) {
8055                         /* time to update curr_resync_completed */
8056                         wait_event(mddev->recovery_wait,
8057                                    atomic_read(&mddev->recovery_active) == 0);
8058                         mddev->curr_resync_completed = j;
8059                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
8060                             j > mddev->recovery_cp)
8061                                 mddev->recovery_cp = j;
8062                         update_time = jiffies;
8063                         set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8064                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8065                 }
8066
8067                 while (j >= mddev->resync_max &&
8068                        !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8069                         /* As this condition is controlled by user-space,
8070                          * we can block indefinitely, so use '_interruptible'
8071                          * to avoid triggering warnings.
8072                          */
8073                         flush_signals(current); /* just in case */
8074                         wait_event_interruptible(mddev->recovery_wait,
8075                                                  mddev->resync_max > j
8076                                                  || test_bit(MD_RECOVERY_INTR,
8077                                                              &mddev->recovery));
8078                 }
8079
8080                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8081                         break;
8082
8083                 sectors = mddev->pers->sync_request(mddev, j, &skipped);
8084                 if (sectors == 0) {
8085                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8086                         break;
8087                 }
8088
8089                 if (!skipped) { /* actual IO requested */
8090                         io_sectors += sectors;
8091                         atomic_add(sectors, &mddev->recovery_active);
8092                 }
8093
8094                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8095                         break;
8096
8097                 j += sectors;
8098                 if (j > max_sectors)
8099                         /* when skipping, extra large numbers can be returned. */
8100                         j = max_sectors;
8101                 if (j > 2)
8102                         mddev->curr_resync = j;
8103                 mddev->curr_mark_cnt = io_sectors;
8104                 if (last_check == 0)
8105                         /* this is the earliest that rebuild will be
8106                          * visible in /proc/mdstat
8107                          */
8108                         md_new_event(mddev);
8109
8110                 if (last_check + window > io_sectors || j == max_sectors)
8111                         continue;
8112
8113                 last_check = io_sectors;
8114         repeat:
8115                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
8116                         /* step marks */
8117                         int next = (last_mark+1) % SYNC_MARKS;
8118
8119                         mddev->resync_mark = mark[next];
8120                         mddev->resync_mark_cnt = mark_cnt[next];
8121                         mark[next] = jiffies;
8122                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
8123                         last_mark = next;
8124                 }
8125
8126                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8127                         break;
8128
8129                 /*
8130                  * this loop exits only if either when we are slower than
8131                  * the 'hard' speed limit, or the system was IO-idle for
8132                  * a jiffy.
8133                  * the system might be non-idle CPU-wise, but we only care
8134                  * about not overloading the IO subsystem. (things like an
8135                  * e2fsck being done on the RAID array should execute fast)
8136                  */
8137                 cond_resched();
8138
8139                 recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
8140                 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
8141                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
8142
8143                 if (currspeed > speed_min(mddev)) {
8144                         if (currspeed > speed_max(mddev)) {
8145                                 msleep(500);
8146                                 goto repeat;
8147                         }
8148                         if (!is_mddev_idle(mddev, 0)) {
8149                                 /*
8150                                  * Give other IO more of a chance.
8151                                  * The faster the devices, the less we wait.
8152                                  */
8153                                 wait_event(mddev->recovery_wait,
8154                                            !atomic_read(&mddev->recovery_active));
8155                         }
8156                 }
8157         }
8158         pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
8159                 test_bit(MD_RECOVERY_INTR, &mddev->recovery)
8160                 ? "interrupted" : "done");
8161         /*
8162          * this also signals 'finished resyncing' to md_stop
8163          */
8164         blk_finish_plug(&plug);
8165         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
8166
8167         if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8168             !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8169             mddev->curr_resync > 3) {
8170                 mddev->curr_resync_completed = mddev->curr_resync;
8171                 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
8172         }
8173         mddev->pers->sync_request(mddev, max_sectors, &skipped);
8174
8175         if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
8176             mddev->curr_resync > 3) {
8177                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
8178                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8179                                 if (mddev->curr_resync >= mddev->recovery_cp) {
8180                                         pr_debug("md: checkpointing %s of %s.\n",
8181                                                  desc, mdname(mddev));
8182                                         if (test_bit(MD_RECOVERY_ERROR,
8183                                                 &mddev->recovery))
8184                                                 mddev->recovery_cp =
8185                                                         mddev->curr_resync_completed;
8186                                         else
8187                                                 mddev->recovery_cp =
8188                                                         mddev->curr_resync;
8189                                 }
8190                         } else
8191                                 mddev->recovery_cp = MaxSector;
8192                 } else {
8193                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
8194                                 mddev->curr_resync = MaxSector;
8195                         rcu_read_lock();
8196                         rdev_for_each_rcu(rdev, mddev)
8197                                 if (rdev->raid_disk >= 0 &&
8198                                     mddev->delta_disks >= 0 &&
8199                                     !test_bit(Journal, &rdev->flags) &&
8200                                     !test_bit(Faulty, &rdev->flags) &&
8201                                     !test_bit(In_sync, &rdev->flags) &&
8202                                     rdev->recovery_offset < mddev->curr_resync)
8203                                         rdev->recovery_offset = mddev->curr_resync;
8204                         rcu_read_unlock();
8205                 }
8206         }
8207  skip:
8208         /* set CHANGE_PENDING here since maybe another update is needed,
8209          * so other nodes are informed. It should be harmless for normal
8210          * raid */
8211         set_mask_bits(&mddev->sb_flags, 0,
8212                       BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));
8213
8214         spin_lock(&mddev->lock);
8215         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8216                 /* We completed so min/max setting can be forgotten if used. */
8217                 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8218                         mddev->resync_min = 0;
8219                 mddev->resync_max = MaxSector;
8220         } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
8221                 mddev->resync_min = mddev->curr_resync_completed;
8222         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
8223         mddev->curr_resync = 0;
8224         spin_unlock(&mddev->lock);
8225
8226         wake_up(&resync_wait);
8227         md_wakeup_thread(mddev->thread);
8228         return;
8229 }
8230 EXPORT_SYMBOL_GPL(md_do_sync);
8231
8232 static int remove_and_add_spares(struct mddev *mddev,
8233                                  struct md_rdev *this)
8234 {
8235         struct md_rdev *rdev;
8236         int spares = 0;
8237         int removed = 0;
8238         bool remove_some = false;
8239
8240         rdev_for_each(rdev, mddev) {
8241                 if ((this == NULL || rdev == this) &&
8242                     rdev->raid_disk >= 0 &&
8243                     !test_bit(Blocked, &rdev->flags) &&
8244                     test_bit(Faulty, &rdev->flags) &&
8245                     atomic_read(&rdev->nr_pending)==0) {
8246                         /* Faulty non-Blocked devices with nr_pending == 0
8247                          * never get nr_pending incremented,
8248                          * never get Faulty cleared, and never get Blocked set.
8249                          * So we can synchronize_rcu now rather than once per device
8250                          */
8251                         remove_some = true;
8252                         set_bit(RemoveSynchronized, &rdev->flags);
8253                 }
8254         }
8255
8256         if (remove_some)
8257                 synchronize_rcu();
8258         rdev_for_each(rdev, mddev) {
8259                 if ((this == NULL || rdev == this) &&
8260                     rdev->raid_disk >= 0 &&
8261                     !test_bit(Blocked, &rdev->flags) &&
8262                     ((test_bit(RemoveSynchronized, &rdev->flags) ||
8263                      (!test_bit(In_sync, &rdev->flags) &&
8264                       !test_bit(Journal, &rdev->flags))) &&
8265                     atomic_read(&rdev->nr_pending)==0)) {
8266                         if (mddev->pers->hot_remove_disk(
8267                                     mddev, rdev) == 0) {
8268                                 sysfs_unlink_rdev(mddev, rdev);
8269                                 rdev->raid_disk = -1;
8270                                 removed++;
8271                         }
8272                 }
8273                 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
8274                         clear_bit(RemoveSynchronized, &rdev->flags);
8275         }
8276
8277         if (removed && mddev->kobj.sd)
8278                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8279
8280         if (this && removed)
8281                 goto no_add;
8282
8283         rdev_for_each(rdev, mddev) {
8284                 if (this && this != rdev)
8285                         continue;
8286                 if (test_bit(Candidate, &rdev->flags))
8287                         continue;
8288                 if (rdev->raid_disk >= 0 &&
8289                     !test_bit(In_sync, &rdev->flags) &&
8290                     !test_bit(Journal, &rdev->flags) &&
8291                     !test_bit(Faulty, &rdev->flags))
8292                         spares++;
8293                 if (rdev->raid_disk >= 0)
8294                         continue;
8295                 if (test_bit(Faulty, &rdev->flags))
8296                         continue;
8297                 if (!test_bit(Journal, &rdev->flags)) {
8298                         if (mddev->ro &&
8299                             ! (rdev->saved_raid_disk >= 0 &&
8300                                !test_bit(Bitmap_sync, &rdev->flags)))
8301                                 continue;
8302
8303                         rdev->recovery_offset = 0;
8304                 }
8305                 if (mddev->pers->
8306                     hot_add_disk(mddev, rdev) == 0) {
8307                         if (sysfs_link_rdev(mddev, rdev))
8308                                 /* failure here is OK */;
8309                         if (!test_bit(Journal, &rdev->flags))
8310                                 spares++;
8311                         md_new_event(mddev);
8312                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8313                 }
8314         }
8315 no_add:
8316         if (removed)
8317                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8318         return spares;
8319 }
8320
8321 static void md_start_sync(struct work_struct *ws)
8322 {
8323         struct mddev *mddev = container_of(ws, struct mddev, del_work);
8324
8325         mddev->sync_thread = md_register_thread(md_do_sync,
8326                                                 mddev,
8327                                                 "resync");
8328         if (!mddev->sync_thread) {
8329                 pr_warn("%s: could not start resync thread...\n",
8330                         mdname(mddev));
8331                 /* leave the spares where they are, it shouldn't hurt */
8332                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8333                 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8334                 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8335                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8336                 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8337                 wake_up(&resync_wait);
8338                 if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8339                                        &mddev->recovery))
8340                         if (mddev->sysfs_action)
8341                                 sysfs_notify_dirent_safe(mddev->sysfs_action);
8342         } else
8343                 md_wakeup_thread(mddev->sync_thread);
8344         sysfs_notify_dirent_safe(mddev->sysfs_action);
8345         md_new_event(mddev);
8346 }
8347
8348 /*
8349  * This routine is regularly called by all per-raid-array threads to
8350  * deal with generic issues like resync and super-block update.
8351  * Raid personalities that don't have a thread (linear/raid0) do not
8352  * need this as they never do any recovery or update the superblock.
8353  *
8354  * It does not do any resync itself, but rather "forks" off other threads
8355  * to do that as needed.
8356  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
8357  * "->recovery" and create a thread at ->sync_thread.
8358  * When the thread finishes it sets MD_RECOVERY_DONE
8359  * and wakeups up this thread which will reap the thread and finish up.
8360  * This thread also removes any faulty devices (with nr_pending == 0).
8361  *
8362  * The overall approach is:
8363  *  1/ if the superblock needs updating, update it.
8364  *  2/ If a recovery thread is running, don't do anything else.
8365  *  3/ If recovery has finished, clean up, possibly marking spares active.
8366  *  4/ If there are any faulty devices, remove them.
8367  *  5/ If array is degraded, try to add spares devices
8368  *  6/ If array has spares or is not in-sync, start a resync thread.
8369  */
8370 void md_check_recovery(struct mddev *mddev)
8371 {
8372         if (mddev->suspended)
8373                 return;
8374
8375         if (mddev->bitmap)
8376                 bitmap_daemon_work(mddev);
8377
8378         if (signal_pending(current)) {
8379                 if (mddev->pers->sync_request && !mddev->external) {
8380                         pr_debug("md: %s in immediate safe mode\n",
8381                                  mdname(mddev));
8382                         mddev->safemode = 2;
8383                 }
8384                 flush_signals(current);
8385         }
8386
8387         if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
8388                 return;
8389         if ( ! (
8390                 (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
8391                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8392                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
8393                 test_bit(MD_RELOAD_SB, &mddev->flags) ||
8394                 (mddev->external == 0 && mddev->safemode == 1) ||
8395                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
8396                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
8397                 ))
8398                 return;
8399
8400         if (mddev_trylock(mddev)) {
8401                 int spares = 0;
8402
8403                 if (mddev->ro) {
8404                         struct md_rdev *rdev;
8405                         if (!mddev->external && mddev->in_sync)
8406                                 /* 'Blocked' flag not needed as failed devices
8407                                  * will be recorded if array switched to read/write.
8408                                  * Leaving it set will prevent the device
8409                                  * from being removed.
8410                                  */
8411                                 rdev_for_each(rdev, mddev)
8412                                         clear_bit(Blocked, &rdev->flags);
8413                         /* On a read-only array we can:
8414                          * - remove failed devices
8415                          * - add already-in_sync devices if the array itself
8416                          *   is in-sync.
8417                          * As we only add devices that are already in-sync,
8418                          * we can activate the spares immediately.
8419                          */
8420                         remove_and_add_spares(mddev, NULL);
8421                         /* There is no thread, but we need to call
8422                          * ->spare_active and clear saved_raid_disk
8423                          */
8424                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
8425                         md_reap_sync_thread(mddev);
8426                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8427                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8428                         clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
8429                         goto unlock;
8430                 }
8431
8432                 if (mddev_is_clustered(mddev)) {
8433                         struct md_rdev *rdev;
8434                         /* kick the device if another node issued a
8435                          * remove disk.
8436                          */
8437                         rdev_for_each(rdev, mddev) {
8438                                 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
8439                                                 rdev->raid_disk < 0)
8440                                         md_kick_rdev_from_array(rdev);
8441                         }
8442
8443                         if (test_and_clear_bit(MD_RELOAD_SB, &mddev->flags))
8444                                 md_reload_sb(mddev, mddev->good_device_nr);
8445                 }
8446
8447                 if (!mddev->external) {
8448                         int did_change = 0;
8449                         spin_lock(&mddev->lock);
8450                         if (mddev->safemode &&
8451                             !atomic_read(&mddev->writes_pending) &&
8452                             !mddev->in_sync &&
8453                             mddev->recovery_cp == MaxSector) {
8454                                 mddev->in_sync = 1;
8455                                 did_change = 1;
8456                                 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
8457                         }
8458                         if (mddev->safemode == 1)
8459                                 mddev->safemode = 0;
8460                         spin_unlock(&mddev->lock);
8461                         if (did_change)
8462                                 sysfs_notify_dirent_safe(mddev->sysfs_state);
8463                 }
8464
8465                 if (mddev->sb_flags)
8466                         md_update_sb(mddev, 0);
8467
8468                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
8469                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
8470                         /* resync/recovery still happening */
8471                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8472                         goto unlock;
8473                 }
8474                 if (mddev->sync_thread) {
8475                         md_reap_sync_thread(mddev);
8476                         goto unlock;
8477                 }
8478                 /* Set RUNNING before clearing NEEDED to avoid
8479                  * any transients in the value of "sync_action".
8480                  */
8481                 mddev->curr_resync_completed = 0;
8482                 spin_lock(&mddev->lock);
8483                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8484                 spin_unlock(&mddev->lock);
8485                 /* Clear some bits that don't mean anything, but
8486                  * might be left set
8487                  */
8488                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
8489                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8490
8491                 if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
8492                     test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
8493                         goto not_running;
8494                 /* no recovery is running.
8495                  * remove any failed drives, then
8496                  * add spares if possible.
8497                  * Spares are also removed and re-added, to allow
8498                  * the personality to fail the re-add.
8499                  */
8500
8501                 if (mddev->reshape_position != MaxSector) {
8502                         if (mddev->pers->check_reshape == NULL ||
8503                             mddev->pers->check_reshape(mddev) != 0)
8504                                 /* Cannot proceed */
8505                                 goto not_running;
8506                         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8507                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8508                 } else if ((spares = remove_and_add_spares(mddev, NULL))) {
8509                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8510                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8511                         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8512                         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8513                 } else if (mddev->recovery_cp < MaxSector) {
8514                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8515                         clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
8516                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
8517                         /* nothing to be done ... */
8518                         goto not_running;
8519
8520                 if (mddev->pers->sync_request) {
8521                         if (spares) {
8522                                 /* We are adding a device or devices to an array
8523                                  * which has the bitmap stored on all devices.
8524                                  * So make sure all bitmap pages get written
8525                                  */
8526                                 bitmap_write_all(mddev->bitmap);
8527                         }
8528                         INIT_WORK(&mddev->del_work, md_start_sync);
8529                         queue_work(md_misc_wq, &mddev->del_work);
8530                         goto unlock;
8531                 }
8532         not_running:
8533                 if (!mddev->sync_thread) {
8534                         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8535                         wake_up(&resync_wait);
8536                         if (test_and_clear_bit(MD_RECOVERY_RECOVER,
8537                                                &mddev->recovery))
8538                                 if (mddev->sysfs_action)
8539                                         sysfs_notify_dirent_safe(mddev->sysfs_action);
8540                 }
8541         unlock:
8542                 wake_up(&mddev->sb_wait);
8543                 mddev_unlock(mddev);
8544         }
8545 }
8546 EXPORT_SYMBOL(md_check_recovery);
8547
8548 void md_reap_sync_thread(struct mddev *mddev)
8549 {
8550         struct md_rdev *rdev;
8551
8552         /* resync has finished, collect result */
8553         md_unregister_thread(&mddev->sync_thread);
8554         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
8555             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
8556                 /* success...*/
8557                 /* activate any spares */
8558                 if (mddev->pers->spare_active(mddev)) {
8559                         sysfs_notify(&mddev->kobj, NULL,
8560                                      "degraded");
8561                         set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8562                 }
8563         }
8564         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
8565             mddev->pers->finish_reshape)
8566                 mddev->pers->finish_reshape(mddev);
8567
8568         /* If array is no-longer degraded, then any saved_raid_disk
8569          * information must be scrapped.
8570          */
8571         if (!mddev->degraded)
8572                 rdev_for_each(rdev, mddev)
8573                         rdev->saved_raid_disk = -1;
8574
8575         md_update_sb(mddev, 1);
8576         /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
8577          * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
8578          * clustered raid */
8579         if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
8580                 md_cluster_ops->resync_finish(mddev);
8581         clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8582         clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8583         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8584         clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8585         clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
8586         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8587         wake_up(&resync_wait);
8588         /* flag recovery needed just to double check */
8589         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8590         sysfs_notify_dirent_safe(mddev->sysfs_action);
8591         md_new_event(mddev);
8592         if (mddev->event_work.func)
8593                 queue_work(md_misc_wq, &mddev->event_work);
8594 }
8595 EXPORT_SYMBOL(md_reap_sync_thread);
8596
8597 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
8598 {
8599         sysfs_notify_dirent_safe(rdev->sysfs_state);
8600         wait_event_timeout(rdev->blocked_wait,
8601                            !test_bit(Blocked, &rdev->flags) &&
8602                            !test_bit(BlockedBadBlocks, &rdev->flags),
8603                            msecs_to_jiffies(5000));
8604         rdev_dec_pending(rdev, mddev);
8605 }
8606 EXPORT_SYMBOL(md_wait_for_blocked_rdev);
8607
8608 void md_finish_reshape(struct mddev *mddev)
8609 {
8610         /* called be personality module when reshape completes. */
8611         struct md_rdev *rdev;
8612
8613         rdev_for_each(rdev, mddev) {
8614                 if (rdev->data_offset > rdev->new_data_offset)
8615                         rdev->sectors += rdev->data_offset - rdev->new_data_offset;
8616                 else
8617                         rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
8618                 rdev->data_offset = rdev->new_data_offset;
8619         }
8620 }
8621 EXPORT_SYMBOL(md_finish_reshape);
8622
8623 /* Bad block management */
8624
8625 /* Returns 1 on success, 0 on failure */
8626 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8627                        int is_new)
8628 {
8629         struct mddev *mddev = rdev->mddev;
8630         int rv;
8631         if (is_new)
8632                 s += rdev->new_data_offset;
8633         else
8634                 s += rdev->data_offset;
8635         rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
8636         if (rv == 0) {
8637                 /* Make sure they get written out promptly */
8638                 if (test_bit(ExternalBbl, &rdev->flags))
8639                         sysfs_notify(&rdev->kobj, NULL,
8640                                      "unacknowledged_bad_blocks");
8641                 sysfs_notify_dirent_safe(rdev->sysfs_state);
8642                 set_mask_bits(&mddev->sb_flags, 0,
8643                               BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
8644                 md_wakeup_thread(rdev->mddev->thread);
8645                 return 1;
8646         } else
8647                 return 0;
8648 }
8649 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
8650
8651 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
8652                          int is_new)
8653 {
8654         int rv;
8655         if (is_new)
8656                 s += rdev->new_data_offset;
8657         else
8658                 s += rdev->data_offset;
8659         rv = badblocks_clear(&rdev->badblocks, s, sectors);
8660         if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
8661                 sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
8662         return rv;
8663 }
8664 EXPORT_SYMBOL_GPL(rdev_clear_badblocks);
8665
8666 static int md_notify_reboot(struct notifier_block *this,
8667                             unsigned long code, void *x)
8668 {
8669         struct list_head *tmp;
8670         struct mddev *mddev;
8671         int need_delay = 0;
8672
8673         for_each_mddev(mddev, tmp) {
8674                 if (mddev_trylock(mddev)) {
8675                         if (mddev->pers)
8676                                 __md_stop_writes(mddev);
8677                         if (mddev->persistent)
8678                                 mddev->safemode = 2;
8679                         mddev_unlock(mddev);
8680                 }
8681                 need_delay = 1;
8682         }
8683         /*
8684          * certain more exotic SCSI devices are known to be
8685          * volatile wrt too early system reboots. While the
8686          * right place to handle this issue is the given
8687          * driver, we do want to have a safe RAID driver ...
8688          */
8689         if (need_delay)
8690                 mdelay(1000*1);
8691
8692         return NOTIFY_DONE;
8693 }
8694
8695 static struct notifier_block md_notifier = {
8696         .notifier_call  = md_notify_reboot,
8697         .next           = NULL,
8698         .priority       = INT_MAX, /* before any real devices */
8699 };
8700
8701 static void md_geninit(void)
8702 {
8703         pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
8704
8705         proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
8706 }
8707
8708 static int __init md_init(void)
8709 {
8710         int ret = -ENOMEM;
8711
8712         md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
8713         if (!md_wq)
8714                 goto err_wq;
8715
8716         md_misc_wq = alloc_workqueue("md_misc", 0, 0);
8717         if (!md_misc_wq)
8718                 goto err_misc_wq;
8719
8720         if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
8721                 goto err_md;
8722
8723         if ((ret = register_blkdev(0, "mdp")) < 0)
8724                 goto err_mdp;
8725         mdp_major = ret;
8726
8727         blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
8728                             md_probe, NULL, NULL);
8729         blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
8730                             md_probe, NULL, NULL);
8731
8732         register_reboot_notifier(&md_notifier);
8733         raid_table_header = register_sysctl_table(raid_root_table);
8734
8735         md_geninit();
8736         return 0;
8737
8738 err_mdp:
8739         unregister_blkdev(MD_MAJOR, "md");
8740 err_md:
8741         destroy_workqueue(md_misc_wq);
8742 err_misc_wq:
8743         destroy_workqueue(md_wq);
8744 err_wq:
8745         return ret;
8746 }
8747
8748 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8749 {
8750         struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8751         struct md_rdev *rdev2;
8752         int role, ret;
8753         char b[BDEVNAME_SIZE];
8754
8755         /* Check for change of roles in the active devices */
8756         rdev_for_each(rdev2, mddev) {
8757                 if (test_bit(Faulty, &rdev2->flags))
8758                         continue;
8759
8760                 /* Check if the roles changed */
8761                 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
8762
8763                 if (test_bit(Candidate, &rdev2->flags)) {
8764                         if (role == 0xfffe) {
8765                                 pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
8766                                 md_kick_rdev_from_array(rdev2);
8767                                 continue;
8768                         }
8769                         else
8770                                 clear_bit(Candidate, &rdev2->flags);
8771                 }
8772
8773                 if (role != rdev2->raid_disk) {
8774                         /* got activated */
8775                         if (rdev2->raid_disk == -1 && role != 0xffff) {
8776                                 rdev2->saved_raid_disk = role;
8777                                 ret = remove_and_add_spares(mddev, rdev2);
8778                                 pr_info("Activated spare: %s\n",
8779                                         bdevname(rdev2->bdev,b));
8780                                 /* wakeup mddev->thread here, so array could
8781                                  * perform resync with the new activated disk */
8782                                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8783                                 md_wakeup_thread(mddev->thread);
8784
8785                         }
8786                         /* device faulty
8787                          * We just want to do the minimum to mark the disk
8788                          * as faulty. The recovery is performed by the
8789                          * one who initiated the error.
8790                          */
8791                         if ((role == 0xfffe) || (role == 0xfffd)) {
8792                                 md_error(mddev, rdev2);
8793                                 clear_bit(Blocked, &rdev2->flags);
8794                         }
8795                 }
8796         }
8797
8798         if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
8799                 update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
8800
8801         /* Finally set the event to be up to date */
8802         mddev->events = le64_to_cpu(sb->events);
8803 }
8804
8805 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
8806 {
8807         int err;
8808         struct page *swapout = rdev->sb_page;
8809         struct mdp_superblock_1 *sb;
8810
8811         /* Store the sb page of the rdev in the swapout temporary
8812          * variable in case we err in the future
8813          */
8814         rdev->sb_page = NULL;
8815         err = alloc_disk_sb(rdev);
8816         if (err == 0) {
8817                 ClearPageUptodate(rdev->sb_page);
8818                 rdev->sb_loaded = 0;
8819                 err = super_types[mddev->major_version].
8820                         load_super(rdev, NULL, mddev->minor_version);
8821         }
8822         if (err < 0) {
8823                 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
8824                                 __func__, __LINE__, rdev->desc_nr, err);
8825                 if (rdev->sb_page)
8826                         put_page(rdev->sb_page);
8827                 rdev->sb_page = swapout;
8828                 rdev->sb_loaded = 1;
8829                 return err;
8830         }
8831
8832         sb = page_address(rdev->sb_page);
8833         /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
8834          * is not set
8835          */
8836
8837         if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
8838                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
8839
8840         /* The other node finished recovery, call spare_active to set
8841          * device In_sync and mddev->degraded
8842          */
8843         if (rdev->recovery_offset == MaxSector &&
8844             !test_bit(In_sync, &rdev->flags) &&
8845             mddev->pers->spare_active(mddev))
8846                 sysfs_notify(&mddev->kobj, NULL, "degraded");
8847
8848         put_page(swapout);
8849         return 0;
8850 }
8851
8852 void md_reload_sb(struct mddev *mddev, int nr)
8853 {
8854         struct md_rdev *rdev;
8855         int err;
8856
8857         /* Find the rdev */
8858         rdev_for_each_rcu(rdev, mddev) {
8859                 if (rdev->desc_nr == nr)
8860                         break;
8861         }
8862
8863         if (!rdev || rdev->desc_nr != nr) {
8864                 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
8865                 return;
8866         }
8867
8868         err = read_rdev(mddev, rdev);
8869         if (err < 0)
8870                 return;
8871
8872         check_sb_changes(mddev, rdev);
8873
8874         /* Read all rdev's to update recovery_offset */
8875         rdev_for_each_rcu(rdev, mddev)
8876                 read_rdev(mddev, rdev);
8877 }
8878 EXPORT_SYMBOL(md_reload_sb);
8879
8880 #ifndef MODULE
8881
8882 /*
8883  * Searches all registered partitions for autorun RAID arrays
8884  * at boot time.
8885  */
8886
8887 static DEFINE_MUTEX(detected_devices_mutex);
8888 static LIST_HEAD(all_detected_devices);
8889 struct detected_devices_node {
8890         struct list_head list;
8891         dev_t dev;
8892 };
8893
8894 void md_autodetect_dev(dev_t dev)
8895 {
8896         struct detected_devices_node *node_detected_dev;
8897
8898         node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
8899         if (node_detected_dev) {
8900                 node_detected_dev->dev = dev;
8901                 mutex_lock(&detected_devices_mutex);
8902                 list_add_tail(&node_detected_dev->list, &all_detected_devices);
8903                 mutex_unlock(&detected_devices_mutex);
8904         }
8905 }
8906
8907 static void autostart_arrays(int part)
8908 {
8909         struct md_rdev *rdev;
8910         struct detected_devices_node *node_detected_dev;
8911         dev_t dev;
8912         int i_scanned, i_passed;
8913
8914         i_scanned = 0;
8915         i_passed = 0;
8916
8917         pr_info("md: Autodetecting RAID arrays.\n");
8918
8919         mutex_lock(&detected_devices_mutex);
8920         while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
8921                 i_scanned++;
8922                 node_detected_dev = list_entry(all_detected_devices.next,
8923                                         struct detected_devices_node, list);
8924                 list_del(&node_detected_dev->list);
8925                 dev = node_detected_dev->dev;
8926                 kfree(node_detected_dev);
8927                 mutex_unlock(&detected_devices_mutex);
8928                 rdev = md_import_device(dev,0, 90);
8929                 mutex_lock(&detected_devices_mutex);
8930                 if (IS_ERR(rdev))
8931                         continue;
8932
8933                 if (test_bit(Faulty, &rdev->flags))
8934                         continue;
8935
8936                 set_bit(AutoDetected, &rdev->flags);
8937                 list_add(&rdev->same_set, &pending_raid_disks);
8938                 i_passed++;
8939         }
8940         mutex_unlock(&detected_devices_mutex);
8941
8942         pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);
8943
8944         autorun_devices(part);
8945 }
8946
8947 #endif /* !MODULE */
8948
8949 static __exit void md_exit(void)
8950 {
8951         struct mddev *mddev;
8952         struct list_head *tmp;
8953         int delay = 1;
8954
8955         blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
8956         blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
8957
8958         unregister_blkdev(MD_MAJOR,"md");
8959         unregister_blkdev(mdp_major, "mdp");
8960         unregister_reboot_notifier(&md_notifier);
8961         unregister_sysctl_table(raid_table_header);
8962
8963         /* We cannot unload the modules while some process is
8964          * waiting for us in select() or poll() - wake them up
8965          */
8966         md_unloading = 1;
8967         while (waitqueue_active(&md_event_waiters)) {
8968                 /* not safe to leave yet */
8969                 wake_up(&md_event_waiters);
8970                 msleep(delay);
8971                 delay += delay;
8972         }
8973         remove_proc_entry("mdstat", NULL);
8974
8975         for_each_mddev(mddev, tmp) {
8976                 export_array(mddev);
8977                 mddev->ctime = 0;
8978                 mddev->hold_active = 0;
8979                 /*
8980                  * for_each_mddev() will call mddev_put() at the end of each
8981                  * iteration.  As the mddev is now fully clear, this will
8982                  * schedule the mddev for destruction by a workqueue, and the
8983                  * destroy_workqueue() below will wait for that to complete.
8984                  */
8985         }
8986         destroy_workqueue(md_misc_wq);
8987         destroy_workqueue(md_wq);
8988 }
8989
8990 subsys_initcall(md_init);
8991 module_exit(md_exit)
8992
8993 static int get_ro(char *buffer, struct kernel_param *kp)
8994 {
8995         return sprintf(buffer, "%d", start_readonly);
8996 }
8997 static int set_ro(const char *val, struct kernel_param *kp)
8998 {
8999         return kstrtouint(val, 10, (unsigned int *)&start_readonly);
9000 }
9001
9002 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
9003 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
9004 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
9005
9006 MODULE_LICENSE("GPL");
9007 MODULE_DESCRIPTION("MD RAID framework");
9008 MODULE_ALIAS("md");
9009 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);