1 /*
2    md.c : Multiple Devices driver for Linux
3           Copyright (C) 1998, 1999, 2000 Ingo Molnar
4
5      completely rewritten, based on the MD driver code from Marc Zyngier
6
7    Changes:
8
9    - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
10    - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
11    - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
12    - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
13    - kmod support by: Cyrus Durgin
14    - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
15    - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>
16
17    - lots of fixes and improvements to the RAID1/RAID5 and generic
18      RAID code (such as request based resynchronization):
19
20      Neil Brown <neilb@cse.unsw.edu.au>.
21
22    - persistent bitmap code
23      Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
24
25    This program is free software; you can redistribute it and/or modify
26    it under the terms of the GNU General Public License as published by
27    the Free Software Foundation; either version 2, or (at your option)
28    any later version.
29
30    You should have received a copy of the GNU General Public License
31    (for example /usr/src/linux/COPYING); if not, write to the Free
32    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35 #include <linux/module.h>
36 #include <linux/config.h>
37 #include <linux/kthread.h>
38 #include <linux/linkage.h>
39 #include <linux/raid/md.h>
40 #include <linux/raid/bitmap.h>
41 #include <linux/sysctl.h>
42 #include <linux/devfs_fs_kernel.h>
43 #include <linux/buffer_head.h> /* for invalidate_bdev */
44 #include <linux/suspend.h>
45 #include <linux/poll.h>
46 #include <linux/mutex.h>
47 #include <linux/ctype.h>
48
49 #include <linux/init.h>
50
51 #include <linux/file.h>
52
53 #ifdef CONFIG_KMOD
54 #include <linux/kmod.h>
55 #endif
56
57 #include <asm/unaligned.h>
58
59 #define MAJOR_NR MD_MAJOR
60 #define MD_DRIVER
61
62 /* 63 partitions with the alternate major number (mdp) */
63 #define MdpMinorShift 6
64
65 #define DEBUG 0
66 #define dprintk(x...) ((void)(DEBUG && printk(x)))
67
68
69 #ifndef MODULE
70 static void autostart_arrays (int part);
71 #endif
72
73 static LIST_HEAD(pers_list);
74 static DEFINE_SPINLOCK(pers_lock);
75
76 static void md_print_devices(void);
77
78 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
79
80 /*
81  * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
82  * is 1000 KB/sec, so the extra system load does not show up that much.
83  * Increase it if you want to have more _guaranteed_ speed. Note that
84  * the RAID driver will use the maximum available bandwidth if the IO
85  * subsystem is idle. There is also an 'absolute maximum' reconstruction
86  * speed limit - in case reconstruction slows down your system despite
87  * idle IO detection.
88  *
89  * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
90  * or via /sys/block/mdX/md/sync_speed_{min,max}.
91  */
92
93 static int sysctl_speed_limit_min = 1000;
94 static int sysctl_speed_limit_max = 200000;
95 static inline int speed_min(mddev_t *mddev)
96 {
97         return mddev->sync_speed_min ?
98                 mddev->sync_speed_min : sysctl_speed_limit_min;
99 }
100
101 static inline int speed_max(mddev_t *mddev)
102 {
103         return mddev->sync_speed_max ?
104                 mddev->sync_speed_max : sysctl_speed_limit_max;
105 }
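/*
 * Illustrative sketch, not part of md.c: how a resync loop is expected
 * to use speed_min()/speed_max() above.  The real throttling lives in
 * md_do_sync(); this hypothetical helper only shows the intended
 * clamping of a requested rate (in KB/sec) to the per-array override
 * or, failing that, the global sysctl limits.
 */
static inline int md_clamp_sync_speed(mddev_t *mddev, int kb_per_sec)
{
        if (kb_per_sec < speed_min(mddev))
                return speed_min(mddev);        /* never below the guaranteed minimum */
        if (kb_per_sec > speed_max(mddev))
                return speed_max(mddev);        /* never above the absolute maximum */
        return kb_per_sec;
}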
106
107 static struct ctl_table_header *raid_table_header;
108
109 static ctl_table raid_table[] = {
110         {
111                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MIN,
112                 .procname       = "speed_limit_min",
113                 .data           = &sysctl_speed_limit_min,
114                 .maxlen         = sizeof(int),
115                 .mode           = 0644,
116                 .proc_handler   = &proc_dointvec,
117         },
118         {
119                 .ctl_name       = DEV_RAID_SPEED_LIMIT_MAX,
120                 .procname       = "speed_limit_max",
121                 .data           = &sysctl_speed_limit_max,
122                 .maxlen         = sizeof(int),
123                 .mode           = 0644,
124                 .proc_handler   = &proc_dointvec,
125         },
126         { .ctl_name = 0 }
127 };
128
129 static ctl_table raid_dir_table[] = {
130         {
131                 .ctl_name       = DEV_RAID,
132                 .procname       = "raid",
133                 .maxlen         = 0,
134                 .mode           = 0555,
135                 .child          = raid_table,
136         },
137         { .ctl_name = 0 }
138 };
139
140 static ctl_table raid_root_table[] = {
141         {
142                 .ctl_name       = CTL_DEV,
143                 .procname       = "dev",
144                 .maxlen         = 0,
145                 .mode           = 0555,
146                 .child          = raid_dir_table,
147         },
148         { .ctl_name = 0 }
149 };
150
151 static struct block_device_operations md_fops;
152
153 static int start_readonly;
154
155 /*
156  * We have a system wide 'event count' that is incremented
157  * on any 'interesting' event, and readers of /proc/mdstat
158  * can use 'poll' or 'select' to find out when the event
159  * count increases.
160  *
161  * Events are:
162  *  start array, stop array, error, add device, remove device,
163  *  start build, activate spare
164  */
165 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
166 static atomic_t md_event_count;
167 void md_new_event(mddev_t *mddev)
168 {
169         atomic_inc(&md_event_count);
170         wake_up(&md_event_waiters);
171         sysfs_notify(&mddev->kobj, NULL, "sync_action");
172 }
173 EXPORT_SYMBOL_GPL(md_new_event);
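/*
 * Illustrative user-space sketch, not part of md.c: md_new_event()
 * bumps md_event_count and wakes md_event_waiters, which is what lets
 * a monitor sleep in poll()/select() on /proc/mdstat and be woken on
 * the next "interesting" event.  Error handling is omitted and the
 * helper name is made up; guarded out so it can never build here.
 */
#if 0   /* user-space example only */
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>

static void wait_for_md_event(void)
{
        char buf[1024];
        struct pollfd pfd;

        pfd.fd = open("/proc/mdstat", O_RDONLY);
        read(pfd.fd, buf, sizeof(buf));   /* consume the current contents first */
        pfd.events = POLLPRI;             /* mdstat signals events as POLLERR|POLLPRI */
        poll(&pfd, 1, -1);                /* returns when the event count changes */
        close(pfd.fd);
}
#endif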
174
175 /* Alternate version that can be called from interrupts
176  * when calling sysfs_notify isn't needed.
177  */
178 void md_new_event_inintr(mddev_t *mddev)
179 {
180         atomic_inc(&md_event_count);
181         wake_up(&md_event_waiters);
182 }
183
184 /*
185  * Lets us iterate over all existing md arrays.
186  * all_mddevs_lock protects this list.
187  */
188 static LIST_HEAD(all_mddevs);
189 static DEFINE_SPINLOCK(all_mddevs_lock);
190
191
192 /*
193  * iterates through all used mddevs in the system.
194  * We take care to grab the all_mddevs_lock whenever navigating
195  * the list, and to always hold a refcount when unlocked.
196  * Any code which breaks out of this loop while it still owns
197  * a reference to the current mddev must mddev_put() it.
198  */
199 #define ITERATE_MDDEV(mddev,tmp)                                        \
200                                                                         \
201         for (({ spin_lock(&all_mddevs_lock);                            \
202                 tmp = all_mddevs.next;                                  \
203                 mddev = NULL;});                                        \
204              ({ if (tmp != &all_mddevs)                                 \
205                         mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
206                 spin_unlock(&all_mddevs_lock);                          \
207                 if (mddev) mddev_put(mddev);                            \
208                 mddev = list_entry(tmp, mddev_t, all_mddevs);           \
209                 tmp != &all_mddevs;});                                  \
210              ({ spin_lock(&all_mddevs_lock);                            \
211                 tmp = tmp->next;})                                      \
212                 )
213
214
215 static int md_fail_request (request_queue_t *q, struct bio *bio)
216 {
217         bio_io_error(bio, bio->bi_size);
218         return 0;
219 }
220
221 static inline mddev_t *mddev_get(mddev_t *mddev)
222 {
223         atomic_inc(&mddev->active);
224         return mddev;
225 }
226
227 static void mddev_put(mddev_t *mddev)
228 {
229         if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
230                 return;
231         if (!mddev->raid_disks && list_empty(&mddev->disks)) {
232                 list_del(&mddev->all_mddevs);
233                 spin_unlock(&all_mddevs_lock);
234                 blk_cleanup_queue(mddev->queue);
235                 kobject_unregister(&mddev->kobj);
236         } else
237                 spin_unlock(&all_mddevs_lock);
238 }
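/*
 * Illustrative sketch, not part of md.c: typical use of ITERATE_MDDEV.
 * The macro takes and drops all_mddevs_lock around each step and holds
 * a reference on the current mddev, so the loop body may sleep.  A
 * caller that breaks out early still owns that reference and must
 * mddev_put() it, as this hypothetical search helper notes.
 */
static mddev_t *md_find_array_by_minor(int minor)
{
        mddev_t *mddev;
        struct list_head *tmp;

        ITERATE_MDDEV(mddev, tmp) {
                if (mddev->md_minor == minor)
                        return mddev;   /* reference still held; caller must mddev_put() */
        }
        return NULL;
}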
239
240 static mddev_t * mddev_find(dev_t unit)
241 {
242         mddev_t *mddev, *new = NULL;
243
244  retry:
245         spin_lock(&all_mddevs_lock);
246         list_for_each_entry(mddev, &all_mddevs, all_mddevs)
247                 if (mddev->unit == unit) {
248                         mddev_get(mddev);
249                         spin_unlock(&all_mddevs_lock);
250                         kfree(new);
251                         return mddev;
252                 }
253
254         if (new) {
255                 list_add(&new->all_mddevs, &all_mddevs);
256                 spin_unlock(&all_mddevs_lock);
257                 return new;
258         }
259         spin_unlock(&all_mddevs_lock);
260
261         new = kzalloc(sizeof(*new), GFP_KERNEL);
262         if (!new)
263                 return NULL;
264
265         new->unit = unit;
266         if (MAJOR(unit) == MD_MAJOR)
267                 new->md_minor = MINOR(unit);
268         else
269                 new->md_minor = MINOR(unit) >> MdpMinorShift;
270
271         mutex_init(&new->reconfig_mutex);
272         INIT_LIST_HEAD(&new->disks);
273         INIT_LIST_HEAD(&new->all_mddevs);
274         init_timer(&new->safemode_timer);
275         atomic_set(&new->active, 1);
276         spin_lock_init(&new->write_lock);
277         init_waitqueue_head(&new->sb_wait);
278
279         new->queue = blk_alloc_queue(GFP_KERNEL);
280         if (!new->queue) {
281                 kfree(new);
282                 return NULL;
283         }
284         set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
285
286         blk_queue_make_request(new->queue, md_fail_request);
287
288         goto retry;
289 }
290
291 static inline int mddev_lock(mddev_t * mddev)
292 {
293         return mutex_lock_interruptible(&mddev->reconfig_mutex);
294 }
295
296 static inline int mddev_trylock(mddev_t * mddev)
297 {
298         return mutex_trylock(&mddev->reconfig_mutex);
299 }
300
301 static inline void mddev_unlock(mddev_t * mddev)
302 {
303         mutex_unlock(&mddev->reconfig_mutex);
304
305         md_wakeup_thread(mddev->thread);
306 }
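/*
 * Illustrative sketch, not part of md.c: the usual pattern around the
 * reconfiguration lock.  mddev_lock() is interruptible, so callers
 * (typically ioctl and sysfs handlers) must check its return value;
 * mddev_unlock() also wakes the per-array thread so that any state
 * changed under the lock is noticed promptly.  The helper name is made up.
 */
static int md_reconfig_example(mddev_t *mddev)
{
        int err = mddev_lock(mddev);

        if (err)
                return err;             /* interrupted by a signal */
        /* ... inspect or modify mddev under reconfig_mutex ... */
        mddev_unlock(mddev);            /* also wakes mddev->thread */
        return 0;
}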
307
308 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
309 {
310         mdk_rdev_t * rdev;
311         struct list_head *tmp;
312
313         ITERATE_RDEV(mddev,rdev,tmp) {
314                 if (rdev->desc_nr == nr)
315                         return rdev;
316         }
317         return NULL;
318 }
319
320 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
321 {
322         struct list_head *tmp;
323         mdk_rdev_t *rdev;
324
325         ITERATE_RDEV(mddev,rdev,tmp) {
326                 if (rdev->bdev->bd_dev == dev)
327                         return rdev;
328         }
329         return NULL;
330 }
331
332 static struct mdk_personality *find_pers(int level, char *clevel)
333 {
334         struct mdk_personality *pers;
335         list_for_each_entry(pers, &pers_list, list) {
336                 if (level != LEVEL_NONE && pers->level == level)
337                         return pers;
338                 if (strcmp(pers->name, clevel)==0)
339                         return pers;
340         }
341         return NULL;
342 }
343
344 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
345 {
346         sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
347         return MD_NEW_SIZE_BLOCKS(size);
348 }
349
350 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size)
351 {
352         sector_t size;
353
354         size = rdev->sb_offset;
355
356         if (chunk_size)
357                 size &= ~((sector_t)chunk_size/1024 - 1);
358         return size;
359 }
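/*
 * Illustrative worked example, not part of md.c, assuming the usual
 * md_p.h definition where MD_NEW_SIZE_BLOCKS() rounds the device size
 * (in 1K blocks) down to a 64KiB boundary and then steps back one
 * 64KiB reservation for the 0.90 superblock.  For a 97847 KiB device
 * with a 64KiB chunk size:
 *
 *   calc_dev_sboffset(): 97847 & ~63 = 97792;  97792 - 64 = 97728
 *     -> the superblock sits at offset 97728 KiB, the last
 *        64KiB-aligned slot on the device.
 *   calc_dev_size():     97728 & ~(65536/1024 - 1) = 97728
 *     -> the usable data size is rounded down to whole chunks.
 */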
360
361 static int alloc_disk_sb(mdk_rdev_t * rdev)
362 {
363         if (rdev->sb_page)
364                 MD_BUG();
365
366         rdev->sb_page = alloc_page(GFP_KERNEL);
367         if (!rdev->sb_page) {
368                 printk(KERN_ALERT "md: out of memory.\n");
369                 return -EINVAL;
370         }
371
372         return 0;
373 }
374
375 static void free_disk_sb(mdk_rdev_t * rdev)
376 {
377         if (rdev->sb_page) {
378                 put_page(rdev->sb_page);
379                 rdev->sb_loaded = 0;
380                 rdev->sb_page = NULL;
381                 rdev->sb_offset = 0;
382                 rdev->size = 0;
383         }
384 }
385
386
387 static int super_written(struct bio *bio, unsigned int bytes_done, int error)
388 {
389         mdk_rdev_t *rdev = bio->bi_private;
390         mddev_t *mddev = rdev->mddev;
391         if (bio->bi_size)
392                 return 1;
393
394         if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags))
395                 md_error(mddev, rdev);
396
397         if (atomic_dec_and_test(&mddev->pending_writes))
398                 wake_up(&mddev->sb_wait);
399         bio_put(bio);
400         return 0;
401 }
402
403 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error)
404 {
405         struct bio *bio2 = bio->bi_private;
406         mdk_rdev_t *rdev = bio2->bi_private;
407         mddev_t *mddev = rdev->mddev;
408         if (bio->bi_size)
409                 return 1;
410
411         if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
412             error == -EOPNOTSUPP) {
413                 unsigned long flags;
414                 /* barriers don't appear to be supported :-( */
415                 set_bit(BarriersNotsupp, &rdev->flags);
416                 mddev->barriers_work = 0;
417                 spin_lock_irqsave(&mddev->write_lock, flags);
418                 bio2->bi_next = mddev->biolist;
419                 mddev->biolist = bio2;
420                 spin_unlock_irqrestore(&mddev->write_lock, flags);
421                 wake_up(&mddev->sb_wait);
422                 bio_put(bio);
423                 return 0;
424         }
425         bio_put(bio2);
426         bio->bi_private = rdev;
427         return super_written(bio, bytes_done, error);
428 }
429
430 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
431                    sector_t sector, int size, struct page *page)
432 {
433         /* write first size bytes of page to sector of rdev
434          * Increment mddev->pending_writes before returning
435          * and decrement it on completion, waking up sb_wait
436          * if zero is reached.
437          * If an error occurred, call md_error
438          *
439          * As we might need to resubmit the request if BIO_RW_BARRIER
440          * causes -EOPNOTSUPP, we allocate a spare bio...
441          */
442         struct bio *bio = bio_alloc(GFP_NOIO, 1);
443         int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
444
445         bio->bi_bdev = rdev->bdev;
446         bio->bi_sector = sector;
447         bio_add_page(bio, page, size, 0);
448         bio->bi_private = rdev;
449         bio->bi_end_io = super_written;
450         bio->bi_rw = rw;
451
452         atomic_inc(&mddev->pending_writes);
453         if (!test_bit(BarriersNotsupp, &rdev->flags)) {
454                 struct bio *rbio;
455                 rw |= (1<<BIO_RW_BARRIER);
456                 rbio = bio_clone(bio, GFP_NOIO);
457                 rbio->bi_private = bio;
458                 rbio->bi_end_io = super_written_barrier;
459                 submit_bio(rw, rbio);
460         } else
461                 submit_bio(rw, bio);
462 }
463
464 void md_super_wait(mddev_t *mddev)
465 {
466         /* Wait for all scheduled superblock writes to complete; if any
467          * had to be retried (because a BARRIER write failed), resubmit them.
468          */
469         DEFINE_WAIT(wq);
470         for(;;) {
471                 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
472                 if (atomic_read(&mddev->pending_writes)==0)
473                         break;
474                 while (mddev->biolist) {
475                         struct bio *bio;
476                         spin_lock_irq(&mddev->write_lock);
477                         bio = mddev->biolist;
478                         mddev->biolist = bio->bi_next ;
479                         bio->bi_next = NULL;
480                         spin_unlock_irq(&mddev->write_lock);
481                         submit_bio(bio->bi_rw, bio);
482                 }
483                 schedule();
484         }
485         finish_wait(&mddev->sb_wait, &wq);
486 }
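/*
 * Illustrative sketch, not part of md.c: the intended pairing of
 * md_super_write() and md_super_wait().  Each write bumps
 * mddev->pending_writes; md_super_wait() sleeps on sb_wait until the
 * count drops to zero, resubmitting any bios parked on mddev->biolist
 * because a barrier write came back -EOPNOTSUPP.  This hypothetical
 * helper flushes one rdev's cached superblock page.
 */
static void md_write_one_sb(mddev_t *mddev, mdk_rdev_t *rdev)
{
        md_super_write(mddev, rdev,
                       rdev->sb_offset << 1,    /* sb_offset is in 1K blocks; convert to sectors */
                       rdev->sb_size, rdev->sb_page);
        md_super_wait(mddev);                   /* wait for the write (and any retry) to finish */
}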
487
488 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error)
489 {
490         if (bio->bi_size)
491                 return 1;
492
493         complete((struct completion*)bio->bi_private);
494         return 0;
495 }
496
497 int sync_page_io(struct block_device *bdev, sector_t sector, int size,
498                    struct page *page, int rw)
499 {
500         struct bio *bio = bio_alloc(GFP_NOIO, 1);
501         struct completion event;
502         int ret;
503
504         rw |= (1 << BIO_RW_SYNC);
505
506         bio->bi_bdev = bdev;
507         bio->bi_sector = sector;
508         bio_add_page(bio, page, size, 0);
509         init_completion(&event);
510         bio->bi_private = &event;
511         bio->bi_end_io = bi_complete;
512         submit_bio(rw, bio);
513         wait_for_completion(&event);
514
515         ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
516         bio_put(bio);
517         return ret;
518 }
519 EXPORT_SYMBOL_GPL(sync_page_io);
520
521 static int read_disk_sb(mdk_rdev_t * rdev, int size)
522 {
523         char b[BDEVNAME_SIZE];
524         if (!rdev->sb_page) {
525                 MD_BUG();
526                 return -EINVAL;
527         }
528         if (rdev->sb_loaded)
529                 return 0;
530
531
532         if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ))
533                 goto fail;
534         rdev->sb_loaded = 1;
535         return 0;
536
537 fail:
538         printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
539                 bdevname(rdev->bdev,b));
540         return -EINVAL;
541 }
542
543 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
544 {
545         if (    (sb1->set_uuid0 == sb2->set_uuid0) &&
546                 (sb1->set_uuid1 == sb2->set_uuid1) &&
547                 (sb1->set_uuid2 == sb2->set_uuid2) &&
548                 (sb1->set_uuid3 == sb2->set_uuid3))
549
550                 return 1;
551
552         return 0;
553 }
554
555
556 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
557 {
558         int ret;
559         mdp_super_t *tmp1, *tmp2;
560
561         tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
562         tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);
563
564         if (!tmp1 || !tmp2) {
565                 ret = 0;
566                 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n");
567                 goto abort;
568         }
569
570         *tmp1 = *sb1;
571         *tmp2 = *sb2;
572
573         /*
574          * nr_disks is not constant
575          */
576         tmp1->nr_disks = 0;
577         tmp2->nr_disks = 0;
578
579         if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4))
580                 ret = 0;
581         else
582                 ret = 1;
583
584 abort:
585         kfree(tmp1);
586         kfree(tmp2);
587         return ret;
588 }
589
590 static unsigned int calc_sb_csum(mdp_super_t * sb)
591 {
592         unsigned int disk_csum, csum;
593
594         disk_csum = sb->sb_csum;
595         sb->sb_csum = 0;
596         csum = csum_partial((void *)sb, MD_SB_BYTES, 0);
597         sb->sb_csum = disk_csum;
598         return csum;
599 }
600
601
602 /*
603  * Handle superblock details.
604  * We want to be able to handle multiple superblock formats
605  * so we have a common interface to them all, and an array of
606  * different handlers.
607  * We rely on user-space to write the initial superblock, and support
608  * reading and updating of superblocks.
609  * Interface methods are:
610  *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
611  *      loads and validates a superblock on dev.
612  *      if refdev != NULL, compare superblocks on both devices
613  *    Return:
614  *      0 - dev has a superblock that is compatible with refdev
615  *      1 - dev has a superblock that is compatible and newer than refdev
616  *          so dev should be used as the refdev in future
617  *     -EINVAL superblock incompatible or invalid
618  *     -othererror e.g. -EIO
619  *
620  *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
621  *      Verify that dev is acceptable into mddev.
622  *       The first time, mddev->raid_disks will be 0, and data from
623  *       dev should be merged in.  Subsequent calls check that dev
624  *       is new enough.  Return 0 or -EINVAL
625  *
626  *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
627  *     Update the superblock for rdev with data in mddev.
628  *     This does not write to disk.
629  *
630  */
631
632 struct super_type  {
633         char            *name;
634         struct module   *owner;
635         int             (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version);
636         int             (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
637         void            (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
638 };
639
640 /*
641  * load_super for 0.90.0 
642  */
643 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
644 {
645         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
646         mdp_super_t *sb;
647         int ret;
648         sector_t sb_offset;
649
650         /*
651          * Calculate the position of the superblock,
652          * it's at the end of the disk.
653          *
654          * It also happens to be a multiple of 4KB.
655          */
656         sb_offset = calc_dev_sboffset(rdev->bdev);
657         rdev->sb_offset = sb_offset;
658
659         ret = read_disk_sb(rdev, MD_SB_BYTES);
660         if (ret) return ret;
661
662         ret = -EINVAL;
663
664         bdevname(rdev->bdev, b);
665         sb = (mdp_super_t*)page_address(rdev->sb_page);
666
667         if (sb->md_magic != MD_SB_MAGIC) {
668                 printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
669                        b);
670                 goto abort;
671         }
672
673         if (sb->major_version != 0 ||
674             sb->minor_version < 90 ||
675             sb->minor_version > 91) {
676                 printk(KERN_WARNING "Bad version number %d.%d on %s\n",
677                         sb->major_version, sb->minor_version,
678                         b);
679                 goto abort;
680         }
681
682         if (sb->raid_disks <= 0)
683                 goto abort;
684
685         if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) {
686                 printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
687                         b);
688                 goto abort;
689         }
690
691         rdev->preferred_minor = sb->md_minor;
692         rdev->data_offset = 0;
693         rdev->sb_size = MD_SB_BYTES;
694
695         if (sb->level == LEVEL_MULTIPATH)
696                 rdev->desc_nr = -1;
697         else
698                 rdev->desc_nr = sb->this_disk.number;
699
700         if (refdev == 0)
701                 ret = 1;
702         else {
703                 __u64 ev1, ev2;
704                 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
705                 if (!uuid_equal(refsb, sb)) {
706                         printk(KERN_WARNING "md: %s has different UUID to %s\n",
707                                 b, bdevname(refdev->bdev,b2));
708                         goto abort;
709                 }
710                 if (!sb_equal(refsb, sb)) {
711                         printk(KERN_WARNING "md: %s has same UUID"
712                                " but different superblock to %s\n",
713                                b, bdevname(refdev->bdev, b2));
714                         goto abort;
715                 }
716                 ev1 = md_event(sb);
717                 ev2 = md_event(refsb);
718                 if (ev1 > ev2)
719                         ret = 1;
720                 else 
721                         ret = 0;
722         }
723         rdev->size = calc_dev_size(rdev, sb->chunk_size);
724
725         if (rdev->size < sb->size && sb->level > 1)
726                 /* "this cannot possibly happen" ... */
727                 ret = -EINVAL;
728
729  abort:
730         return ret;
731 }
732
733 /*
734  * validate_super for 0.90.0
735  */
736 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
737 {
738         mdp_disk_t *desc;
739         mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
740         __u64 ev1 = md_event(sb);
741
742         rdev->raid_disk = -1;
743         rdev->flags = 0;
744         if (mddev->raid_disks == 0) {
745                 mddev->major_version = 0;
746                 mddev->minor_version = sb->minor_version;
747                 mddev->patch_version = sb->patch_version;
748                 mddev->persistent = ! sb->not_persistent;
749                 mddev->chunk_size = sb->chunk_size;
750                 mddev->ctime = sb->ctime;
751                 mddev->utime = sb->utime;
752                 mddev->level = sb->level;
753                 mddev->clevel[0] = 0;
754                 mddev->layout = sb->layout;
755                 mddev->raid_disks = sb->raid_disks;
756                 mddev->size = sb->size;
757                 mddev->events = ev1;
758                 mddev->bitmap_offset = 0;
759                 mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
760
761                 if (mddev->minor_version >= 91) {
762                         mddev->reshape_position = sb->reshape_position;
763                         mddev->delta_disks = sb->delta_disks;
764                         mddev->new_level = sb->new_level;
765                         mddev->new_layout = sb->new_layout;
766                         mddev->new_chunk = sb->new_chunk;
767                 } else {
768                         mddev->reshape_position = MaxSector;
769                         mddev->delta_disks = 0;
770                         mddev->new_level = mddev->level;
771                         mddev->new_layout = mddev->layout;
772                         mddev->new_chunk = mddev->chunk_size;
773                 }
774
775                 if (sb->state & (1<<MD_SB_CLEAN))
776                         mddev->recovery_cp = MaxSector;
777                 else {
778                         if (sb->events_hi == sb->cp_events_hi && 
779                                 sb->events_lo == sb->cp_events_lo) {
780                                 mddev->recovery_cp = sb->recovery_cp;
781                         } else
782                                 mddev->recovery_cp = 0;
783                 }
784
785                 memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
786                 memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
787                 memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
788                 memcpy(mddev->uuid+12,&sb->set_uuid3, 4);
789
790                 mddev->max_disks = MD_SB_DISKS;
791
792                 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
793                     mddev->bitmap_file == NULL) {
794                         if (mddev->level != 1 && mddev->level != 4
795                             && mddev->level != 5 && mddev->level != 6
796                             && mddev->level != 10) {
797                                 /* FIXME use a better test */
798                                 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
799                                 return -EINVAL;
800                         }
801                         mddev->bitmap_offset = mddev->default_bitmap_offset;
802                 }
803
804         } else if (mddev->pers == NULL) {
805                 /* Insist on good event counter while assembling */
806                 ++ev1;
807                 if (ev1 < mddev->events) 
808                         return -EINVAL;
809         } else if (mddev->bitmap) {
810                 /* if adding to array with a bitmap, then we can accept an
811                  * older device ... but not too old.
812                  */
813                 if (ev1 < mddev->bitmap->events_cleared)
814                         return 0;
815         } else {
816                 if (ev1 < mddev->events)
817                         /* just a hot-add of a new device, leave raid_disk at -1 */
818                         return 0;
819         }
820
821         if (mddev->level != LEVEL_MULTIPATH) {
822                 desc = sb->disks + rdev->desc_nr;
823
824                 if (desc->state & (1<<MD_DISK_FAULTY))
825                         set_bit(Faulty, &rdev->flags);
826                 else if (desc->state & (1<<MD_DISK_SYNC) /* &&
827                             desc->raid_disk < mddev->raid_disks */) {
828                         set_bit(In_sync, &rdev->flags);
829                         rdev->raid_disk = desc->raid_disk;
830                 }
831                 if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
832                         set_bit(WriteMostly, &rdev->flags);
833         } else /* MULTIPATH are always insync */
834                 set_bit(In_sync, &rdev->flags);
835         return 0;
836 }
837
838 /*
839  * sync_super for 0.90.0
840  */
841 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
842 {
843         mdp_super_t *sb;
844         struct list_head *tmp;
845         mdk_rdev_t *rdev2;
846         int next_spare = mddev->raid_disks;
847
848
849         /* make rdev->sb match mddev data..
850          *
851          * 1/ zero out disks
852          * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
853          * 3/ any empty disks < next_spare become removed
854          *
855          * disks[0] gets initialised to REMOVED because
856          * we cannot be sure from other fields if it has
857          * been initialised or not.
858          */
859         int i;
860         int active=0, working=0,failed=0,spare=0,nr_disks=0;
861
862         rdev->sb_size = MD_SB_BYTES;
863
864         sb = (mdp_super_t*)page_address(rdev->sb_page);
865
866         memset(sb, 0, sizeof(*sb));
867
868         sb->md_magic = MD_SB_MAGIC;
869         sb->major_version = mddev->major_version;
870         sb->patch_version = mddev->patch_version;
871         sb->gvalid_words  = 0; /* ignored */
872         memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
873         memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
874         memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
875         memcpy(&sb->set_uuid3, mddev->uuid+12,4);
876
877         sb->ctime = mddev->ctime;
878         sb->level = mddev->level;
879         sb->size  = mddev->size;
880         sb->raid_disks = mddev->raid_disks;
881         sb->md_minor = mddev->md_minor;
882         sb->not_persistent = !mddev->persistent;
883         sb->utime = mddev->utime;
884         sb->state = 0;
885         sb->events_hi = (mddev->events>>32);
886         sb->events_lo = (u32)mddev->events;
887
888         if (mddev->reshape_position == MaxSector)
889                 sb->minor_version = 90;
890         else {
891                 sb->minor_version = 91;
892                 sb->reshape_position = mddev->reshape_position;
893                 sb->new_level = mddev->new_level;
894                 sb->delta_disks = mddev->delta_disks;
895                 sb->new_layout = mddev->new_layout;
896                 sb->new_chunk = mddev->new_chunk;
897         }
898         mddev->minor_version = sb->minor_version;
899         if (mddev->in_sync)
900         {
901                 sb->recovery_cp = mddev->recovery_cp;
902                 sb->cp_events_hi = (mddev->events>>32);
903                 sb->cp_events_lo = (u32)mddev->events;
904                 if (mddev->recovery_cp == MaxSector)
905                         sb->state = (1<< MD_SB_CLEAN);
906         } else
907                 sb->recovery_cp = 0;
908
909         sb->layout = mddev->layout;
910         sb->chunk_size = mddev->chunk_size;
911
912         if (mddev->bitmap && mddev->bitmap_file == NULL)
913                 sb->state |= (1<<MD_SB_BITMAP_PRESENT);
914
915         sb->disks[0].state = (1<<MD_DISK_REMOVED);
916         ITERATE_RDEV(mddev,rdev2,tmp) {
917                 mdp_disk_t *d;
918                 int desc_nr;
919                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
920                     && !test_bit(Faulty, &rdev2->flags))
921                         desc_nr = rdev2->raid_disk;
922                 else
923                         desc_nr = next_spare++;
924                 rdev2->desc_nr = desc_nr;
925                 d = &sb->disks[rdev2->desc_nr];
926                 nr_disks++;
927                 d->number = rdev2->desc_nr;
928                 d->major = MAJOR(rdev2->bdev->bd_dev);
929                 d->minor = MINOR(rdev2->bdev->bd_dev);
930                 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
931                     && !test_bit(Faulty, &rdev2->flags))
932                         d->raid_disk = rdev2->raid_disk;
933                 else
934                         d->raid_disk = rdev2->desc_nr; /* compatibility */
935                 if (test_bit(Faulty, &rdev2->flags))
936                         d->state = (1<<MD_DISK_FAULTY);
937                 else if (test_bit(In_sync, &rdev2->flags)) {
938                         d->state = (1<<MD_DISK_ACTIVE);
939                         d->state |= (1<<MD_DISK_SYNC);
940                         active++;
941                         working++;
942                 } else {
943                         d->state = 0;
944                         spare++;
945                         working++;
946                 }
947                 if (test_bit(WriteMostly, &rdev2->flags))
948                         d->state |= (1<<MD_DISK_WRITEMOSTLY);
949         }
950         /* now set the "removed" and "faulty" bits on any missing devices */
951         for (i=0 ; i < mddev->raid_disks ; i++) {
952                 mdp_disk_t *d = &sb->disks[i];
953                 if (d->state == 0 && d->number == 0) {
954                         d->number = i;
955                         d->raid_disk = i;
956                         d->state = (1<<MD_DISK_REMOVED);
957                         d->state |= (1<<MD_DISK_FAULTY);
958                         failed++;
959                 }
960         }
961         sb->nr_disks = nr_disks;
962         sb->active_disks = active;
963         sb->working_disks = working;
964         sb->failed_disks = failed;
965         sb->spare_disks = spare;
966
967         sb->this_disk = sb->disks[rdev->desc_nr];
968         sb->sb_csum = calc_sb_csum(sb);
969 }
970
971 /*
972  * version 1 superblock
973  */
974
975 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
976 {
977         unsigned int disk_csum, csum;
978         unsigned long long newcsum;
979         int size = 256 + le32_to_cpu(sb->max_dev)*2;
980         unsigned int *isuper = (unsigned int*)sb;
981         int i;
982
983         disk_csum = sb->sb_csum;
984         sb->sb_csum = 0;
985         newcsum = 0;
986         for (i=0; size>=4; size -= 4 )
987                 newcsum += le32_to_cpu(*isuper++);
988
989         if (size == 2)
990                 newcsum += le16_to_cpu(*(unsigned short*) isuper);
991
992         csum = (newcsum & 0xffffffff) + (newcsum >> 32);
993         sb->sb_csum = disk_csum;
994         return cpu_to_le32(csum);
995 }
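/*
 * Illustrative worked example, not part of md.c: the fold at the end of
 * calc_sb_1_csum() turns the 64-bit running sum back into 32 bits by
 * adding the high and low halves.  E.g. with newcsum = 0x123456789:
 *
 *   (0x123456789 & 0xffffffff) + (0x123456789 >> 32)
 *     = 0x23456789 + 0x1 = 0x2345678a
 */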
996
997 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
998 {
999         struct mdp_superblock_1 *sb;
1000         int ret;
1001         sector_t sb_offset;
1002         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1003         int bmask;
1004
1005         /*
1006          * Calculate the position of the superblock.
1007          * It is always aligned to a 4K boundary and
1008          * depending on minor_version, it can be:
1009          * 0: At least 8K, but less than 12K, from end of device
1010          * 1: At start of device
1011          * 2: 4K from start of device.
1012          */
1013         switch(minor_version) {
1014         case 0:
1015                 sb_offset = rdev->bdev->bd_inode->i_size >> 9;
1016                 sb_offset -= 8*2;
1017                 sb_offset &= ~(sector_t)(4*2-1);
1018                 /* convert from sectors to K */
1019                 sb_offset /= 2;
1020                 break;
1021         case 1:
1022                 sb_offset = 0;
1023                 break;
1024         case 2:
1025                 sb_offset = 4;
1026                 break;
1027         default:
1028                 return -EINVAL;
1029         }
1030         rdev->sb_offset = sb_offset;
1031
1032         /* superblock is rarely larger than 1K, but it can be larger,
1033          * and it is safe to read 4k, so we do that
1034          */
1035         ret = read_disk_sb(rdev, 4096);
1036         if (ret) return ret;
1037
1038
1039         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1040
1041         if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
1042             sb->major_version != cpu_to_le32(1) ||
1043             le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
1044             le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
1045             (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
1046                 return -EINVAL;
1047
1048         if (calc_sb_1_csum(sb) != sb->sb_csum) {
1049                 printk("md: invalid superblock checksum on %s\n",
1050                         bdevname(rdev->bdev,b));
1051                 return -EINVAL;
1052         }
1053         if (le64_to_cpu(sb->data_size) < 10) {
1054                 printk("md: data_size too small on %s\n",
1055                        bdevname(rdev->bdev,b));
1056                 return -EINVAL;
1057         }
1058         rdev->preferred_minor = 0xffff;
1059         rdev->data_offset = le64_to_cpu(sb->data_offset);
1060         atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1061
1062         rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1063         bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
1064         if (rdev->sb_size & bmask)
1065                 rdev-> sb_size = (rdev->sb_size | bmask)+1;
1066
1067         if (refdev == 0)
1068                 ret = 1;
1069         else {
1070                 __u64 ev1, ev2;
1071                 struct mdp_superblock_1 *refsb = 
1072                         (struct mdp_superblock_1*)page_address(refdev->sb_page);
1073
1074                 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
1075                     sb->level != refsb->level ||
1076                     sb->layout != refsb->layout ||
1077                     sb->chunksize != refsb->chunksize) {
1078                         printk(KERN_WARNING "md: %s has strangely different"
1079                                 " superblock to %s\n",
1080                                 bdevname(rdev->bdev,b),
1081                                 bdevname(refdev->bdev,b2));
1082                         return -EINVAL;
1083                 }
1084                 ev1 = le64_to_cpu(sb->events);
1085                 ev2 = le64_to_cpu(refsb->events);
1086
1087                 if (ev1 > ev2)
1088                         ret = 1;
1089                 else
1090                         ret = 0;
1091         }
1092         if (minor_version) 
1093                 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
1094         else
1095                 rdev->size = rdev->sb_offset;
1096         if (rdev->size < le64_to_cpu(sb->data_size)/2)
1097                 return -EINVAL;
1098         rdev->size = le64_to_cpu(sb->data_size)/2;
1099         if (le32_to_cpu(sb->chunksize))
1100                 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);
1101
1102         if (le32_to_cpu(sb->size) > rdev->size*2)
1103                 return -EINVAL;
1104         return ret;
1105 }
1106
1107 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
1108 {
1109         struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1110         __u64 ev1 = le64_to_cpu(sb->events);
1111
1112         rdev->raid_disk = -1;
1113         rdev->flags = 0;
1114         if (mddev->raid_disks == 0) {
1115                 mddev->major_version = 1;
1116                 mddev->patch_version = 0;
1117                 mddev->persistent = 1;
1118                 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
1119                 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
1120                 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
1121                 mddev->level = le32_to_cpu(sb->level);
1122                 mddev->clevel[0] = 0;
1123                 mddev->layout = le32_to_cpu(sb->layout);
1124                 mddev->raid_disks = le32_to_cpu(sb->raid_disks);
1125                 mddev->size = le64_to_cpu(sb->size)/2;
1126                 mddev->events = ev1;
1127                 mddev->bitmap_offset = 0;
1128                 mddev->default_bitmap_offset = 1024 >> 9;
1129                 
1130                 mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
1131                 memcpy(mddev->uuid, sb->set_uuid, 16);
1132
1133                 mddev->max_disks =  (4096-256)/2;
1134
1135                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
1136                     mddev->bitmap_file == NULL ) {
1137                         if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
1138                             && mddev->level != 10) {
1139                                 printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
1140                                 return -EINVAL;
1141                         }
1142                         mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
1143                 }
1144                 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
1145                         mddev->reshape_position = le64_to_cpu(sb->reshape_position);
1146                         mddev->delta_disks = le32_to_cpu(sb->delta_disks);
1147                         mddev->new_level = le32_to_cpu(sb->new_level);
1148                         mddev->new_layout = le32_to_cpu(sb->new_layout);
1149                         mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
1150                 } else {
1151                         mddev->reshape_position = MaxSector;
1152                         mddev->delta_disks = 0;
1153                         mddev->new_level = mddev->level;
1154                         mddev->new_layout = mddev->layout;
1155                         mddev->new_chunk = mddev->chunk_size;
1156                 }
1157
1158         } else if (mddev->pers == NULL) {
1159                 /* Insist on good event counter while assembling */
1160                 ++ev1;
1161                 if (ev1 < mddev->events)
1162                         return -EINVAL;
1163         } else if (mddev->bitmap) {
1164                 /* If adding to array with a bitmap, then we can accept an
1165                  * older device, but not too old.
1166                  */
1167                 if (ev1 < mddev->bitmap->events_cleared)
1168                         return 0;
1169         } else {
1170                 if (ev1 < mddev->events)
1171                         /* just a hot-add of a new device, leave raid_disk at -1 */
1172                         return 0;
1173         }
1174         if (mddev->level != LEVEL_MULTIPATH) {
1175                 int role;
1176                 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1177                 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1178                 switch(role) {
1179                 case 0xffff: /* spare */
1180                         break;
1181                 case 0xfffe: /* faulty */
1182                         set_bit(Faulty, &rdev->flags);
1183                         break;
1184                 default:
1185                         if ((le32_to_cpu(sb->feature_map) &
1186                              MD_FEATURE_RECOVERY_OFFSET))
1187                                 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1188                         else
1189                                 set_bit(In_sync, &rdev->flags);
1190                         rdev->raid_disk = role;
1191                         break;
1192                 }
1193                 if (sb->devflags & WriteMostly1)
1194                         set_bit(WriteMostly, &rdev->flags);
1195         } else /* MULTIPATH are always insync */
1196                 set_bit(In_sync, &rdev->flags);
1197
1198         return 0;
1199 }
1200
1201 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
1202 {
1203         struct mdp_superblock_1 *sb;
1204         struct list_head *tmp;
1205         mdk_rdev_t *rdev2;
1206         int max_dev, i;
1207         /* make rdev->sb match mddev and rdev data. */
1208
1209         sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
1210
1211         sb->feature_map = 0;
1212         sb->pad0 = 0;
1213         sb->recovery_offset = cpu_to_le64(0);
1214         memset(sb->pad1, 0, sizeof(sb->pad1));
1215         memset(sb->pad2, 0, sizeof(sb->pad2));
1216         memset(sb->pad3, 0, sizeof(sb->pad3));
1217
1218         sb->utime = cpu_to_le64((__u64)mddev->utime);
1219         sb->events = cpu_to_le64(mddev->events);
1220         if (mddev->in_sync)
1221                 sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
1222         else
1223                 sb->resync_offset = cpu_to_le64(0);
1224
1225         sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors);
1226
1227         sb->raid_disks = cpu_to_le32(mddev->raid_disks);
1228         sb->size = cpu_to_le64(mddev->size<<1);
1229
1230         if (mddev->bitmap && mddev->bitmap_file == NULL) {
1231                 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
1232                 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
1233         }
1234
1235         if (rdev->raid_disk >= 0 &&
1236             !test_bit(In_sync, &rdev->flags) &&
1237             rdev->recovery_offset > 0) {
1238                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
1239                 sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
1240         }
1241
1242         if (mddev->reshape_position != MaxSector) {
1243                 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
1244                 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
1245                 sb->new_layout = cpu_to_le32(mddev->new_layout);
1246                 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
1247                 sb->new_level = cpu_to_le32(mddev->new_level);
1248                 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
1249         }
1250
1251         max_dev = 0;
1252         ITERATE_RDEV(mddev,rdev2,tmp)
1253                 if (rdev2->desc_nr+1 > max_dev)
1254                         max_dev = rdev2->desc_nr+1;
1255         
1256         sb->max_dev = cpu_to_le32(max_dev);
1257         for (i=0; i<max_dev;i++)
1258                 sb->dev_roles[i] = cpu_to_le16(0xfffe);
1259         
1260         ITERATE_RDEV(mddev,rdev2,tmp) {
1261                 i = rdev2->desc_nr;
1262                 if (test_bit(Faulty, &rdev2->flags))
1263                         sb->dev_roles[i] = cpu_to_le16(0xfffe);
1264                 else if (test_bit(In_sync, &rdev2->flags))
1265                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1266                 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
1267                         sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
1268                 else
1269                         sb->dev_roles[i] = cpu_to_le16(0xffff);
1270         }
1271
1272         sb->sb_csum = calc_sb_1_csum(sb);
1273 }
1274
1275
1276 static struct super_type super_types[] = {
1277         [0] = {
1278                 .name   = "0.90.0",
1279                 .owner  = THIS_MODULE,
1280                 .load_super     = super_90_load,
1281                 .validate_super = super_90_validate,
1282                 .sync_super     = super_90_sync,
1283         },
1284         [1] = {
1285                 .name   = "md-1",
1286                 .owner  = THIS_MODULE,
1287                 .load_super     = super_1_load,
1288                 .validate_super = super_1_validate,
1289                 .sync_super     = super_1_sync,
1290         },
1291 };
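/*
 * Illustrative sketch, not part of md.c: how the super_types[] methods
 * are meant to be driven when a device joins an array (the real callers
 * are md_import_device() and the assembly/hot-add paths).  It follows
 * the contract documented above: load_super() reads and sanity-checks
 * the on-disk superblock (returning 1 if this device is newer than
 * refdev), and validate_super() merges it into, or checks it against,
 * the mddev.  The helper name is made up.
 */
static int md_adopt_rdev(mddev_t *mddev, mdk_rdev_t *rdev,
                         mdk_rdev_t *refdev, int minor_version)
{
        struct super_type *st = &super_types[mddev->major_version];
        int err;

        err = st->load_super(rdev, refdev, minor_version);
        if (err < 0)
                return err;     /* -EINVAL: incompatible superblock, or an I/O error */
        /* err == 1: rdev should become the reference device from now on */
        return st->validate_super(mddev, rdev);
}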
1292         
1293 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev)
1294 {
1295         struct list_head *tmp;
1296         mdk_rdev_t *rdev;
1297
1298         ITERATE_RDEV(mddev,rdev,tmp)
1299                 if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
1300                         return rdev;
1301
1302         return NULL;
1303 }
1304
1305 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1306 {
1307         struct list_head *tmp;
1308         mdk_rdev_t *rdev;
1309
1310         ITERATE_RDEV(mddev1,rdev,tmp)
1311                 if (match_dev_unit(mddev2, rdev))
1312                         return 1;
1313
1314         return 0;
1315 }
1316
1317 static LIST_HEAD(pending_raid_disks);
1318
1319 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
1320 {
1321         mdk_rdev_t *same_pdev;
1322         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
1323         struct kobject *ko;
1324         char *s;
1325
1326         if (rdev->mddev) {
1327                 MD_BUG();
1328                 return -EINVAL;
1329         }
1330         /* make sure rdev->size exceeds mddev->size */
1331         if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
1332                 if (mddev->pers)
1333                         /* Cannot change size, so fail */
1334                         return -ENOSPC;
1335                 else
1336                         mddev->size = rdev->size;
1337         }
1338         same_pdev = match_dev_unit(mddev, rdev);
1339         if (same_pdev)
1340                 printk(KERN_WARNING
1341                         "%s: WARNING: %s appears to be on the same physical"
1342                         " disk as %s. True\n     protection against single-disk"
1343                         " failure might be compromised.\n",
1344                         mdname(mddev), bdevname(rdev->bdev,b),
1345                         bdevname(same_pdev->bdev,b2));
1346
1347         /* Verify rdev->desc_nr is unique.
1348          * If it is -1, assign a free number, else
1349          * check number is not in use
1350          */
1351         if (rdev->desc_nr < 0) {
1352                 int choice = 0;
1353                 if (mddev->pers) choice = mddev->raid_disks;
1354                 while (find_rdev_nr(mddev, choice))
1355                         choice++;
1356                 rdev->desc_nr = choice;
1357         } else {
1358                 if (find_rdev_nr(mddev, rdev->desc_nr))
1359                         return -EBUSY;
1360         }
1361         bdevname(rdev->bdev,b);
1362         if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
1363                 return -ENOMEM;
1364         while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
1365                 *s = '!';
1366                         
1367         list_add(&rdev->same_set, &mddev->disks);
1368         rdev->mddev = mddev;
1369         printk(KERN_INFO "md: bind<%s>\n", b);
1370
1371         rdev->kobj.parent = &mddev->kobj;
1372         kobject_add(&rdev->kobj);
1373
1374         if (rdev->bdev->bd_part)
1375                 ko = &rdev->bdev->bd_part->kobj;
1376         else
1377                 ko = &rdev->bdev->bd_disk->kobj;
1378         sysfs_create_link(&rdev->kobj, ko, "block");
1379         bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
1380         return 0;
1381 }
1382
1383 static void unbind_rdev_from_array(mdk_rdev_t * rdev)
1384 {
1385         char b[BDEVNAME_SIZE];
1386         if (!rdev->mddev) {
1387                 MD_BUG();
1388                 return;
1389         }
1390         bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
1391         list_del_init(&rdev->same_set);
1392         printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
1393         rdev->mddev = NULL;
1394         sysfs_remove_link(&rdev->kobj, "block");
1395         kobject_del(&rdev->kobj);
1396 }
1397
1398 /*
1399  * prevent the device from being mounted, repartitioned or
1400  * otherwise reused by a RAID array (or any other kernel
1401  * subsystem), by bd_claiming the device.
1402  */
1403 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
1404 {
1405         int err = 0;
1406         struct block_device *bdev;
1407         char b[BDEVNAME_SIZE];
1408
1409         bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
1410         if (IS_ERR(bdev)) {
1411                 printk(KERN_ERR "md: could not open %s.\n",
1412                         __bdevname(dev, b));
1413                 return PTR_ERR(bdev);
1414         }
1415         err = bd_claim(bdev, rdev);
1416         if (err) {
1417                 printk(KERN_ERR "md: could not bd_claim %s.\n",
1418                         bdevname(bdev, b));
1419                 blkdev_put(bdev);
1420                 return err;
1421         }
1422         rdev->bdev = bdev;
1423         return err;
1424 }
1425
1426 static void unlock_rdev(mdk_rdev_t *rdev)
1427 {
1428         struct block_device *bdev = rdev->bdev;
1429         rdev->bdev = NULL;
1430         if (!bdev)
1431                 MD_BUG();
1432         bd_release(bdev);
1433         blkdev_put(bdev);
1434 }
1435
1436 void md_autodetect_dev(dev_t dev);
1437
1438 static void export_rdev(mdk_rdev_t * rdev)
1439 {
1440         char b[BDEVNAME_SIZE];
1441         printk(KERN_INFO "md: export_rdev(%s)\n",
1442                 bdevname(rdev->bdev,b));
1443         if (rdev->mddev)
1444                 MD_BUG();
1445         free_disk_sb(rdev);
1446         list_del_init(&rdev->same_set);
1447 #ifndef MODULE
1448         md_autodetect_dev(rdev->bdev->bd_dev);
1449 #endif
1450         unlock_rdev(rdev);
1451         kobject_put(&rdev->kobj);
1452 }
1453
1454 static void kick_rdev_from_array(mdk_rdev_t * rdev)
1455 {
1456         unbind_rdev_from_array(rdev);
1457         export_rdev(rdev);
1458 }
1459
1460 static void export_array(mddev_t *mddev)
1461 {
1462         struct list_head *tmp;
1463         mdk_rdev_t *rdev;
1464
1465         ITERATE_RDEV(mddev,rdev,tmp) {
1466                 if (!rdev->mddev) {
1467                         MD_BUG();
1468                         continue;
1469                 }
1470                 kick_rdev_from_array(rdev);
1471         }
1472         if (!list_empty(&mddev->disks))
1473                 MD_BUG();
1474         mddev->raid_disks = 0;
1475         mddev->major_version = 0;
1476 }
1477
1478 static void print_desc(mdp_disk_t *desc)
1479 {
1480         printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
1481                 desc->major,desc->minor,desc->raid_disk,desc->state);
1482 }
1483
1484 static void print_sb(mdp_super_t *sb)
1485 {
1486         int i;
1487
1488         printk(KERN_INFO 
1489                 "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
1490                 sb->major_version, sb->minor_version, sb->patch_version,
1491                 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
1492                 sb->ctime);
1493         printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
1494                 sb->level, sb->size, sb->nr_disks, sb->raid_disks,
1495                 sb->md_minor, sb->layout, sb->chunk_size);
1496         printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
1497                 " FD:%d SD:%d CSUM:%08x E:%08lx\n",
1498                 sb->utime, sb->state, sb->active_disks, sb->working_disks,
1499                 sb->failed_disks, sb->spare_disks,
1500                 sb->sb_csum, (unsigned long)sb->events_lo);
1501
1502         printk(KERN_INFO);
1503         for (i = 0; i < MD_SB_DISKS; i++) {
1504                 mdp_disk_t *desc;
1505
1506                 desc = sb->disks + i;
1507                 if (desc->number || desc->major || desc->minor ||
1508                     desc->raid_disk || (desc->state && (desc->state != 4))) {
1509                         printk("     D %2d: ", i);
1510                         print_desc(desc);
1511                 }
1512         }
1513         printk(KERN_INFO "md:     THIS: ");
1514         print_desc(&sb->this_disk);
1515
1516 }
1517
1518 static void print_rdev(mdk_rdev_t *rdev)
1519 {
1520         char b[BDEVNAME_SIZE];
1521         printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
1522                 bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
1523                 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
1524                 rdev->desc_nr);
1525         if (rdev->sb_loaded) {
1526                 printk(KERN_INFO "md: rdev superblock:\n");
1527                 print_sb((mdp_super_t*)page_address(rdev->sb_page));
1528         } else
1529                 printk(KERN_INFO "md: no rdev superblock!\n");
1530 }
1531
1532 static void md_print_devices(void)
1533 {
1534         struct list_head *tmp, *tmp2;
1535         mdk_rdev_t *rdev;
1536         mddev_t *mddev;
1537         char b[BDEVNAME_SIZE];
1538
1539         printk("\n");
1540         printk("md:     **********************************\n");
1541         printk("md:     * <COMPLETE RAID STATE PRINTOUT> *\n");
1542         printk("md:     **********************************\n");
1543         ITERATE_MDDEV(mddev,tmp) {
1544
1545                 if (mddev->bitmap)
1546                         bitmap_print_sb(mddev->bitmap);
1547                 else
1548                         printk("%s: ", mdname(mddev));
1549                 ITERATE_RDEV(mddev,rdev,tmp2)
1550                         printk("<%s>", bdevname(rdev->bdev,b));
1551                 printk("\n");
1552
1553                 ITERATE_RDEV(mddev,rdev,tmp2)
1554                         print_rdev(rdev);
1555         }
1556         printk("md:     **********************************\n");
1557         printk("\n");
1558 }
1559
1560
1561 static void sync_sbs(mddev_t * mddev, int nospares)
1562 {
1563         /* Update each superblock (in-memory image), but
1564          * if we are allowed to, skip spares which already
1565          * have the right event counter, or have one earlier
1566          * (which would mean they aren't being marked as dirty
1567          * with the rest of the array)
1568          */
1569         mdk_rdev_t *rdev;
1570         struct list_head *tmp;
1571
1572         ITERATE_RDEV(mddev,rdev,tmp) {
1573                 if (rdev->sb_events == mddev->events ||
1574                     (nospares &&
1575                      rdev->raid_disk < 0 &&
1576                      (rdev->sb_events&1)==0 &&
1577                      rdev->sb_events+1 == mddev->events)) {
1578                         /* Don't update this superblock */
1579                         rdev->sb_loaded = 2;
1580                 } else {
1581                         super_types[mddev->major_version].
1582                                 sync_super(mddev, rdev);
1583                         rdev->sb_loaded = 1;
1584                 }
1585         }
1586 }
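     /*
      * For example (illustrative numbers): if every superblock currently
      * carries events == 100 (even, i.e. a clean state) and a write makes
      * the array dirty, md_update_sb() below bumps events to 101 (odd) and
      * rewrites the active members; a spare still holding 100 satisfies
      * (sb_events & 1) == 0 && sb_events + 1 == mddev->events, so it is
      * skipped here.  When the array goes clean again, events can roll
      * back to 100 and the spare's superblock never needed rewriting.
      */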
1587
1588 void md_update_sb(mddev_t * mddev)
1589 {
1590         int err;
1591         struct list_head *tmp;
1592         mdk_rdev_t *rdev;
1593         int sync_req;
1594         int nospares = 0;
1595
1596 repeat:
1597         spin_lock_irq(&mddev->write_lock);
1598         sync_req = mddev->in_sync;
1599         mddev->utime = get_seconds();
1600         if (mddev->sb_dirty == 3)
1601                 /* just a clean <-> dirty transition; possibly leave spares alone,
1602                  * though if 'events' doesn't end up with the right even/odd parity,
1603                  * we will have to update the spares after all
1604                  */
1605                 nospares = 1;
1606
1607         /* If this is just a dirty<->clean transition, and the array is clean
1608          * and 'events' is odd, we can roll back to the previous clean state */
1609         if (mddev->sb_dirty == 3
1610             && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1611             && (mddev->events & 1))
1612                 mddev->events--;
1613         else {
1614                 /* otherwise we have to go forward and ... */
1615                 mddev->events ++;
1616                 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1617                         /* .. if the array isn't clean, insist on an odd 'events' */
1618                         if ((mddev->events&1)==0) {
1619                                 mddev->events++;
1620                                 nospares = 0;
1621                         }
1622                 } else {
1623                         /* otherwise insist on an even 'events' (for clean states) */
1624                         if ((mddev->events&1)) {
1625                                 mddev->events++;
1626                                 nospares = 0;
1627                         }
1628                 }
1629         }
1630
1631         if (!mddev->events) {
1632                 /*
1633                  * oops, this 64-bit counter should never wrap.
1634                  * Either we are in around ~1 trillion A.C., assuming
1635                  * 1 reboot per second, or we have a bug:
1636                  */
1637                 MD_BUG();
1638                 mddev->events --;
1639         }
1640         mddev->sb_dirty = 2;
1641         sync_sbs(mddev, nospares);
1642
1643         /*
1644          * do not write anything to disk if using
1645          * nonpersistent superblocks
1646          */
1647         if (!mddev->persistent) {
1648                 mddev->sb_dirty = 0;
1649                 spin_unlock_irq(&mddev->write_lock);
1650                 wake_up(&mddev->sb_wait);
1651                 return;
1652         }
1653         spin_unlock_irq(&mddev->write_lock);
1654
1655         dprintk(KERN_INFO 
1656                 "md: updating %s RAID superblock on device (in sync %d)\n",
1657                 mdname(mddev),mddev->in_sync);
1658
1659         err = bitmap_update_sb(mddev->bitmap);
1660         ITERATE_RDEV(mddev,rdev,tmp) {
1661                 char b[BDEVNAME_SIZE];
1662                 dprintk(KERN_INFO "md: ");
1663                 if (rdev->sb_loaded != 1)
1664                         continue; /* no noise on spare devices */
1665                 if (test_bit(Faulty, &rdev->flags))
1666                         dprintk("(skipping faulty ");
1667
1668                 dprintk("%s ", bdevname(rdev->bdev,b));
1669                 if (!test_bit(Faulty, &rdev->flags)) {
1670                         md_super_write(mddev,rdev,
1671                                        rdev->sb_offset<<1, rdev->sb_size,
1672                                        rdev->sb_page);
1673                         dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
1674                                 bdevname(rdev->bdev,b),
1675                                 (unsigned long long)rdev->sb_offset);
1676                         rdev->sb_events = mddev->events;
1677
1678                 } else
1679                         dprintk(")\n");
1680                 if (mddev->level == LEVEL_MULTIPATH)
1681                         /* only need to write one superblock... */
1682                         break;
1683         }
1684         md_super_wait(mddev);
1685         /* if there was a failure, sb_dirty was set to 1, and we re-write super */
1686
1687         spin_lock_irq(&mddev->write_lock);
1688         if (mddev->in_sync != sync_req|| mddev->sb_dirty == 1) {
1689                 /* have to write it out again */
1690                 spin_unlock_irq(&mddev->write_lock);
1691                 goto repeat;
1692         }
1693         mddev->sb_dirty = 0;
1694         spin_unlock_irq(&mddev->write_lock);
1695         wake_up(&mddev->sb_wait);
1696
1697 }
1698 EXPORT_SYMBOL_GPL(md_update_sb);
1699
1700 /* words written to sysfs files may, or may not, be \n terminated.
1701  * We want to accept either case. For this we use cmd_match.
1702  */
1703 static int cmd_match(const char *cmd, const char *str)
1704 {
1705         /* See if cmd, written into a sysfs file, matches
1706          * str.  They must either be the same, or cmd can
1707          * have a trailing newline
1708          */
1709         while (*cmd && *str && *cmd == *str) {
1710                 cmd++;
1711                 str++;
1712         }
1713         if (*cmd == '\n')
1714                 cmd++;
1715         if (*str || *cmd)
1716                 return 0;
1717         return 1;
1718 }
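     /*
      * For example, `echo faulty > state` delivers the string "faulty\n"
      * to a store method; cmd_match("faulty\n", "faulty") returns 1, while
      * "fault" or "faultyx" would return 0.
      */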
1719
1720 struct rdev_sysfs_entry {
1721         struct attribute attr;
1722         ssize_t (*show)(mdk_rdev_t *, char *);
1723         ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
1724 };
1725
1726 static ssize_t
1727 state_show(mdk_rdev_t *rdev, char *page)
1728 {
1729         char *sep = "";
1730         int len=0;
1731
1732         if (test_bit(Faulty, &rdev->flags)) {
1733                 len+= sprintf(page+len, "%sfaulty",sep);
1734                 sep = ",";
1735         }
1736         if (test_bit(In_sync, &rdev->flags)) {
1737                 len += sprintf(page+len, "%sin_sync",sep);
1738                 sep = ",";
1739         }
1740         if (!test_bit(Faulty, &rdev->flags) &&
1741             !test_bit(In_sync, &rdev->flags)) {
1742                 len += sprintf(page+len, "%sspare", sep);
1743                 sep = ",";
1744         }
1745         return len+sprintf(page+len, "\n");
1746 }
1747
1748 static ssize_t
1749 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1750 {
1751         /* can write
1752          *  faulty  - simulates an error
1753          *  remove  - disconnects the device
1754          */
1755         int err = -EINVAL;
1756         if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
1757                 md_error(rdev->mddev, rdev);
1758                 err = 0;
1759         } else if (cmd_match(buf, "remove")) {
1760                 if (rdev->raid_disk >= 0)
1761                         err = -EBUSY;
1762                 else {
1763                         mddev_t *mddev = rdev->mddev;
1764                         kick_rdev_from_array(rdev);
1765                         md_update_sb(mddev);
1766                         md_new_event(mddev);
1767                         err = 0;
1768                 }
1769         }
1770         return err ? err : len;
1771 }
1772 static struct rdev_sysfs_entry
1773 rdev_state = __ATTR(state, 0644, state_show, state_store);
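     /*
      * Example usage (paths assume an array md0 whose member shows up as
      * the rdev directory dev-sda1; adjust for the actual device):
      *   cat  /sys/block/md0/md/dev-sda1/state           -> "in_sync"
      *   echo faulty > /sys/block/md0/md/dev-sda1/state   (simulate a failure)
      *   echo remove > /sys/block/md0/md/dev-sda1/state   (only once it no
      *                                                     longer holds a slot)
      */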
1774
1775 static ssize_t
1776 super_show(mdk_rdev_t *rdev, char *page)
1777 {
1778         if (rdev->sb_loaded && rdev->sb_size) {
1779                 memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
1780                 return rdev->sb_size;
1781         } else
1782                 return 0;
1783 }
1784 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
1785
1786 static ssize_t
1787 errors_show(mdk_rdev_t *rdev, char *page)
1788 {
1789         return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
1790 }
1791
1792 static ssize_t
1793 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1794 {
1795         char *e;
1796         unsigned long n = simple_strtoul(buf, &e, 10);
1797         if (*buf && (*e == 0 || *e == '\n')) {
1798                 atomic_set(&rdev->corrected_errors, n);
1799                 return len;
1800         }
1801         return -EINVAL;
1802 }
1803 static struct rdev_sysfs_entry rdev_errors =
1804 __ATTR(errors, 0644, errors_show, errors_store);
1805
1806 static ssize_t
1807 slot_show(mdk_rdev_t *rdev, char *page)
1808 {
1809         if (rdev->raid_disk < 0)
1810                 return sprintf(page, "none\n");
1811         else
1812                 return sprintf(page, "%d\n", rdev->raid_disk);
1813 }
1814
1815 static ssize_t
1816 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1817 {
1818         char *e;
1819         int slot = simple_strtoul(buf, &e, 10);
1820         if (strncmp(buf, "none", 4)==0)
1821                 slot = -1;
1822         else if (e==buf || (*e && *e!= '\n'))
1823                 return -EINVAL;
1824         if (rdev->mddev->pers)
1825                 /* Cannot set slot in active array (yet) */
1826                 return -EBUSY;
1827         if (slot >= rdev->mddev->raid_disks)
1828                 return -ENOSPC;
1829         rdev->raid_disk = slot;
1830         /* assume it is working */
1831         rdev->flags = 0;
1832         set_bit(In_sync, &rdev->flags);
1833         return len;
1834 }
1835
1836
1837 static struct rdev_sysfs_entry rdev_slot =
1838 __ATTR(slot, 0644, slot_show, slot_store);
1839
1840 static ssize_t
1841 offset_show(mdk_rdev_t *rdev, char *page)
1842 {
1843         return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
1844 }
1845
1846 static ssize_t
1847 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1848 {
1849         char *e;
1850         unsigned long long offset = simple_strtoull(buf, &e, 10);
1851         if (e==buf || (*e && *e != '\n'))
1852                 return -EINVAL;
1853         if (rdev->mddev->pers)
1854                 return -EBUSY;
1855         rdev->data_offset = offset;
1856         return len;
1857 }
1858
1859 static struct rdev_sysfs_entry rdev_offset =
1860 __ATTR(offset, 0644, offset_show, offset_store);
1861
1862 static ssize_t
1863 rdev_size_show(mdk_rdev_t *rdev, char *page)
1864 {
1865         return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
1866 }
1867
1868 static ssize_t
1869 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
1870 {
1871         char *e;
1872         unsigned long long size = simple_strtoull(buf, &e, 10);
1873         if (e==buf || (*e && *e != '\n'))
1874                 return -EINVAL;
1875         if (rdev->mddev->pers)
1876                 return -EBUSY;
1877         rdev->size = size;
1878         if (size < rdev->mddev->size || rdev->mddev->size == 0)
1879                 rdev->mddev->size = size;
1880         return len;
1881 }
1882
1883 static struct rdev_sysfs_entry rdev_size =
1884 __ATTR(size, 0644, rdev_size_show, rdev_size_store);
1885
1886 static struct attribute *rdev_default_attrs[] = {
1887         &rdev_state.attr,
1888         &rdev_super.attr,
1889         &rdev_errors.attr,
1890         &rdev_slot.attr,
1891         &rdev_offset.attr,
1892         &rdev_size.attr,
1893         NULL,
1894 };
1895 static ssize_t
1896 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
1897 {
1898         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1899         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1900
1901         if (!entry->show)
1902                 return -EIO;
1903         return entry->show(rdev, page);
1904 }
1905
1906 static ssize_t
1907 rdev_attr_store(struct kobject *kobj, struct attribute *attr,
1908               const char *page, size_t length)
1909 {
1910         struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
1911         mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
1912
1913         if (!entry->store)
1914                 return -EIO;
1915         return entry->store(rdev, page, length);
1916 }
1917
1918 static void rdev_free(struct kobject *ko)
1919 {
1920         mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
1921         kfree(rdev);
1922 }
1923 static struct sysfs_ops rdev_sysfs_ops = {
1924         .show           = rdev_attr_show,
1925         .store          = rdev_attr_store,
1926 };
1927 static struct kobj_type rdev_ktype = {
1928         .release        = rdev_free,
1929         .sysfs_ops      = &rdev_sysfs_ops,
1930         .default_attrs  = rdev_default_attrs,
1931 };
1932
1933 /*
1934  * Import a device. If 'super_format' >= 0, then sanity check the superblock
1935  *
1936  * mark the device faulty if:
1937  *
1938  *   - the device is nonexistent (zero size)
1939  *   - the device has no valid superblock
1940  *
1941  * a faulty rdev _never_ has rdev->sb set.
1942  */
1943 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
1944 {
1945         char b[BDEVNAME_SIZE];
1946         int err;
1947         mdk_rdev_t *rdev;
1948         sector_t size;
1949
1950         rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1951         if (!rdev) {
1952                 printk(KERN_ERR "md: could not alloc mem for new device!\n");
1953                 return ERR_PTR(-ENOMEM);
1954         }
1955
1956         if ((err = alloc_disk_sb(rdev)))
1957                 goto abort_free;
1958
1959         err = lock_rdev(rdev, newdev);
1960         if (err)
1961                 goto abort_free;
1962
1963         rdev->kobj.parent = NULL;
1964         rdev->kobj.ktype = &rdev_ktype;
1965         kobject_init(&rdev->kobj);
1966
1967         rdev->desc_nr = -1;
1968         rdev->flags = 0;
1969         rdev->data_offset = 0;
1970         rdev->sb_events = 0;
1971         atomic_set(&rdev->nr_pending, 0);
1972         atomic_set(&rdev->read_errors, 0);
1973         atomic_set(&rdev->corrected_errors, 0);
1974
1975         size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
1976         if (!size) {
1977                 printk(KERN_WARNING 
1978                         "md: %s has zero or unknown size, marking faulty!\n",
1979                         bdevname(rdev->bdev,b));
1980                 err = -EINVAL;
1981                 goto abort_free;
1982         }
1983
1984         if (super_format >= 0) {
1985                 err = super_types[super_format].
1986                         load_super(rdev, NULL, super_minor);
1987                 if (err == -EINVAL) {
1988                         printk(KERN_WARNING 
1989                                 "md: %s has invalid sb, not importing!\n",
1990                                 bdevname(rdev->bdev,b));
1991                         goto abort_free;
1992                 }
1993                 if (err < 0) {
1994                         printk(KERN_WARNING 
1995                                 "md: could not read %s's sb, not importing!\n",
1996                                 bdevname(rdev->bdev,b));
1997                         goto abort_free;
1998                 }
1999         }
2000         INIT_LIST_HEAD(&rdev->same_set);
2001
2002         return rdev;
2003
2004 abort_free:
2005         if (rdev->sb_page) {
2006                 if (rdev->bdev)
2007                         unlock_rdev(rdev);
2008                 free_disk_sb(rdev);
2009         }
2010         kfree(rdev);
2011         return ERR_PTR(err);
2012 }
2013
2014 /*
2015  * Check a full RAID array for plausibility
2016  */
2017
2018
2019 static void analyze_sbs(mddev_t * mddev)
2020 {
2021         int i;
2022         struct list_head *tmp;
2023         mdk_rdev_t *rdev, *freshest;
2024         char b[BDEVNAME_SIZE];
2025
2026         freshest = NULL;
2027         ITERATE_RDEV(mddev,rdev,tmp)
2028                 switch (super_types[mddev->major_version].
2029                         load_super(rdev, freshest, mddev->minor_version)) {
2030                 case 1:
2031                         freshest = rdev;
2032                         break;
2033                 case 0:
2034                         break;
2035                 default:
2036                         printk( KERN_ERR \
2037                                 "md: fatal superblock inconsistency in %s"
2038                                 " -- removing from array\n", 
2039                                 bdevname(rdev->bdev,b));
2040                         kick_rdev_from_array(rdev);
2041                 }
2042
2043
2044         super_types[mddev->major_version].
2045                 validate_super(mddev, freshest);
2046
2047         i = 0;
2048         ITERATE_RDEV(mddev,rdev,tmp) {
2049                 if (rdev != freshest)
2050                         if (super_types[mddev->major_version].
2051                             validate_super(mddev, rdev)) {
2052                                 printk(KERN_WARNING "md: kicking non-fresh %s"
2053                                         " from array!\n",
2054                                         bdevname(rdev->bdev,b));
2055                                 kick_rdev_from_array(rdev);
2056                                 continue;
2057                         }
2058                 if (mddev->level == LEVEL_MULTIPATH) {
2059                         rdev->desc_nr = i++;
2060                         rdev->raid_disk = rdev->desc_nr;
2061                         set_bit(In_sync, &rdev->flags);
2062                 }
2063         }
2064
2065
2066
2067         if (mddev->recovery_cp != MaxSector &&
2068             mddev->level >= 1)
2069                 printk(KERN_ERR "md: %s: raid array is not clean"
2070                        " -- starting background reconstruction\n",
2071                        mdname(mddev));
2072
2073 }
2074
2075 static ssize_t
2076 safe_delay_show(mddev_t *mddev, char *page)
2077 {
2078         int msec = (mddev->safemode_delay*1000)/HZ;
2079         return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
2080 }
2081 static ssize_t
2082 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
2083 {
2084         int scale=1;
2085         int dot=0;
2086         int i;
2087         unsigned long msec;
2088         char buf[30];
2089         char *e;
2090         /* remove a period, and count digits after it */
2091         if (len >= sizeof(buf))
2092                 return -EINVAL;
2093         strlcpy(buf, cbuf, len);
2094         buf[len] = 0;
2095         for (i=0; i<len; i++) {
2096                 if (dot) {
2097                         if (isdigit(buf[i])) {
2098                                 buf[i-1] = buf[i];
2099                                 scale *= 10;
2100                         }
2101                         buf[i] = 0;
2102                 } else if (buf[i] == '.') {
2103                         dot=1;
2104                         buf[i] = 0;
2105                 }
2106         }
2107         msec = simple_strtoul(buf, &e, 10);
2108         if (e == buf || (*e && *e != '\n'))
2109                 return -EINVAL;
2110         msec = (msec * 1000) / scale;
2111         if (msec == 0)
2112                 mddev->safemode_delay = 0;
2113         else {
2114                 mddev->safemode_delay = (msec*HZ)/1000;
2115                 if (mddev->safemode_delay == 0)
2116                         mddev->safemode_delay = 1;
2117         }
2118         return len;
2119 }
2120 static struct md_sysfs_entry md_safe_delay =
2121 __ATTR(safe_mode_delay, 0644, safe_delay_show, safe_delay_store);
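     /*
      * Example: `echo 0.200 > safe_mode_delay` is parsed as 200 msec and
      * stored as 200*HZ/1000 jiffies; `echo 0 > safe_mode_delay`
      * effectively turns the safe-mode delay off.
      */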
2122
2123 static ssize_t
2124 level_show(mddev_t *mddev, char *page)
2125 {
2126         struct mdk_personality *p = mddev->pers;
2127         if (p)
2128                 return sprintf(page, "%s\n", p->name);
2129         else if (mddev->clevel[0])
2130                 return sprintf(page, "%s\n", mddev->clevel);
2131         else if (mddev->level != LEVEL_NONE)
2132                 return sprintf(page, "%d\n", mddev->level);
2133         else
2134                 return 0;
2135 }
2136
2137 static ssize_t
2138 level_store(mddev_t *mddev, const char *buf, size_t len)
2139 {
2140         int rv = len;
2141         if (mddev->pers)
2142                 return -EBUSY;
2143         if (len == 0)
2144                 return 0;
2145         if (len >= sizeof(mddev->clevel))
2146                 return -ENOSPC;
2147         strncpy(mddev->clevel, buf, len);
2148         if (mddev->clevel[len-1] == '\n')
2149                 len--;
2150         mddev->clevel[len] = 0;
2151         mddev->level = LEVEL_NONE;
2152         return rv;
2153 }
2154
2155 static struct md_sysfs_entry md_level =
2156 __ATTR(level, 0644, level_show, level_store);
2157
2158 static ssize_t
2159 raid_disks_show(mddev_t *mddev, char *page)
2160 {
2161         if (mddev->raid_disks == 0)
2162                 return 0;
2163         return sprintf(page, "%d\n", mddev->raid_disks);
2164 }
2165
2166 static int update_raid_disks(mddev_t *mddev, int raid_disks);
2167
2168 static ssize_t
2169 raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
2170 {
2171         /* set raid_disks directly if inactive, else attempt an on-line reshape */
2172         char *e;
2173         int rv = 0;
2174         unsigned long n = simple_strtoul(buf, &e, 10);
2175
2176         if (!*buf || (*e && *e != '\n'))
2177                 return -EINVAL;
2178
2179         if (mddev->pers)
2180                 rv = update_raid_disks(mddev, n);
2181         else
2182                 mddev->raid_disks = n;
2183         return rv ? rv : len;
2184 }
2185 static struct md_sysfs_entry md_raid_disks =
2186 __ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store);
2187
2188 static ssize_t
2189 chunk_size_show(mddev_t *mddev, char *page)
2190 {
2191         return sprintf(page, "%d\n", mddev->chunk_size);
2192 }
2193
2194 static ssize_t
2195 chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
2196 {
2197         /* can only set chunk_size if array is not yet active */
2198         char *e;
2199         unsigned long n = simple_strtoul(buf, &e, 10);
2200
2201         if (mddev->pers)
2202                 return -EBUSY;
2203         if (!*buf || (*e && *e != '\n'))
2204                 return -EINVAL;
2205
2206         mddev->chunk_size = n;
2207         return len;
2208 }
2209 static struct md_sysfs_entry md_chunk_size =
2210 __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store);
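     /*
      * Example: for an inactive array, `echo 131072 > chunk_size` requests
      * 128KB chunks (the value is in bytes); do_md_run() later insists on
      * a power of two no smaller than PAGE_SIZE.
      */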
2211
2212 /*
2213  * The array state can be:
2214  *
2215  * clear
2216  *     No devices, no size, no level
2217  *     Equivalent to STOP_ARRAY ioctl
2218  * inactive
2219  *     May have some settings, but array is not active
2220  *        all IO results in error
2221  *     When written, doesn't tear down array, but just stops it
2222  * suspended (not supported yet)
2223  *     All IO requests will block. The array can be reconfigured.
2224  *     Writing this, if accepted, will block until array is quiescent
2225  * readonly
2226  *     no resync can happen.  no superblocks get written.
2227  *     write requests fail
2228  * read-auto
2229  *     like readonly, but behaves like 'clean' on a write request.
2230  *
2231  * clean - no pending writes, but otherwise active.
2232  *     When written to inactive array, starts without resync
2233  *     If a write request arrives then
2234  *       if metadata is known, mark 'dirty' and switch to 'active'.
2235  *       if not known, block and switch to write-pending
2236  *     If written to an active array that has pending writes, then fails.
2237  * active
2238  *     fully active: IO and resync can be happening.
2239  *     When written to inactive array, starts with resync
2240  *
2241  * write-pending
2242  *     clean, but writes are blocked waiting for 'active' to be written.
2243  *
2244  * active-idle
2245  *     like active, but no writes have been seen for a while (100msec).
2246  *
2247  */
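     /*
      * For example, `cat array_state` on a running, quiescent array will
      * typically print "clean"; `echo readonly > array_state` stops it into
      * a read-only state, and `echo inactive > array_state` stops I/O
      * without tearing the array down (see array_state_store() below).
      */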
2248 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
2249                    write_pending, active_idle, bad_word};
2250 char *array_states[] = {
2251         "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
2252         "write-pending", "active-idle", NULL };
2253
2254 static int match_word(const char *word, char **list)
2255 {
2256         int n;
2257         for (n=0; list[n]; n++)
2258                 if (cmd_match(word, list[n]))
2259                         break;
2260         return n;
2261 }
2262
2263 static ssize_t
2264 array_state_show(mddev_t *mddev, char *page)
2265 {
2266         enum array_state st = inactive;
2267
2268         if (mddev->pers)
2269                 switch(mddev->ro) {
2270                 case 1:
2271                         st = readonly;
2272                         break;
2273                 case 2:
2274                         st = read_auto;
2275                         break;
2276                 case 0:
2277                         if (mddev->in_sync)
2278                                 st = clean;
2279                         else if (mddev->safemode)
2280                                 st = active_idle;
2281                         else
2282                                 st = active;
2283                 }
2284         else {
2285                 if (list_empty(&mddev->disks) &&
2286                     mddev->raid_disks == 0 &&
2287                     mddev->size == 0)
2288                         st = clear;
2289                 else
2290                         st = inactive;
2291         }
2292         return sprintf(page, "%s\n", array_states[st]);
2293 }
2294
2295 static int do_md_stop(mddev_t * mddev, int ro);
2296 static int do_md_run(mddev_t * mddev);
2297 static int restart_array(mddev_t *mddev);
2298
2299 static ssize_t
2300 array_state_store(mddev_t *mddev, const char *buf, size_t len)
2301 {
2302         int err = -EINVAL;
2303         enum array_state st = match_word(buf, array_states);
2304         switch(st) {
2305         case bad_word:
2306                 break;
2307         case clear:
2308                 /* stopping an active array */
2309                 if (mddev->pers) {
2310                         if (atomic_read(&mddev->active) > 1)
2311                                 return -EBUSY;
2312                         err = do_md_stop(mddev, 0);
2313                 }
2314                 break;
2315         case inactive:
2316                 /* stopping an active array */
2317                 if (mddev->pers) {
2318                         if (atomic_read(&mddev->active) > 1)
2319                                 return -EBUSY;
2320                         err = do_md_stop(mddev, 2);
2321                 }
2322                 break;
2323         case suspended:
2324                 break; /* not supported yet */
2325         case readonly:
2326                 if (mddev->pers)
2327                         err = do_md_stop(mddev, 1);
2328                 else {
2329                         mddev->ro = 1;
2330                         err = do_md_run(mddev);
2331                 }
2332                 break;
2333         case read_auto:
2334                 /* switching an active array to read-auto */
2335                 if (mddev->pers) {
2336                         err = do_md_stop(mddev, 1);
2337                         if (err == 0)
2338                                 mddev->ro = 2; /* FIXME mark devices writable */
2339                 } else {
2340                         mddev->ro = 2;
2341                         err = do_md_run(mddev);
2342                 }
2343                 break;
2344         case clean:
2345                 if (mddev->pers) {
2346                         restart_array(mddev);
2347                         spin_lock_irq(&mddev->write_lock);
2348                         if (atomic_read(&mddev->writes_pending) == 0) {
2349                                 mddev->in_sync = 1;
2350                                 mddev->sb_dirty = 1;
2351                         }
2352                         spin_unlock_irq(&mddev->write_lock);
2353                 } else {
2354                         mddev->ro = 0;
2355                         mddev->recovery_cp = MaxSector;
2356                         err = do_md_run(mddev);
2357                 }
2358                 break;
2359         case active:
2360                 if (mddev->pers) {
2361                         restart_array(mddev);
2362                         mddev->sb_dirty = 0;
2363                         wake_up(&mddev->sb_wait);
2364                         err = 0;
2365                 } else {
2366                         mddev->ro = 0;
2367                         err = do_md_run(mddev);
2368                 }
2369                 break;
2370         case write_pending:
2371         case active_idle:
2372                 /* these cannot be set */
2373                 break;
2374         }
2375         if (err)
2376                 return err;
2377         else
2378                 return len;
2379 }
2380 static struct md_sysfs_entry md_array_state = __ATTR(array_state, 0644, array_state_show, array_state_store);
2381
2382 static ssize_t
2383 null_show(mddev_t *mddev, char *page)
2384 {
2385         return -EINVAL;
2386 }
2387
2388 static ssize_t
2389 new_dev_store(mddev_t *mddev, const char *buf, size_t len)
2390 {
2391         /* buf must be of the form %d:%d (optionally '\n' terminated), giving major and minor numbers */
2392         /* The new device is added to the array.
2393          * If the array has a persistent superblock, we read the
2394          * superblock to initialise info and check validity.
2395          * Otherwise, the only checking done is that in bind_rdev_to_array,
2396          * which mainly checks size.
2397          */
2398         char *e;
2399         int major = simple_strtoul(buf, &e, 10);
2400         int minor;
2401         dev_t dev;
2402         mdk_rdev_t *rdev;
2403         int err;
2404
2405         if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
2406                 return -EINVAL;
2407         minor = simple_strtoul(e+1, &e, 10);
2408         if (*e && *e != '\n')
2409                 return -EINVAL;
2410         dev = MKDEV(major, minor);
2411         if (major != MAJOR(dev) ||
2412             minor != MINOR(dev))
2413                 return -EOVERFLOW;
2414
2415
2416         if (mddev->persistent) {
2417                 rdev = md_import_device(dev, mddev->major_version,
2418                                         mddev->minor_version);
2419                 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
2420                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
2421                                                        mdk_rdev_t, same_set);
2422                         err = super_types[mddev->major_version]
2423                                 .load_super(rdev, rdev0, mddev->minor_version);
2424                         if (err < 0)
2425                                 goto out;
2426                 }
2427         } else
2428                 rdev = md_import_device(dev, -1, -1);
2429
2430         if (IS_ERR(rdev))
2431                 return PTR_ERR(rdev);
2432         err = bind_rdev_to_array(rdev, mddev);
2433  out:
2434         if (err)
2435                 export_rdev(rdev);
2436         return err ? err : len;
2437 }
2438
2439 static struct md_sysfs_entry md_new_device =
2440 __ATTR(new_dev, 0200, null_show, new_dev_store);
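     /*
      * Example: on a system where 8:16 is /dev/sdb, `echo 8:16 > new_dev`
      * imports that device and binds it to the array; for an array with a
      * persistent superblock the device's superblock is read and validated
      * against the existing members first.
      */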
2441
2442 static ssize_t
2443 size_show(mddev_t *mddev, char *page)
2444 {
2445         return sprintf(page, "%llu\n", (unsigned long long)mddev->size);
2446 }
2447
2448 static int update_size(mddev_t *mddev, unsigned long size);
2449
2450 static ssize_t
2451 size_store(mddev_t *mddev, const char *buf, size_t len)
2452 {
2453         /* If array is inactive, we can reduce the component size, but
2454          * not increase it (except from 0).
2455          * If array is active, we can try an on-line resize
2456          */
2457         char *e;
2458         int err = 0;
2459         unsigned long long size = simple_strtoull(buf, &e, 10);
2460         if (!*buf || *buf == '\n' ||
2461             (*e && *e != '\n'))
2462                 return -EINVAL;
2463
2464         if (mddev->pers) {
2465                 err = update_size(mddev, size);
2466                 md_update_sb(mddev);
2467         } else {
2468                 if (mddev->size == 0 ||
2469                     mddev->size > size)
2470                         mddev->size = size;
2471                 else
2472                         err = -ENOSPC;
2473         }
2474         return err ? err : len;
2475 }
2476
2477 static struct md_sysfs_entry md_size =
2478 __ATTR(component_size, 0644, size_show, size_store);
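     /*
      * Example: `echo 10485760 > component_size` asks for 10*1024*1024
      * 1K-blocks (10GiB) per component; on an active array this is
      * attempted as an on-line resize via update_size(), otherwise it may
      * only shrink the recorded size (or set it from 0).
      */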
2479
2480
2481 /* Metadata version.
2482  * This is either 'none' for arrays with externally managed metadata,
2483  * or N.M for internally known formats
2484  */
2485 static ssize_t
2486 metadata_show(mddev_t *mddev, char *page)
2487 {
2488         if (mddev->persistent)
2489                 return sprintf(page, "%d.%d\n",
2490                                mddev->major_version, mddev->minor_version);
2491         else
2492                 return sprintf(page, "none\n");
2493 }
2494
2495 static ssize_t
2496 metadata_store(mddev_t *mddev, const char *buf, size_t len)
2497 {
2498         int major, minor;
2499         char *e;
2500         if (!list_empty(&mddev->disks))
2501                 return -EBUSY;
2502
2503         if (cmd_match(buf, "none")) {
2504                 mddev->persistent = 0;
2505                 mddev->major_version = 0;
2506                 mddev->minor_version = 90;
2507                 return len;
2508         }
2509         major = simple_strtoul(buf, &e, 10);
2510         if (e==buf || *e != '.')
2511                 return -EINVAL;
2512         buf = e+1;
2513         minor = simple_strtoul(buf, &e, 10);
2514         if (e==buf || *e != '\n')
2515                 return -EINVAL;
2516         if (major >= sizeof(super_types)/sizeof(super_types[0]) ||
2517             super_types[major].name == NULL)
2518                 return -ENOENT;
2519         mddev->major_version = major;
2520         mddev->minor_version = minor;
2521         mddev->persistent = 1;
2522         return len;
2523 }
2524
2525 static struct md_sysfs_entry md_metadata =
2526 __ATTR(metadata_version, 0644, metadata_show, metadata_store);
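     /*
      * Example: while the array has no member devices yet,
      * `echo 0.90 > metadata_version` selects the classic 0.90 superblock
      * format, and `echo none > metadata_version` selects non-persistent
      * (externally managed) metadata.
      */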
2527
2528 static ssize_t
2529 action_show(mddev_t *mddev, char *page)
2530 {
2531         char *type = "idle";
2532         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2533             test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
2534                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2535                         type = "reshape";
2536                 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2537                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2538                                 type = "resync";
2539                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2540                                 type = "check";
2541                         else
2542                                 type = "repair";
2543                 } else
2544                         type = "recover";
2545         }
2546         return sprintf(page, "%s\n", type);
2547 }
2548
2549 static ssize_t
2550 action_store(mddev_t *mddev, const char *page, size_t len)
2551 {
2552         if (!mddev->pers || !mddev->pers->sync_request)
2553                 return -EINVAL;
2554
2555         if (cmd_match(page, "idle")) {
2556                 if (mddev->sync_thread) {
2557                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2558                         md_unregister_thread(mddev->sync_thread);
2559                         mddev->sync_thread = NULL;
2560                         mddev->recovery = 0;
2561                 }
2562         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
2563                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
2564                 return -EBUSY;
2565         else if (cmd_match(page, "resync") || cmd_match(page, "recover"))
2566                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2567         else if (cmd_match(page, "reshape")) {
2568                 int err;
2569                 if (mddev->pers->start_reshape == NULL)
2570                         return -EINVAL;
2571                 err = mddev->pers->start_reshape(mddev);
2572                 if (err)
2573                         return err;
2574         } else {
2575                 if (cmd_match(page, "check"))
2576                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
2577                 else if (!cmd_match(page, "repair"))
2578                         return -EINVAL;
2579                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
2580                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
2581         }
2582         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
2583         md_wakeup_thread(mddev->thread);
2584         return len;
2585 }
2586
2587 static ssize_t
2588 mismatch_cnt_show(mddev_t *mddev, char *page)
2589 {
2590         return sprintf(page, "%llu\n",
2591                        (unsigned long long) mddev->resync_mismatches);
2592 }
2593
2594 static struct md_sysfs_entry
2595 md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
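     /*
      * Example: `echo check > sync_action` starts a read-only scrub of a
      * redundant array, `echo repair > sync_action` also rewrites any
      * mismatches it finds, and `echo idle > sync_action` interrupts
      * whichever sync thread is currently running.
      */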
2596
2597
2598 static struct md_sysfs_entry
2599 md_mismatches = __ATTR_RO(mismatch_cnt);
2600
2601 static ssize_t
2602 sync_min_show(mddev_t *mddev, char *page)
2603 {
2604         return sprintf(page, "%d (%s)\n", speed_min(mddev),
2605                        mddev->sync_speed_min ? "local": "system");
2606 }
2607
2608 static ssize_t
2609 sync_min_store(mddev_t *mddev, const char *buf, size_t len)
2610 {
2611         int min;
2612         char *e;
2613         if (strncmp(buf, "system", 6)==0) {
2614                 mddev->sync_speed_min = 0;
2615                 return len;
2616         }
2617         min = simple_strtoul(buf, &e, 10);
2618         if (buf == e || (*e && *e != '\n') || min <= 0)
2619                 return -EINVAL;
2620         mddev->sync_speed_min = min;
2621         return len;
2622 }
2623
2624 static struct md_sysfs_entry md_sync_min =
2625 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);
2626
2627 static ssize_t
2628 sync_max_show(mddev_t *mddev, char *page)
2629 {
2630         return sprintf(page, "%d (%s)\n", speed_max(mddev),
2631                        mddev->sync_speed_max ? "local": "system");
2632 }
2633
2634 static ssize_t
2635 sync_max_store(mddev_t *mddev, const char *buf, size_t len)
2636 {
2637         int max;
2638         char *e;
2639         if (strncmp(buf, "system", 6)==0) {
2640                 mddev->sync_speed_max = 0;
2641                 return len;
2642         }
2643         max = simple_strtoul(buf, &e, 10);
2644         if (buf == e || (*e && *e != '\n') || max <= 0)
2645                 return -EINVAL;
2646         mddev->sync_speed_max = max;
2647         return len;
2648 }
2649
2650 static struct md_sysfs_entry md_sync_max =
2651 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);
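     /*
      * Example: `echo 50000 > sync_speed_min` pins a per-array resync floor
      * of 50000 KB/sec, overriding the global limit; writing the word
      * "system" reverts to the system-wide speed_limit_min/_max sysctls.
      */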
2652
2653
2654 static ssize_t
2655 sync_speed_show(mddev_t *mddev, char *page)
2656 {
2657         unsigned long resync, dt, db;
2658         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2659         dt = ((jiffies - mddev->resync_mark) / HZ);
2660         if (!dt) dt++;
2661         db = resync - (mddev->resync_mark_cnt);
2662         return sprintf(page, "%ld\n", db/dt/2); /* K/sec */
2663 }
2664
2665 static struct md_sysfs_entry
2666 md_sync_speed = __ATTR_RO(sync_speed);
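     /*
      * Worked example: 1,000,000 sectors resynced over the last 10 seconds
      * gives db/dt/2 = 1000000/10/2 = 50000, i.e. 50000 KB/sec (two
      * 512-byte sectors per KB).
      */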
2667
2668 static ssize_t
2669 sync_completed_show(mddev_t *mddev, char *page)
2670 {
2671         unsigned long max_blocks, resync;
2672
2673         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2674                 max_blocks = mddev->resync_max_sectors;
2675         else
2676                 max_blocks = mddev->size << 1;
2677
2678         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
2679         return sprintf(page, "%lu / %lu\n", resync, max_blocks);
2680 }
2681
2682 static struct md_sysfs_entry
2683 md_sync_completed = __ATTR_RO(sync_completed);
2684
2685 static ssize_t
2686 suspend_lo_show(mddev_t *mddev, char *page)
2687 {
2688         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
2689 }
2690
2691 static ssize_t
2692 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len)
2693 {
2694         char *e;
2695         unsigned long long new = simple_strtoull(buf, &e, 10);
2696
2697         if (mddev->pers->quiesce == NULL)
2698                 return -EINVAL;
2699         if (buf == e || (*e && *e != '\n'))
2700                 return -EINVAL;
2701         if (new >= mddev->suspend_hi ||
2702             (new > mddev->suspend_lo && new < mddev->suspend_hi)) {
2703                 mddev->suspend_lo = new;
2704                 mddev->pers->quiesce(mddev, 2);
2705                 return len;
2706         } else
2707                 return -EINVAL;
2708 }
2709 static struct md_sysfs_entry md_suspend_lo =
2710 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);
2711
2712
2713 static ssize_t
2714 suspend_hi_show(mddev_t *mddev, char *page)
2715 {
2716         return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
2717 }
2718
2719 static ssize_t
2720 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
2721 {
2722         char *e;
2723         unsigned long long new = simple_strtoull(buf, &e, 10);
2724
2725         if (mddev->pers->quiesce == NULL)
2726                 return -EINVAL;
2727         if (buf == e || (*e && *e != '\n'))
2728                 return -EINVAL;
2729         if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
2730             (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
2731                 mddev->suspend_hi = new;
2732                 mddev->pers->quiesce(mddev, 1);
2733                 mddev->pers->quiesce(mddev, 0);
2734                 return len;
2735         } else
2736                 return -EINVAL;
2737 }
2738 static struct md_sysfs_entry md_suspend_hi =
2739 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
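     /*
      * Example: `echo 0 > suspend_lo` followed by `echo 8192 > suspend_hi`
      * asks the personality to quiesce and hold off I/O to the first 8192
      * sectors while they are reconfigured; raising suspend_lo to or past
      * suspend_hi lifts the suspension again.
      */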
2740
2741
2742 static struct attribute *md_default_attrs[] = {
2743         &md_level.attr,
2744         &md_raid_disks.attr,
2745         &md_chunk_size.attr,
2746         &md_size.attr,
2747         &md_metadata.attr,
2748         &md_new_device.attr,
2749         &md_safe_delay.attr,
2750         &md_array_state.attr,
2751         NULL,
2752 };
2753
2754 static struct attribute *md_redundancy_attrs[] = {
2755         &md_scan_mode.attr,
2756         &md_mismatches.attr,
2757         &md_sync_min.attr,
2758         &md_sync_max.attr,
2759         &md_sync_speed.attr,
2760         &md_sync_completed.attr,
2761         &md_suspend_lo.attr,
2762         &md_suspend_hi.attr,
2763         NULL,
2764 };
2765 static struct attribute_group md_redundancy_group = {
2766         .name = NULL,
2767         .attrs = md_redundancy_attrs,
2768 };
2769
2770
2771 static ssize_t
2772 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
2773 {
2774         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2775         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2776         ssize_t rv;
2777
2778         if (!entry->show)
2779                 return -EIO;
2780         rv = mddev_lock(mddev);
2781         if (!rv) {
2782                 rv = entry->show(mddev, page);
2783                 mddev_unlock(mddev);
2784         }
2785         return rv;
2786 }
2787
2788 static ssize_t
2789 md_attr_store(struct kobject *kobj, struct attribute *attr,
2790               const char *page, size_t length)
2791 {
2792         struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
2793         mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
2794         ssize_t rv;
2795
2796         if (!entry->store)
2797                 return -EIO;
2798         rv = mddev_lock(mddev);
2799         if (!rv) {
2800                 rv = entry->store(mddev, page, length);
2801                 mddev_unlock(mddev);
2802         }
2803         return rv;
2804 }
2805
2806 static void md_free(struct kobject *ko)
2807 {
2808         mddev_t *mddev = container_of(ko, mddev_t, kobj);
2809         kfree(mddev);
2810 }
2811
2812 static struct sysfs_ops md_sysfs_ops = {
2813         .show   = md_attr_show,
2814         .store  = md_attr_store,
2815 };
2816 static struct kobj_type md_ktype = {
2817         .release        = md_free,
2818         .sysfs_ops      = &md_sysfs_ops,
2819         .default_attrs  = md_default_attrs,
2820 };
2821
2822 int mdp_major = 0;
2823
2824 static struct kobject *md_probe(dev_t dev, int *part, void *data)
2825 {
2826         static DEFINE_MUTEX(disks_mutex);
2827         mddev_t *mddev = mddev_find(dev);
2828         struct gendisk *disk;
2829         int partitioned = (MAJOR(dev) != MD_MAJOR);
2830         int shift = partitioned ? MdpMinorShift : 0;
2831         int unit = MINOR(dev) >> shift;
2832
2833         if (!mddev)
2834                 return NULL;
2835
2836         mutex_lock(&disks_mutex);
2837         if (mddev->gendisk) {
2838                 mutex_unlock(&disks_mutex);
2839                 mddev_put(mddev);
2840                 return NULL;
2841         }
2842         disk = alloc_disk(1 << shift);
2843         if (!disk) {
2844                 mutex_unlock(&disks_mutex);
2845                 mddev_put(mddev);
2846                 return NULL;
2847         }
2848         disk->major = MAJOR(dev);
2849         disk->first_minor = unit << shift;
2850         if (partitioned) {
2851                 sprintf(disk->disk_name, "md_d%d", unit);
2852                 sprintf(disk->devfs_name, "md/d%d", unit);
2853         } else {
2854                 sprintf(disk->disk_name, "md%d", unit);
2855                 sprintf(disk->devfs_name, "md/%d", unit);
2856         }
2857         disk->fops = &md_fops;
2858         disk->private_data = mddev;
2859         disk->queue = mddev->queue;
2860         add_disk(disk);
2861         mddev->gendisk = disk;
2862         mutex_unlock(&disks_mutex);
2863         mddev->kobj.parent = &disk->kobj;
2864         mddev->kobj.k_name = NULL;
2865         snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md");
2866         mddev->kobj.ktype = &md_ktype;
2867         kobject_register(&mddev->kobj);
2868         return NULL;
2869 }
2870
2871 static void md_safemode_timeout(unsigned long data)
2872 {
2873         mddev_t *mddev = (mddev_t *) data;
2874
2875         mddev->safemode = 1;
2876         md_wakeup_thread(mddev->thread);
2877 }
2878
2879 static int start_dirty_degraded;
2880
2881 static int do_md_run(mddev_t * mddev)
2882 {
2883         int err;
2884         int chunk_size;
2885         struct list_head *tmp;
2886         mdk_rdev_t *rdev;
2887         struct gendisk *disk;
2888         struct mdk_personality *pers;
2889         char b[BDEVNAME_SIZE];
2890
2891         if (list_empty(&mddev->disks))
2892                 /* cannot run an array with no devices.. */
2893                 return -EINVAL;
2894
2895         if (mddev->pers)
2896                 return -EBUSY;
2897
2898         /*
2899          * Analyze all RAID superblock(s)
2900          */
2901         if (!mddev->raid_disks)
2902                 analyze_sbs(mddev);
2903
2904         chunk_size = mddev->chunk_size;
2905
2906         if (chunk_size) {
2907                 if (chunk_size > MAX_CHUNK_SIZE) {
2908                         printk(KERN_ERR "too big chunk_size: %d > %d\n",
2909                                 chunk_size, MAX_CHUNK_SIZE);
2910                         return -EINVAL;
2911                 }
2912                 /*
2913                  * chunk-size has to be a power of 2 and a multiple of PAGE_SIZE
2914                  */
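                     /* e.g. 65536 passes: ffz(~65536) == 16 and 1<<16 == 65536;
                      * 98304 (96KB) fails: 1 << ffz(~98304) == 32768 != 98304.
                      */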
2915                 if ( (1 << ffz(~chunk_size)) != chunk_size) {
2916                         printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
2917                         return -EINVAL;
2918                 }
2919                 if (chunk_size < PAGE_SIZE) {
2920                         printk(KERN_ERR "too small chunk_size: %d < %ld\n",
2921                                 chunk_size, PAGE_SIZE);
2922                         return -EINVAL;
2923                 }
2924
2925                 /* devices must have minimum size of one chunk */
2926                 ITERATE_RDEV(mddev,rdev,tmp) {
2927                         if (test_bit(Faulty, &rdev->flags))
2928                                 continue;
2929                         if (rdev->size < chunk_size / 1024) {
2930                                 printk(KERN_WARNING
2931                                         "md: Dev %s smaller than chunk_size:"
2932                                         " %lluk < %dk\n",
2933                                         bdevname(rdev->bdev,b),
2934                                         (unsigned long long)rdev->size,
2935                                         chunk_size / 1024);
2936                                 return -EINVAL;
2937                         }
2938                 }
2939         }
2940
2941 #ifdef CONFIG_KMOD
2942         if (mddev->level != LEVEL_NONE)
2943                 request_module("md-level-%d", mddev->level);
2944         else if (mddev->clevel[0])
2945                 request_module("md-%s", mddev->clevel);
2946 #endif
2947
2948         /*
2949          * Drop all container device buffers, from now on
2950          * the only valid external interface is through the md
2951          * device.
2952          * Also find largest hardsector size
2953          */
2954         ITERATE_RDEV(mddev,rdev,tmp) {
2955                 if (test_bit(Faulty, &rdev->flags))
2956                         continue;
2957                 sync_blockdev(rdev->bdev);
2958                 invalidate_bdev(rdev->bdev, 0);
2959         }
2960
2961         md_probe(mddev->unit, NULL, NULL);
2962         disk = mddev->gendisk;
2963         if (!disk)
2964                 return -ENOMEM;
2965
2966         spin_lock(&pers_lock);
2967         pers = find_pers(mddev->level, mddev->clevel);
2968         if (!pers || !try_module_get(pers->owner)) {
2969                 spin_unlock(&pers_lock);
2970                 if (mddev->level != LEVEL_NONE)
2971                         printk(KERN_WARNING "md: personality for level %d is not loaded!\n",
2972                                mddev->level);
2973                 else
2974                         printk(KERN_WARNING "md: personality for level %s is not loaded!\n",
2975                                mddev->clevel);
2976                 return -EINVAL;
2977         }
2978         mddev->pers = pers;
2979         spin_unlock(&pers_lock);
2980         mddev->level = pers->level;
2981         strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
2982
2983         if (mddev->reshape_position != MaxSector &&
2984             pers->start_reshape == NULL) {
2985                 /* This personality cannot handle reshaping... */
2986                 mddev->pers = NULL;
2987                 module_put(pers->owner);
2988                 return -EINVAL;
2989         }
2990
2991         mddev->recovery = 0;
2992         mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */
2993         mddev->barriers_work = 1;
2994         mddev->ok_start_degraded = start_dirty_degraded;
2995
2996         if (start_readonly)
2997                 mddev->ro = 2; /* read-only, but switch on first write */
2998
2999         err = mddev->pers->run(mddev);
3000         if (!err && mddev->pers->sync_request) {
3001                 err = bitmap_create(mddev);
3002                 if (err) {
3003                         printk(KERN_ERR "%s: failed to create bitmap (%d)\n",
3004                                mdname(mddev), err);
3005                         mddev->pers->stop(mddev);
3006                 }
3007         }
3008         if (err) {
3009                 printk(KERN_ERR "md: pers->run() failed ...\n");
3010                 module_put(mddev->pers->owner);
3011                 mddev->pers = NULL;
3012                 bitmap_destroy(mddev);
3013                 return err;
3014         }
3015         if (mddev->pers->sync_request)
3016                 sysfs_create_group(&mddev->kobj, &md_redundancy_group);
3017         else if (mddev->ro == 2) /* auto-readonly not meaningful */
3018                 mddev->ro = 0;
3019
3020         atomic_set(&mddev->writes_pending,0);
3021         mddev->safemode = 0;
3022         mddev->safemode_timer.function = md_safemode_timeout;
3023         mddev->safemode_timer.data = (unsigned long) mddev;
3024         mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
3025         mddev->in_sync = 1;
3026
3027         ITERATE_RDEV(mddev,rdev,tmp)
3028                 if (rdev->raid_disk >= 0) {
3029                         char nm[20];
3030                         sprintf(nm, "rd%d", rdev->raid_disk);
3031                         sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
3032                 }
3033         
3034         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3035         md_wakeup_thread(mddev->thread);
3036         
3037         if (mddev->sb_dirty)
3038                 md_update_sb(mddev);
3039
3040         set_capacity(disk, mddev->array_size<<1);
3041
3042         /* If we call blk_queue_make_request here, it will
3043          * re-initialise max_sectors etc which may have been
3044          * refined inside ->run().  So just set the bits we need to set.
3045          * Most initialisation happened when we called
3046          * blk_queue_make_request(..., md_fail_request)
3047          * earlier.
3048          */
3049         mddev->queue->queuedata = mddev;
3050         mddev->queue->make_request_fn = mddev->pers->make_request;
3051
3052         /* If there is a partially-recovered drive we need to
3053          * start recovery here.  If we leave it to md_check_recovery,
3054          * it will remove the drives and not do the right thing
3055          */
3056         if (mddev->degraded) {
3057                 struct list_head *rtmp;
3058                 int spares = 0;
3059                 ITERATE_RDEV(mddev,rdev,rtmp)
3060                         if (rdev->raid_disk >= 0 &&
3061                             !test_bit(In_sync, &rdev->flags) &&
3062                             !test_bit(Faulty, &rdev->flags))
3063                                 /* complete an interrupted recovery */
3064                                 spares++;
3065                 if (spares && mddev->pers->sync_request) {
3066                         mddev->recovery = 0;
3067                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3068                         mddev->sync_thread = md_register_thread(md_do_sync,
3069                                                                 mddev,
3070                                                                 "%s_resync");
3071                         if (!mddev->sync_thread) {
3072                                 printk(KERN_ERR "%s: could not start resync"
3073                                        " thread...\n",
3074                                        mdname(mddev));
3075                                 /* leave the spares where they are, it shouldn't hurt */
3076                                 mddev->recovery = 0;
3077                         } else
3078                                 md_wakeup_thread(mddev->sync_thread);
3079                 }
3080         }
3081
3082         mddev->changed = 1;
3083         md_new_event(mddev);
3084         return 0;
3085 }
3086
3087 static int restart_array(mddev_t *mddev)
3088 {
3089         struct gendisk *disk = mddev->gendisk;
3090         int err;
3091
3092         /*
3093          * Complain if it has no devices
3094          */
3095         err = -ENXIO;
3096         if (list_empty(&mddev->disks))
3097                 goto out;
3098
3099         if (mddev->pers) {
3100                 err = -EBUSY;
3101                 if (!mddev->ro)
3102                         goto out;
3103
3104                 mddev->safemode = 0;
3105                 mddev->ro = 0;
3106                 set_disk_ro(disk, 0);
3107
3108                 printk(KERN_INFO "md: %s switched to read-write mode.\n",
3109                         mdname(mddev));
3110                 /*
3111                  * Kick recovery or resync if necessary
3112                  */
3113                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3114                 md_wakeup_thread(mddev->thread);
3115                 md_wakeup_thread(mddev->sync_thread);
3116                 err = 0;
3117         } else
3118                 err = -EINVAL;
3119
3120 out:
3121         return err;
3122 }
3123
3124 /* similar to deny_write_access, but accounts for our holding a reference
3125  * to the file ourselves */
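/* Editorial note (an inference from the VFS write-count convention, not text
 * from the original source): forcing i_writecount to -1 below is the same
 * negative-count trick deny_write_access() uses to refuse new writers; the
 * "> 1" test, rather than "> 0", tolerates the one count attributable to
 * md's own reference to the bitmap file.
 */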
3126 static int deny_bitmap_write_access(struct file * file)
3127 {
3128         struct inode *inode = file->f_mapping->host;
3129
3130         spin_lock(&inode->i_lock);
3131         if (atomic_read(&inode->i_writecount) > 1) {
3132                 spin_unlock(&inode->i_lock);
3133                 return -ETXTBSY;
3134         }
3135         atomic_set(&inode->i_writecount, -1);
3136         spin_unlock(&inode->i_lock);
3137
3138         return 0;
3139 }
3140
3141 static void restore_bitmap_write_access(struct file *file)
3142 {
3143         struct inode *inode = file->f_mapping->host;
3144
3145         spin_lock(&inode->i_lock);
3146         atomic_set(&inode->i_writecount, 1);
3147         spin_unlock(&inode->i_lock);
3148 }
3149
3150 /* mode:
3151  *   0 - completely stop and disassemble array
3152  *   1 - switch to readonly
3153  *   2 - stop but do not disassemble array
3154  */
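/* Editorial note: within this file, mode 0 is requested by the STOP_ARRAY
 * ioctl and by the autorun_array() error path, and mode 1 by STOP_ARRAY_RO
 * (see md_ioctl() below); mode 2 stops the personality but skips the
 * mode==0 cleanup, so the member devices stay attached.
 */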
3155 static int do_md_stop(mddev_t * mddev, int mode)
3156 {
3157         int err = 0;
3158         struct gendisk *disk = mddev->gendisk;
3159
3160         if (mddev->pers) {
3161                 if (atomic_read(&mddev->active)>2) {
3162                         printk("md: %s still in use.\n",mdname(mddev));
3163                         return -EBUSY;
3164                 }
3165
3166                 if (mddev->sync_thread) {
3167                         set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3168                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3169                         md_unregister_thread(mddev->sync_thread);
3170                         mddev->sync_thread = NULL;
3171                 }
3172
3173                 del_timer_sync(&mddev->safemode_timer);
3174
3175                 invalidate_partition(disk, 0);
3176
3177                 switch(mode) {
3178                 case 1: /* readonly */
3179                         err  = -ENXIO;
3180                         if (mddev->ro==1)
3181                                 goto out;
3182                         mddev->ro = 1;
3183                         break;
3184                 case 0: /* disassemble */
3185                 case 2: /* stop */
3186                         bitmap_flush(mddev);
3187                         md_super_wait(mddev);
3188                         if (mddev->ro)
3189                                 set_disk_ro(disk, 0);
3190                         blk_queue_make_request(mddev->queue, md_fail_request);
3191                         mddev->pers->stop(mddev);
3192                         if (mddev->pers->sync_request)
3193                                 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3194
3195                         module_put(mddev->pers->owner);
3196                         mddev->pers = NULL;
3197                         if (mddev->ro)
3198                                 mddev->ro = 0;
3199                 }
3200                 if (!mddev->in_sync || mddev->sb_dirty) {
3201                         /* mark array as shutdown cleanly */
3202                         mddev->in_sync = 1;
3203                         md_update_sb(mddev);
3204                 }
3205                 if (mode == 1)
3206                         set_disk_ro(disk, 1);
3207                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3208         }
3209
3210         /*
3211          * Free resources if final stop
3212          */
3213         if (mode == 0) {
3214                 mdk_rdev_t *rdev;
3215                 struct list_head *tmp;
3216                 struct gendisk *disk;
3217                 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev));
3218
3219                 bitmap_destroy(mddev);
3220                 if (mddev->bitmap_file) {
3221                         restore_bitmap_write_access(mddev->bitmap_file);
3222                         fput(mddev->bitmap_file);
3223                         mddev->bitmap_file = NULL;
3224                 }
3225                 mddev->bitmap_offset = 0;
3226
3227                 ITERATE_RDEV(mddev,rdev,tmp)
3228                         if (rdev->raid_disk >= 0) {
3229                                 char nm[20];
3230                                 sprintf(nm, "rd%d", rdev->raid_disk);
3231                                 sysfs_remove_link(&mddev->kobj, nm);
3232                         }
3233
3234                 export_array(mddev);
3235
3236                 mddev->array_size = 0;
3237                 mddev->size = 0;
3238                 mddev->raid_disks = 0;
3239
3240                 disk = mddev->gendisk;
3241                 if (disk)
3242                         set_capacity(disk, 0);
3243                 mddev->changed = 1;
3244         } else if (mddev->pers)
3245                 printk(KERN_INFO "md: %s switched to read-only mode.\n",
3246                         mdname(mddev));
3247         err = 0;
3248         md_new_event(mddev);
3249 out:
3250         return err;
3251 }
3252
3253 static void autorun_array(mddev_t *mddev)
3254 {
3255         mdk_rdev_t *rdev;
3256         struct list_head *tmp;
3257         int err;
3258
3259         if (list_empty(&mddev->disks))
3260                 return;
3261
3262         printk(KERN_INFO "md: running: ");
3263
3264         ITERATE_RDEV(mddev,rdev,tmp) {
3265                 char b[BDEVNAME_SIZE];
3266                 printk("<%s>", bdevname(rdev->bdev,b));
3267         }
3268         printk("\n");
3269
3270         err = do_md_run (mddev);
3271         if (err) {
3272                 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
3273                 do_md_stop (mddev, 0);
3274         }
3275 }
3276
3277 /*
3278  * let's try to run arrays based on all disks that have arrived
3279  * until now. (those are in pending_raid_disks)
3280  *
3281  * the method: pick the first pending disk, collect all disks with
3282  * the same UUID, remove all from the pending list and put them into
3283  * the 'same_array' list. Then order this list based on superblock
3284  * update time (freshest comes first), kick out 'old' disks and
3285  * compare superblocks. If everything's fine then run it.
3286  *
3287  * If "unit" is allocated, then bump its reference count
3288  */
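/* Editorial sketch (device names are hypothetical, not from the source):
 * with pending rdevs {sdb1, sdc1, sdd1} where sdb1 and sdc1 carry matching
 * 0.90 superblocks, one pass of the loop below moves sdb1/sdc1 onto the
 * 'candidates' list, binds them to the mddev for their preferred minor and
 * calls autorun_array(); sdd1 remains on pending_raid_disks for the next
 * iteration.
 */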
3289 static void autorun_devices(int part)
3290 {
3291         struct list_head *tmp;
3292         mdk_rdev_t *rdev0, *rdev;
3293         mddev_t *mddev;
3294         char b[BDEVNAME_SIZE];
3295
3296         printk(KERN_INFO "md: autorun ...\n");
3297         while (!list_empty(&pending_raid_disks)) {
3298                 dev_t dev;
3299                 LIST_HEAD(candidates);
3300                 rdev0 = list_entry(pending_raid_disks.next,
3301                                          mdk_rdev_t, same_set);
3302
3303                 printk(KERN_INFO "md: considering %s ...\n",
3304                         bdevname(rdev0->bdev,b));
3305                 INIT_LIST_HEAD(&candidates);
3306                 ITERATE_RDEV_PENDING(rdev,tmp)
3307                         if (super_90_load(rdev, rdev0, 0) >= 0) {
3308                                 printk(KERN_INFO "md:  adding %s ...\n",
3309                                         bdevname(rdev->bdev,b));
3310                                 list_move(&rdev->same_set, &candidates);
3311                         }
3312                 /*
3313                  * now we have a set of devices, with all of them having
3314                  * mostly sane superblocks. It's time to allocate the
3315                  * mddev.
3316                  */
3317                 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) {
3318                         printk(KERN_INFO "md: unit number in %s is bad: %d\n",
3319                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
3320                         break;
3321                 }
3322                 if (part)
3323                         dev = MKDEV(mdp_major,
3324                                     rdev0->preferred_minor << MdpMinorShift);
3325                 else
3326                         dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
3327
3328                 md_probe(dev, NULL, NULL);
3329                 mddev = mddev_find(dev);
3330                 if (!mddev) {
3331                         printk(KERN_ERR 
3332                                 "md: cannot allocate memory for md drive.\n");
3333                         break;
3334                 }
3335                 if (mddev_lock(mddev)) 
3336                         printk(KERN_WARNING "md: %s locked, cannot run\n",
3337                                mdname(mddev));
3338                 else if (mddev->raid_disks || mddev->major_version
3339                          || !list_empty(&mddev->disks)) {
3340                         printk(KERN_WARNING 
3341                                 "md: %s already running, cannot run %s\n",
3342                                 mdname(mddev), bdevname(rdev0->bdev,b));
3343                         mddev_unlock(mddev);
3344                 } else {
3345                         printk(KERN_INFO "md: created %s\n", mdname(mddev));
3346                         ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
3347                                 list_del_init(&rdev->same_set);
3348                                 if (bind_rdev_to_array(rdev, mddev))
3349                                         export_rdev(rdev);
3350                         }
3351                         autorun_array(mddev);
3352                         mddev_unlock(mddev);
3353                 }
3354                 /* on success, candidates will be empty, on error
3355                  * it won't...
3356                  */
3357                 ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
3358                         export_rdev(rdev);
3359                 mddev_put(mddev);
3360         }
3361         printk(KERN_INFO "md: ... autorun DONE.\n");
3362 }
3363
3364 /*
3365  * import RAID devices based on one partition
3366  * if possible, the array gets run as well.
3367  */
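/* Editorial note: this helper backs the deprecated START_ARRAY ioctl handled
 * in md_ioctl() below; as the code itself notes, it only understands 0.90.0
 * superblocks.
 */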
3368
3369 static int autostart_array(dev_t startdev)
3370 {
3371         char b[BDEVNAME_SIZE];
3372         int err = -EINVAL, i;
3373         mdp_super_t *sb = NULL;
3374         mdk_rdev_t *start_rdev = NULL, *rdev;
3375
3376         start_rdev = md_import_device(startdev, 0, 0);
3377         if (IS_ERR(start_rdev))
3378                 return err;
3379
3380
3381         /* NOTE: this can only work for 0.90.0 superblocks */
3382         sb = (mdp_super_t*)page_address(start_rdev->sb_page);
3383         if (sb->major_version != 0 ||
3384             sb->minor_version != 90 ) {
3385                 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n");
3386                 export_rdev(start_rdev);
3387                 return err;
3388         }
3389
3390         if (test_bit(Faulty, &start_rdev->flags)) {
3391                 printk(KERN_WARNING 
3392                         "md: can not autostart based on faulty %s!\n",
3393                         bdevname(start_rdev->bdev,b));
3394                 export_rdev(start_rdev);
3395                 return err;
3396         }
3397         list_add(&start_rdev->same_set, &pending_raid_disks);
3398
3399         for (i = 0; i < MD_SB_DISKS; i++) {
3400                 mdp_disk_t *desc = sb->disks + i;
3401                 dev_t dev = MKDEV(desc->major, desc->minor);
3402
3403                 if (!dev)
3404                         continue;
3405                 if (dev == startdev)
3406                         continue;
3407                 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor)
3408                         continue;
3409                 rdev = md_import_device(dev, 0, 0);
3410                 if (IS_ERR(rdev))
3411                         continue;
3412
3413                 list_add(&rdev->same_set, &pending_raid_disks);
3414         }
3415
3416         /*
3417          * possibly return error codes in the future
3418          */
3419         autorun_devices(0);
3420         return 0;
3421
3422 }
3423
3424
3425 static int get_version(void __user * arg)
3426 {
3427         mdu_version_t ver;
3428
3429         ver.major = MD_MAJOR_VERSION;
3430         ver.minor = MD_MINOR_VERSION;
3431         ver.patchlevel = MD_PATCHLEVEL_VERSION;
3432
3433         if (copy_to_user(arg, &ver, sizeof(ver)))
3434                 return -EFAULT;
3435
3436         return 0;
3437 }
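/* Illustrative userspace call (an editorial sketch; the device path is
 * hypothetical):
 *
 *      mdu_version_t ver;
 *      int fd = open("/dev/md0", O_RDONLY);
 *      if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
 *              printf("md driver %d.%d.%d\n",
 *                     ver.major, ver.minor, ver.patchlevel);
 *
 * RAID_VERSION is dispatched in md_ioctl() below before the mddev lock is
 * taken, since it does not touch any particular array.
 */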
3438
3439 static int get_array_info(mddev_t * mddev, void __user * arg)
3440 {
3441         mdu_array_info_t info;
3442         int nr,working,active,failed,spare;
3443         mdk_rdev_t *rdev;
3444         struct list_head *tmp;
3445
3446         nr=working=active=failed=spare=0;
3447         ITERATE_RDEV(mddev,rdev,tmp) {
3448                 nr++;
3449                 if (test_bit(Faulty, &rdev->flags))
3450                         failed++;
3451                 else {
3452                         working++;
3453                         if (test_bit(In_sync, &rdev->flags))
3454                                 active++;       
3455                         else
3456                                 spare++;
3457                 }
3458         }
3459
3460         info.major_version = mddev->major_version;
3461         info.minor_version = mddev->minor_version;
3462         info.patch_version = MD_PATCHLEVEL_VERSION;
3463         info.ctime         = mddev->ctime;
3464         info.level         = mddev->level;
3465         info.size          = mddev->size;
3466         if (info.size != mddev->size) /* overflow */
3467                 info.size = -1;
3468         info.nr_disks      = nr;
3469         info.raid_disks    = mddev->raid_disks;
3470         info.md_minor      = mddev->md_minor;
3471         info.not_persistent= !mddev->persistent;
3472
3473         info.utime         = mddev->utime;
3474         info.state         = 0;
3475         if (mddev->in_sync)
3476                 info.state = (1<<MD_SB_CLEAN);
3477         if (mddev->bitmap && mddev->bitmap_offset)
3478                 info.state |= (1<<MD_SB_BITMAP_PRESENT); /* don't clobber MD_SB_CLEAN */
3479         info.active_disks  = active;
3480         info.working_disks = working;
3481         info.failed_disks  = failed;
3482         info.spare_disks   = spare;
3483
3484         info.layout        = mddev->layout;
3485         info.chunk_size    = mddev->chunk_size;
3486
3487         if (copy_to_user(arg, &info, sizeof(info)))
3488                 return -EFAULT;
3489
3490         return 0;
3491 }
3492
3493 static int get_bitmap_file(mddev_t * mddev, void __user * arg)
3494 {
3495         mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
3496         char *ptr, *buf = NULL;
3497         int err = -ENOMEM;
3498
3499         file = kmalloc(sizeof(*file), GFP_KERNEL);
3500         if (!file)
3501                 goto out;
3502
3503         /* bitmap disabled, zero the first byte and copy out */
3504         if (!mddev->bitmap || !mddev->bitmap->file) {
3505                 file->pathname[0] = '\0';
3506                 goto copy_out;
3507         }
3508
3509         buf = kmalloc(sizeof(file->pathname), GFP_KERNEL);
3510         if (!buf)
3511                 goto out;
3512
3513         ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname));
3514         if (!ptr)
3515                 goto out;
3516
3517         strcpy(file->pathname, ptr);
3518
3519 copy_out:
3520         err = 0;
3521         if (copy_to_user(arg, file, sizeof(*file)))
3522                 err = -EFAULT;
3523 out:
3524         kfree(buf);
3525         kfree(file);
3526         return err;
3527 }
3528
3529 static int get_disk_info(mddev_t * mddev, void __user * arg)
3530 {
3531         mdu_disk_info_t info;
3532         unsigned int nr;
3533         mdk_rdev_t *rdev;
3534
3535         if (copy_from_user(&info, arg, sizeof(info)))
3536                 return -EFAULT;
3537
3538         nr = info.number;
3539
3540         rdev = find_rdev_nr(mddev, nr);
3541         if (rdev) {
3542                 info.major = MAJOR(rdev->bdev->bd_dev);
3543                 info.minor = MINOR(rdev->bdev->bd_dev);
3544                 info.raid_disk = rdev->raid_disk;
3545                 info.state = 0;
3546                 if (test_bit(Faulty, &rdev->flags))
3547                         info.state |= (1<<MD_DISK_FAULTY);
3548                 else if (test_bit(In_sync, &rdev->flags)) {
3549                         info.state |= (1<<MD_DISK_ACTIVE);
3550                         info.state |= (1<<MD_DISK_SYNC);
3551                 }
3552                 if (test_bit(WriteMostly, &rdev->flags))
3553                         info.state |= (1<<MD_DISK_WRITEMOSTLY);
3554         } else {
3555                 info.major = info.minor = 0;
3556                 info.raid_disk = -1;
3557                 info.state = (1<<MD_DISK_REMOVED);
3558         }
3559
3560         if (copy_to_user(arg, &info, sizeof(info)))
3561                 return -EFAULT;
3562
3563         return 0;
3564 }
3565
3566 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
3567 {
3568         char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
3569         mdk_rdev_t *rdev;
3570         dev_t dev = MKDEV(info->major,info->minor);
3571
3572         if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
3573                 return -EOVERFLOW;
3574
3575         if (!mddev->raid_disks) {
3576                 int err;
3577                 /* expecting a device which has a superblock */
3578                 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
3579                 if (IS_ERR(rdev)) {
3580                         printk(KERN_WARNING 
3581                                 "md: md_import_device returned %ld\n",
3582                                 PTR_ERR(rdev));
3583                         return PTR_ERR(rdev);
3584                 }
3585                 if (!list_empty(&mddev->disks)) {
3586                         mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
3587                                                         mdk_rdev_t, same_set);
3588                         int err = super_types[mddev->major_version]
3589                                 .load_super(rdev, rdev0, mddev->minor_version);
3590                         if (err < 0) {
3591                                 printk(KERN_WARNING 
3592                                         "md: %s has different UUID to %s\n",
3593                                         bdevname(rdev->bdev,b), 
3594                                         bdevname(rdev0->bdev,b2));
3595                                 export_rdev(rdev);
3596                                 return -EINVAL;
3597                         }
3598                 }
3599                 err = bind_rdev_to_array(rdev, mddev);
3600                 if (err)
3601                         export_rdev(rdev);
3602                 return err;
3603         }
3604
3605         /*
3606          * add_new_disk can be used once the array is assembled
3607          * to add "hot spares".  They must already have a superblock
3608          * written
3609          */
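        /* Editorial sketch (field values are hypothetical): a hot-spare add
         * through ADD_NEW_DISK typically supplies only .major/.minor to name
         * the device, plus optionally (1<<MD_DISK_WRITEMOSTLY) in .state; for
         * a persistent array the superblock read by md_import_device() fills
         * in the rest via validate_super().
         */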
3610         if (mddev->pers) {
3611                 int err;
3612                 if (!mddev->pers->hot_add_disk) {
3613                         printk(KERN_WARNING 
3614                                 "%s: personality does not support diskops!\n",
3615                                mdname(mddev));
3616                         return -EINVAL;
3617                 }
3618                 if (mddev->persistent)
3619                         rdev = md_import_device(dev, mddev->major_version,
3620                                                 mddev->minor_version);
3621                 else
3622                         rdev = md_import_device(dev, -1, -1);
3623                 if (IS_ERR(rdev)) {
3624                         printk(KERN_WARNING 
3625                                 "md: md_import_device returned %ld\n",
3626                                 PTR_ERR(rdev));
3627                         return PTR_ERR(rdev);
3628                 }
3629                 /* set save_raid_disk if appropriate */
3630                 if (!mddev->persistent) {
3631                         if (info->state & (1<<MD_DISK_SYNC)  &&
3632                             info->raid_disk < mddev->raid_disks)
3633                                 rdev->raid_disk = info->raid_disk;
3634                         else
3635                                 rdev->raid_disk = -1;
3636                 } else
3637                         super_types[mddev->major_version].
3638                                 validate_super(mddev, rdev);
3639                 rdev->saved_raid_disk = rdev->raid_disk;
3640
3641                 clear_bit(In_sync, &rdev->flags); /* just to be sure */
3642                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3643                         set_bit(WriteMostly, &rdev->flags);
3644
3645                 rdev->raid_disk = -1;
3646                 err = bind_rdev_to_array(rdev, mddev);
3647                 if (!err && !mddev->pers->hot_remove_disk) {
3648                         /* If there is hot_add_disk but no hot_remove_disk
3649                          * then the added disks are for geometry changes,
3650                          * and should be added immediately.
3651                          */
3652                         super_types[mddev->major_version].
3653                                 validate_super(mddev, rdev);
3654                         err = mddev->pers->hot_add_disk(mddev, rdev);
3655                         if (err)
3656                                 unbind_rdev_from_array(rdev);
3657                 }
3658                 if (err)
3659                         export_rdev(rdev);
3660
3661                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3662                 md_wakeup_thread(mddev->thread);
3663                 return err;
3664         }
3665
3666         /* otherwise, add_new_disk is only allowed
3667          * for major_version==0 superblocks
3668          */
3669         if (mddev->major_version != 0) {
3670                 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n",
3671                        mdname(mddev));
3672                 return -EINVAL;
3673         }
3674
3675         if (!(info->state & (1<<MD_DISK_FAULTY))) {
3676                 int err;
3677                 rdev = md_import_device (dev, -1, 0);
3678                 if (IS_ERR(rdev)) {
3679                         printk(KERN_WARNING 
3680                                 "md: error, md_import_device() returned %ld\n",
3681                                 PTR_ERR(rdev));
3682                         return PTR_ERR(rdev);
3683                 }
3684                 rdev->desc_nr = info->number;
3685                 if (info->raid_disk < mddev->raid_disks)
3686                         rdev->raid_disk = info->raid_disk;
3687                 else
3688                         rdev->raid_disk = -1;
3689
3690                 rdev->flags = 0;
3691
3692                 if (rdev->raid_disk < mddev->raid_disks)
3693                         if (info->state & (1<<MD_DISK_SYNC))
3694                                 set_bit(In_sync, &rdev->flags);
3695
3696                 if (info->state & (1<<MD_DISK_WRITEMOSTLY))
3697                         set_bit(WriteMostly, &rdev->flags);
3698
3699                 if (!mddev->persistent) {
3700                         printk(KERN_INFO "md: nonpersistent superblock ...\n");
3701                         rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3702                 } else 
3703                         rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3704                 rdev->size = calc_dev_size(rdev, mddev->chunk_size);
3705
3706                 err = bind_rdev_to_array(rdev, mddev);
3707                 if (err) {
3708                         export_rdev(rdev);
3709                         return err;
3710                 }
3711         }
3712
3713         return 0;
3714 }
3715
3716 static int hot_remove_disk(mddev_t * mddev, dev_t dev)
3717 {
3718         char b[BDEVNAME_SIZE];
3719         mdk_rdev_t *rdev;
3720
3721         if (!mddev->pers)
3722                 return -ENODEV;
3723
3724         rdev = find_rdev(mddev, dev);
3725         if (!rdev)
3726                 return -ENXIO;
3727
3728         if (rdev->raid_disk >= 0)
3729                 goto busy;
3730
3731         kick_rdev_from_array(rdev);
3732         md_update_sb(mddev);
3733         md_new_event(mddev);
3734
3735         return 0;
3736 busy:
3737         printk(KERN_WARNING "md: cannot remove active disk %s from %s ... \n",
3738                 bdevname(rdev->bdev,b), mdname(mddev));
3739         return -EBUSY;
3740 }
3741
3742 static int hot_add_disk(mddev_t * mddev, dev_t dev)
3743 {
3744         char b[BDEVNAME_SIZE];
3745         int err;
3746         unsigned int size;
3747         mdk_rdev_t *rdev;
3748
3749         if (!mddev->pers)
3750                 return -ENODEV;
3751
3752         if (mddev->major_version != 0) {
3753                 printk(KERN_WARNING "%s: HOT_ADD may only be used with"
3754                         " version-0 superblocks.\n",
3755                         mdname(mddev));
3756                 return -EINVAL;
3757         }
3758         if (!mddev->pers->hot_add_disk) {
3759                 printk(KERN_WARNING 
3760                         "%s: personality does not support diskops!\n",
3761                         mdname(mddev));
3762                 return -EINVAL;
3763         }
3764
3765         rdev = md_import_device (dev, -1, 0);
3766         if (IS_ERR(rdev)) {
3767                 printk(KERN_WARNING 
3768                         "md: error, md_import_device() returned %ld\n",
3769                         PTR_ERR(rdev));
3770                 return -EINVAL;
3771         }
3772
3773         if (mddev->persistent)
3774                 rdev->sb_offset = calc_dev_sboffset(rdev->bdev);
3775         else
3776                 rdev->sb_offset =
3777                         rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
3778
3779         size = calc_dev_size(rdev, mddev->chunk_size);
3780         rdev->size = size;
3781
3782         if (test_bit(Faulty, &rdev->flags)) {
3783                 printk(KERN_WARNING 
3784                         "md: can not hot-add faulty %s disk to %s!\n",
3785                         bdevname(rdev->bdev,b), mdname(mddev));
3786                 err = -EINVAL;
3787                 goto abort_export;
3788         }
3789         clear_bit(In_sync, &rdev->flags);
3790         rdev->desc_nr = -1;
3791         err = bind_rdev_to_array(rdev, mddev);
3792         if (err)
3793                 goto abort_export;
3794
3795         /*
3796          * The rest should better be atomic, we can have disk failures
3797          * noticed in interrupt contexts ...
3798          */
3799
3800         if (rdev->desc_nr == mddev->max_disks) {
3801                 printk(KERN_WARNING "%s: can not hot-add to full array!\n",
3802                         mdname(mddev));
3803                 err = -EBUSY;
3804                 goto abort_unbind_export;
3805         }
3806
3807         rdev->raid_disk = -1;
3808
3809         md_update_sb(mddev);
3810
3811         /*
3812          * Kick recovery, maybe this spare has to be added to the
3813          * array immediately.
3814          */
3815         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3816         md_wakeup_thread(mddev->thread);
3817         md_new_event(mddev);
3818         return 0;
3819
3820 abort_unbind_export:
3821         unbind_rdev_from_array(rdev);
3822
3823 abort_export:
3824         export_rdev(rdev);
3825         return err;
3826 }
3827
3828 static int set_bitmap_file(mddev_t *mddev, int fd)
3829 {
3830         int err;
3831
3832         if (mddev->pers) {
3833                 if (!mddev->pers->quiesce)
3834                         return -EBUSY;
3835                 if (mddev->recovery || mddev->sync_thread)
3836                         return -EBUSY;
3837                 /* we should be able to change the bitmap.. */
3838         }
3839
3840
3841         if (fd >= 0) {
3842                 if (mddev->bitmap)
3843                         return -EEXIST; /* cannot add when bitmap is present */
3844                 mddev->bitmap_file = fget(fd);
3845
3846                 if (mddev->bitmap_file == NULL) {
3847                         printk(KERN_ERR "%s: error: failed to get bitmap file\n",
3848                                mdname(mddev));
3849                         return -EBADF;
3850                 }
3851
3852                 err = deny_bitmap_write_access(mddev->bitmap_file);
3853                 if (err) {
3854                         printk(KERN_ERR "%s: error: bitmap file is already in use\n",
3855                                mdname(mddev));
3856                         fput(mddev->bitmap_file);
3857                         mddev->bitmap_file = NULL;
3858                         return err;
3859                 }
3860                 mddev->bitmap_offset = 0; /* file overrides offset */
3861         } else if (mddev->bitmap == NULL)
3862                 return -ENOENT; /* cannot remove what isn't there */
3863         err = 0;
3864         if (mddev->pers) {
3865                 mddev->pers->quiesce(mddev, 1);
3866                 if (fd >= 0)
3867                         err = bitmap_create(mddev);
3868                 if (fd < 0 || err) {
3869                         bitmap_destroy(mddev);
3870                         fd = -1; /* make sure to put the file */
3871                 }
3872                 mddev->pers->quiesce(mddev, 0);
3873         }
3874         if (fd < 0) {
3875                 if (mddev->bitmap_file) {
3876                         restore_bitmap_write_access(mddev->bitmap_file);
3877                         fput(mddev->bitmap_file);
3878                 }
3879                 mddev->bitmap_file = NULL;
3880         }
3881
3882         return err;
3883 }
3884
3885 /*
3886  * set_array_info is used in two different ways.
3887  * The original usage is when creating a new array.
3888  * In this usage, raid_disks is > 0 and it, together with
3889  *  level, size, not_persistent, layout and chunk_size, determines the
3890  *  shape of the array.
3891  *  This will always create an array with a type-0.90.0 superblock.
3892  * The newer usage is when assembling an array.
3893  *  In this case raid_disks will be 0, and the major_version field is
3894  *  used to determine which style super-blocks are to be found on the devices.
3895  *  The minor and patch _version numbers are also kept in case the
3896  *  super_block handler wishes to interpret them.
3897  */
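/* Editorial sketch of the two calling conventions (field values are
 * hypothetical):
 *
 *   create:    info.raid_disks = 2;  info.level = 1;
 *              info.chunk_size = 65536;      // 0.90 superblock array
 *   assemble:  info.raid_disks = 0;  info.major_version = 1;
 *              info.minor_version = 0;       // only selects superblock style
 */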
3898 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
3899 {
3900
3901         if (info->raid_disks == 0) {
3902                 /* just setting version number for superblock loading */
3903                 if (info->major_version < 0 ||
3904                     info->major_version >= sizeof(super_types)/sizeof(super_types[0]) ||
3905                     super_types[info->major_version].name == NULL) {
3906                         /* maybe try to auto-load a module? */
3907                         printk(KERN_INFO 
3908                                 "md: superblock version %d not known\n",
3909                                 info->major_version);
3910                         return -EINVAL;
3911                 }
3912                 mddev->major_version = info->major_version;
3913                 mddev->minor_version = info->minor_version;
3914                 mddev->patch_version = info->patch_version;
3915                 return 0;
3916         }
3917         mddev->major_version = MD_MAJOR_VERSION;
3918         mddev->minor_version = MD_MINOR_VERSION;
3919         mddev->patch_version = MD_PATCHLEVEL_VERSION;
3920         mddev->ctime         = get_seconds();
3921
3922         mddev->level         = info->level;
3923         mddev->clevel[0]     = 0;
3924         mddev->size          = info->size;
3925         mddev->raid_disks    = info->raid_disks;
3926         /* don't set md_minor, it is determined by which /dev/md* was
3927          * opened
3928          */
3929         if (info->state & (1<<MD_SB_CLEAN))
3930                 mddev->recovery_cp = MaxSector;
3931         else
3932                 mddev->recovery_cp = 0;
3933         mddev->persistent    = ! info->not_persistent;
3934
3935         mddev->layout        = info->layout;
3936         mddev->chunk_size    = info->chunk_size;
3937
3938         mddev->max_disks     = MD_SB_DISKS;
3939
3940         mddev->sb_dirty      = 1;
3941
3942         mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
3943         mddev->bitmap_offset = 0;
3944
3945         mddev->reshape_position = MaxSector;
3946
3947         /*
3948          * Generate a 128 bit UUID
3949          */
3950         get_random_bytes(mddev->uuid, 16);
3951
3952         mddev->new_level = mddev->level;
3953         mddev->new_chunk = mddev->chunk_size;
3954         mddev->new_layout = mddev->layout;
3955         mddev->delta_disks = 0;
3956
3957         return 0;
3958 }
3959
3960 static int update_size(mddev_t *mddev, unsigned long size)
3961 {
3962         mdk_rdev_t * rdev;
3963         int rv;
3964         struct list_head *tmp;
3965         int fit = (size == 0);
3966
3967         if (mddev->pers->resize == NULL)
3968                 return -EINVAL;
3969         /* The "size" is the amount of each device that is used.
3970          * This can only make sense for arrays with redundancy.
3971          * linear and raid0 always use whatever space is available.
3972          * We can only consider changing the size if no resync
3973          * or reconstruction is happening, and if the new size
3974          * is acceptable. It must fit before the sb_offset or,
3975          * if that is < data_offset, it must fit before the
3976          * size of each device.
3977          * If size is zero, we find the largest size that fits.
3978          */
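        /* Editorial note on units (inferred from the conversions below, an
         * assumption rather than original text): "size" is in 1K blocks per
         * device while avail is counted in 512-byte sectors, hence avail/2
         * when deriving a size from the capacity, and size<<1 / size*2 when
         * converting back to sectors for the fit check and ->resize().
         */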
3979         if (mddev->sync_thread)
3980                 return -EBUSY;
3981         ITERATE_RDEV(mddev,rdev,tmp) {
3982                 sector_t avail;
3983                 if (rdev->sb_offset > rdev->data_offset)
3984                         avail = (rdev->sb_offset*2) - rdev->data_offset;
3985                 else
3986                         avail = get_capacity(rdev->bdev->bd_disk)
3987                                 - rdev->data_offset;
3988                 if (fit && (size == 0 || size > avail/2))
3989                         size = avail/2;
3990                 if (avail < ((sector_t)size << 1))
3991                         return -ENOSPC;
3992         }
3993         rv = mddev->pers->resize(mddev, (sector_t)size *2);
3994         if (!rv) {
3995                 struct block_device *bdev;
3996
3997                 bdev = bdget_disk(mddev->gendisk, 0);
3998                 if (bdev) {
3999                         mutex_lock(&bdev->bd_inode->i_mutex);
4000                         i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10);
4001                         mutex_unlock(&bdev->bd_inode->i_mutex);
4002                         bdput(bdev);
4003                 }
4004         }
4005         return rv;
4006 }
4007
4008 static int update_raid_disks(mddev_t *mddev, int raid_disks)
4009 {
4010         int rv;
4011         /* change the number of raid disks */
4012         if (mddev->pers->check_reshape == NULL)
4013                 return -EINVAL;
4014         if (raid_disks <= 0 ||
4015             raid_disks >= mddev->max_disks)
4016                 return -EINVAL;
4017         if (mddev->sync_thread || mddev->reshape_position != MaxSector)
4018                 return -EBUSY;
4019         mddev->delta_disks = raid_disks - mddev->raid_disks;
4020
4021         rv = mddev->pers->check_reshape(mddev);
4022         return rv;
4023 }
4024
4025
4026 /*
4027  * update_array_info is used to change the configuration of an
4028  * on-line array.
4029  * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
4030  * fields in the info are checked against the array.
4031  * Any differences that cannot be handled will cause an error.
4032  * Normally, only one change can be managed at a time.
4033  */
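/* Editorial sketch: an info that differs from the live array only in layout
 * is handed to pers->reconfig(), only in size to update_size(), only in
 * raid_disks to update_raid_disks(), and only in the MD_SB_BITMAP_PRESENT
 * state bit to the bitmap add/remove code below; any second difference makes
 * the cnt check return -EINVAL.
 */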
4034 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
4035 {
4036         int rv = 0;
4037         int cnt = 0;
4038         int state = 0;
4039
4040         /* calculate expected state, ignoring low bits */
4041         if (mddev->bitmap && mddev->bitmap_offset)
4042                 state |= (1 << MD_SB_BITMAP_PRESENT);
4043
4044         if (mddev->major_version != info->major_version ||
4045             mddev->minor_version != info->minor_version ||
4046 /*          mddev->patch_version != info->patch_version || */
4047             mddev->ctime         != info->ctime         ||
4048             mddev->level         != info->level         ||
4049 /*          mddev->layout        != info->layout        || */
4050             !mddev->persistent   != info->not_persistent||
4051             mddev->chunk_size    != info->chunk_size    ||
4052             /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
4053             ((state^info->state) & 0xfffffe00)
4054                 )
4055                 return -EINVAL;
4056         /* Check there is only one change */
4057         if (info->size >= 0 && mddev->size != info->size) cnt++;
4058         if (mddev->raid_disks != info->raid_disks) cnt++;
4059         if (mddev->layout != info->layout) cnt++;
4060         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++;
4061         if (cnt == 0) return 0;
4062         if (cnt > 1) return -EINVAL;
4063
4064         if (mddev->layout != info->layout) {
4065                 /* Change layout
4066                  * we don't need to do anything at the md level, the
4067                  * personality will take care of it all.
4068                  */
4069                 if (mddev->pers->reconfig == NULL)
4070                         return -EINVAL;
4071                 else
4072                         return mddev->pers->reconfig(mddev, info->layout, -1);
4073         }
4074         if (info->size >= 0 && mddev->size != info->size)
4075                 rv = update_size(mddev, info->size);
4076
4077         if (mddev->raid_disks    != info->raid_disks)
4078                 rv = update_raid_disks(mddev, info->raid_disks);
4079
4080         if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
4081                 if (mddev->pers->quiesce == NULL)
4082                         return -EINVAL;
4083                 if (mddev->recovery || mddev->sync_thread)
4084                         return -EBUSY;
4085                 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
4086                         /* add the bitmap */
4087                         if (mddev->bitmap)
4088                                 return -EEXIST;
4089                         if (mddev->default_bitmap_offset == 0)
4090                                 return -EINVAL;
4091                         mddev->bitmap_offset = mddev->default_bitmap_offset;
4092                         mddev->pers->quiesce(mddev, 1);
4093                         rv = bitmap_create(mddev);
4094                         if (rv)
4095                                 bitmap_destroy(mddev);
4096                         mddev->pers->quiesce(mddev, 0);
4097                 } else {
4098                         /* remove the bitmap */
4099                         if (!mddev->bitmap)
4100                                 return -ENOENT;
4101                         if (mddev->bitmap->file)
4102                                 return -EINVAL;
4103                         mddev->pers->quiesce(mddev, 1);
4104                         bitmap_destroy(mddev);
4105                         mddev->pers->quiesce(mddev, 0);
4106                         mddev->bitmap_offset = 0;
4107                 }
4108         }
4109         md_update_sb(mddev);
4110         return rv;
4111 }
4112
4113 static int set_disk_faulty(mddev_t *mddev, dev_t dev)
4114 {
4115         mdk_rdev_t *rdev;
4116
4117         if (mddev->pers == NULL)
4118                 return -ENODEV;
4119
4120         rdev = find_rdev(mddev, dev);
4121         if (!rdev)
4122                 return -ENODEV;
4123
4124         md_error(mddev, rdev);
4125         return 0;
4126 }
4127
4128 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4129 {
4130         mddev_t *mddev = bdev->bd_disk->private_data;
4131
4132         geo->heads = 2;
4133         geo->sectors = 4;
4134         geo->cylinders = get_capacity(mddev->gendisk) / 8;
4135         return 0;
4136 }
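/* Editorial worked example: with the fake geometry of 2 heads and 4 sectors
 * per track, each cylinder covers 8 sectors, so an array whose gendisk
 * capacity is 4194304 sectors (2 GiB, a hypothetical size) reports
 * 524288 cylinders.
 */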
4137
4138 static int md_ioctl(struct inode *inode, struct file *file,
4139                         unsigned int cmd, unsigned long arg)
4140 {
4141         int err = 0;
4142         void __user *argp = (void __user *)arg;
4143         mddev_t *mddev = NULL;
4144
4145         if (!capable(CAP_SYS_ADMIN))
4146                 return -EACCES;
4147
4148         /*
4149          * Commands dealing with the RAID driver but not any
4150          * particular array:
4151          */
4152         switch (cmd)
4153         {
4154                 case RAID_VERSION:
4155                         err = get_version(argp);
4156                         goto done;
4157
4158                 case PRINT_RAID_DEBUG:
4159                         err = 0;
4160                         md_print_devices();
4161                         goto done;
4162
4163 #ifndef MODULE
4164                 case RAID_AUTORUN:
4165                         err = 0;
4166                         autostart_arrays(arg);
4167                         goto done;
4168 #endif
4169                 default:;
4170         }
4171
4172         /*
4173          * Commands creating/starting a new array:
4174          */
4175
4176         mddev = inode->i_bdev->bd_disk->private_data;
4177
4178         if (!mddev) {
4179                 BUG();
4180                 goto abort;
4181         }
4182
4183
4184         if (cmd == START_ARRAY) {
4185                 /* START_ARRAY doesn't need to lock the array as autostart_array
4186                  * does the locking, and it could even be a different array
4187                  */
4188                 static int cnt = 3;
4189                 if (cnt > 0 ) {
4190                         printk(KERN_WARNING
4191                                "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
4192                                "This will not be supported beyond July 2006\n",
4193                                current->comm, current->pid);
4194                         cnt--;
4195                 }
4196                 err = autostart_array(new_decode_dev(arg));
4197                 if (err) {
4198                         printk(KERN_WARNING "md: autostart failed!\n");
4199                         goto abort;
4200                 }
4201                 goto done;
4202         }
4203
4204         err = mddev_lock(mddev);
4205         if (err) {
4206                 printk(KERN_INFO 
4207                         "md: ioctl lock interrupted, reason %d, cmd %d\n",
4208                         err, cmd);
4209                 goto abort;
4210         }
4211
4212         switch (cmd)
4213         {
4214                 case SET_ARRAY_INFO:
4215                         {
4216                                 mdu_array_info_t info;
4217                                 if (!arg)
4218                                         memset(&info, 0, sizeof(info));
4219                                 else if (copy_from_user(&info, argp, sizeof(info))) {
4220                                         err = -EFAULT;
4221                                         goto abort_unlock;
4222                                 }
4223                                 if (mddev->pers) {
4224                                         err = update_array_info(mddev, &info);
4225                                         if (err) {
4226                                                 printk(KERN_WARNING "md: couldn't update"
4227                                                        " array info. %d\n", err);
4228                                                 goto abort_unlock;
4229                                         }
4230                                         goto done_unlock;
4231                                 }
4232                                 if (!list_empty(&mddev->disks)) {
4233                                         printk(KERN_WARNING
4234                                                "md: array %s already has disks!\n",
4235                                                mdname(mddev));
4236                                         err = -EBUSY;
4237                                         goto abort_unlock;
4238                                 }
4239                                 if (mddev->raid_disks) {
4240                                         printk(KERN_WARNING
4241                                                "md: array %s already initialised!\n",
4242                                                mdname(mddev));
4243                                         err = -EBUSY;
4244                                         goto abort_unlock;
4245                                 }
4246                                 err = set_array_info(mddev, &info);
4247                                 if (err) {
4248                                         printk(KERN_WARNING "md: couldn't set"
4249                                                " array info. %d\n", err);
4250                                         goto abort_unlock;
4251                                 }
4252                         }
4253                         goto done_unlock;
4254
4255                 default:;
4256         }
4257
4258         /*
4259          * Commands querying/configuring an existing array:
4260          */
4261         /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
4262          * RUN_ARRAY, and SET_BITMAP_FILE are allowed */
4263         if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
4264                         && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) {
4265                 err = -ENODEV;
4266                 goto abort_unlock;
4267         }
4268
4269         /*
4270          * Commands even a read-only array can execute:
4271          */
4272         switch (cmd)
4273         {
4274                 case GET_ARRAY_INFO:
4275                         err = get_array_info(mddev, argp);
4276                         goto done_unlock;
4277
4278                 case GET_BITMAP_FILE:
4279                         err = get_bitmap_file(mddev, argp);
4280                         goto done_unlock;
4281
4282                 case GET_DISK_INFO:
4283                         err = get_disk_info(mddev, argp);
4284                         goto done_unlock;
4285
4286                 case RESTART_ARRAY_RW:
4287                         err = restart_array(mddev);
4288                         goto done_unlock;
4289
4290                 case STOP_ARRAY:
4291                         err = do_md_stop (mddev, 0);
4292                         goto done_unlock;
4293
4294                 case STOP_ARRAY_RO:
4295                         err = do_md_stop (mddev, 1);
4296                         goto done_unlock;
4297
4298         /*
4299          * We have a problem here : there is no easy way to give a CHS
4300          * virtual geometry. We currently pretend that we have 2 heads and
4301          * 4 sectors per track (with a BIG number of cylinders...). This drives
4302          * dosfs just mad... ;-)
4303          */
4304         }
4305
4306         /*
4307          * The remaining ioctls are changing the state of the
4308          * superblock, so we do not allow them on read-only arrays.
4309          * However non-MD ioctls (e.g. get-size) will still come through
4310          * here and hit the 'default' below, so only disallow
4311          * 'md' ioctls, and switch to rw mode if started auto-readonly.
4312          */
4313         if (_IOC_TYPE(cmd) == MD_MAJOR &&
4314             mddev->ro && mddev->pers) {
4315                 if (mddev->ro == 2) {
4316                         mddev->ro = 0;
4317                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4318                         md_wakeup_thread(mddev->thread);
4319
4320                 } else {
4321                         err = -EROFS;
4322                         goto abort_unlock;
4323                 }
4324         }
4325
4326         switch (cmd)
4327         {
4328                 case ADD_NEW_DISK:
4329                 {
4330                         mdu_disk_info_t info;
4331                         if (copy_from_user(&info, argp, sizeof(info)))
4332                                 err = -EFAULT;
4333                         else
4334                                 err = add_new_disk(mddev, &info);
4335                         goto done_unlock;
4336                 }
4337
4338                 case HOT_REMOVE_DISK:
4339                         err = hot_remove_disk(mddev, new_decode_dev(arg));
4340                         goto done_unlock;
4341
4342                 case HOT_ADD_DISK:
4343                         err = hot_add_disk(mddev, new_decode_dev(arg));
4344                         goto done_unlock;
4345
4346                 case SET_DISK_FAULTY:
4347                         err = set_disk_faulty(mddev, new_decode_dev(arg));
4348                         goto done_unlock;
4349
4350                 case RUN_ARRAY:
4351                         err = do_md_run (mddev);
4352                         goto done_unlock;
4353
4354                 case SET_BITMAP_FILE:
4355                         err = set_bitmap_file(mddev, (int)arg);
4356                         goto done_unlock;
4357
4358                 default:
4359                         err = -EINVAL;
4360                         goto abort_unlock;
4361         }
4362
4363 done_unlock:
4364 abort_unlock:
4365         mddev_unlock(mddev);
4366
4367         return err;
4368 done:
4369         if (err)
4370                 MD_BUG();
4371 abort:
4372         return err;
4373 }
4374
4375 static int md_open(struct inode *inode, struct file *file)
4376 {
4377         /*
4378          * Succeed if we can lock the mddev, which confirms that
4379          * it isn't being stopped right now.
4380          */
4381         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4382         int err;
4383
4384         if ((err = mddev_lock(mddev)))
4385                 goto out;
4386
4387         err = 0;
4388         mddev_get(mddev);
4389         mddev_unlock(mddev);
4390
4391         check_disk_change(inode->i_bdev);
4392  out:
4393         return err;
4394 }
4395
4396 static int md_release(struct inode *inode, struct file * file)
4397 {
4398         mddev_t *mddev = inode->i_bdev->bd_disk->private_data;
4399
4400         if (!mddev)
4401                 BUG();
4402         mddev_put(mddev);
4403
4404         return 0;
4405 }
4406
4407 static int md_media_changed(struct gendisk *disk)
4408 {
4409         mddev_t *mddev = disk->private_data;
4410
4411         return mddev->changed;
4412 }
4413
4414 static int md_revalidate(struct gendisk *disk)
4415 {
4416         mddev_t *mddev = disk->private_data;
4417
4418         mddev->changed = 0;
4419         return 0;
4420 }
4421 static struct block_device_operations md_fops =
4422 {
4423         .owner          = THIS_MODULE,
4424         .open           = md_open,
4425         .release        = md_release,
4426         .ioctl          = md_ioctl,
4427         .getgeo         = md_getgeo,
4428         .media_changed  = md_media_changed,
4429         .revalidate_disk= md_revalidate,
4430 };
4431
4432 static int md_thread(void * arg)
4433 {
4434         mdk_thread_t *thread = arg;
4435
4436         /*
4437          * md_thread is a 'system-thread', its priority should be very
4438          * high. We avoid resource deadlocks individually in each
4439          * raid personality. (RAID5 does preallocation) We also use RR and
4440          * the very same RT priority as kswapd, thus we will never get
4441          * into a priority inversion deadlock.
4442          *
4443          * we definitely have to have equal or higher priority than
4444          * bdflush, otherwise bdflush will deadlock if there are too
4445          * many dirty RAID5 blocks.
4446          */
4447
4448         allow_signal(SIGKILL);
4449         while (!kthread_should_stop()) {
4450
4451                 /* We need to wait INTERRUPTIBLE so that
4452                  * we don't add to the load-average.
4453                  * That means we need to be sure no signals are
4454                  * pending
4455                  */
4456                 if (signal_pending(current))
4457                         flush_signals(current);
4458
4459                 wait_event_interruptible_timeout
4460                         (thread->wqueue,
4461                          test_bit(THREAD_WAKEUP, &thread->flags)
4462                          || kthread_should_stop(),
4463                          thread->timeout);
4464                 try_to_freeze();
4465
4466                 clear_bit(THREAD_WAKEUP, &thread->flags);
4467
4468                 thread->run(thread->mddev);
4469         }
4470
4471         return 0;
4472 }
4473
4474 void md_wakeup_thread(mdk_thread_t *thread)
4475 {
4476         if (thread) {
4477                 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm);
4478                 set_bit(THREAD_WAKEUP, &thread->flags);
4479                 wake_up(&thread->wqueue);
4480         }
4481 }
4482
4483 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
4484                                  const char *name)
4485 {
4486         mdk_thread_t *thread;
4487
4488         thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL);
4489         if (!thread)
4490                 return NULL;
4491
4492         init_waitqueue_head(&thread->wqueue);
4493
4494         thread->run = run;
4495         thread->mddev = mddev;
4496         thread->timeout = MAX_SCHEDULE_TIMEOUT;
4497         thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
4498         if (IS_ERR(thread->tsk)) {
4499                 kfree(thread);
4500                 return NULL;
4501         }
4502         return thread;
4503 }
4504
4505 void md_unregister_thread(mdk_thread_t *thread)
4506 {
4507         dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid);
4508
4509         kthread_stop(thread->tsk);
4510         kfree(thread);
4511 }
4512
4513 void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
4514 {
4515         if (!mddev) {
4516                 MD_BUG();
4517                 return;
4518         }
4519
4520         if (!rdev || test_bit(Faulty, &rdev->flags))
4521                 return;
4522 /*
4523         dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
4524                 mdname(mddev),
4525                 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev),
4526                 __builtin_return_address(0),__builtin_return_address(1),
4527                 __builtin_return_address(2),__builtin_return_address(3));
4528 */
4529         if (!mddev->pers->error_handler)
4530                 return;
4531         mddev->pers->error_handler(mddev,rdev);
4532         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4533         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4534         md_wakeup_thread(mddev->thread);
4535         md_new_event_inintr(mddev);
4536 }
4537
4538 /* seq_file implementation for /proc/mdstat */
4539
4540 static void status_unused(struct seq_file *seq)
4541 {
4542         int i = 0;
4543         mdk_rdev_t *rdev;
4544         struct list_head *tmp;
4545
4546         seq_printf(seq, "unused devices: ");
4547
4548         ITERATE_RDEV_PENDING(rdev,tmp) {
4549                 char b[BDEVNAME_SIZE];
4550                 i++;
4551                 seq_printf(seq, "%s ",
4552                               bdevname(rdev->bdev,b));
4553         }
4554         if (!i)
4555                 seq_printf(seq, "<none>");
4556
4557         seq_printf(seq, "\n");
4558 }
4559
4560
4561 static void status_resync(struct seq_file *seq, mddev_t * mddev)
4562 {
4563         sector_t max_blocks, resync, res;
4564         unsigned long dt, db, rt;
4565         int scale;
4566         unsigned int per_milli;
4567
4568         resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2;
4569
4570         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4571                 max_blocks = mddev->resync_max_sectors >> 1;
4572         else
4573                 max_blocks = mddev->size;
4574
4575         /*
4576          * Should not happen.
4577          */
4578         if (!max_blocks) {
4579                 MD_BUG();
4580                 return;
4581         }
4582         /* Pick 'scale' such that (resync>>scale)*1000 will fit
4583          * in a sector_t, and (max_blocks>>scale) will fit in a
4584          * u32, as those are the requirements for sector_div.
4585          * Thus 'scale' must be at least 10
4586          */
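        /*
         * Illustrative numbers (example only, not taken from the code above):
         * sector_div() needs a u32 divisor, and scale == 10 already covers
         * any array below roughly 8 PiB, since the loop below only bumps
         * 'scale' once max_blocks/2 exceeds 2^42 1K-blocks.
         * For a 1 TiB array half-way through resync:
         *   max_blocks = 2^30, resync = 2^29,
         *   res = (2^29>>10)*1000 / ((2^30>>10)+1) ~= 500,
         * so per_milli = 500 and the progress is printed below as "50.0%".
         */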
4587         scale = 10;
4588         if (sizeof(sector_t) > sizeof(unsigned long)) {
4589                 while ( max_blocks/2 > (1ULL<<(scale+32)))
4590                         scale++;
4591         }
4592         res = (resync>>scale)*1000;
4593         sector_div(res, (u32)((max_blocks>>scale)+1));
4594
4595         per_milli = res;
4596         {
4597                 int i, x = per_milli/50, y = 20-x;
4598                 seq_printf(seq, "[");
4599                 for (i = 0; i < x; i++)
4600                         seq_printf(seq, "=");
4601                 seq_printf(seq, ">");
4602                 for (i = 0; i < y; i++)
4603                         seq_printf(seq, ".");
4604                 seq_printf(seq, "] ");
4605         }
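        /* For example (hypothetical value): per_milli = 378 gives x = 7 and
         * y = 13, so the bar above comes out as "[=======>.............] ".
         */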
4606         seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
4607                    (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)?
4608                     "reshape" :
4609                       (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
4610                        "resync" : "recovery")),
4611                       per_milli/10, per_milli % 10,
4612                    (unsigned long long) resync,
4613                    (unsigned long long) max_blocks);
4614
4615         /*
4616          * We do not want to overflow, so the order of operands and
4617          * the * 100 / 100 trick are important. We do a +1 to be
4618          * safe against division by zero. We only estimate anyway.
4619          *
4620          * dt: time from mark until now
4621          * db: blocks written from mark until now
4622          * rt: remaining time
4623          */
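        /* Worked example (illustrative figures only): dt = 30 seconds since
         * the mark, db = 30000 1K-blocks written in that time, 900000 blocks
         * still to go:  rt = (30 * (900000 / 301)) / 100 = 897 seconds,
         * printed below as "finish=14.9min" and "speed=1000K/sec".
         */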
4624         dt = ((jiffies - mddev->resync_mark) / HZ);
4625         if (!dt) dt++;
4626         db = resync - (mddev->resync_mark_cnt/2);
4627         rt = (dt * ((unsigned long)(max_blocks-resync) / (db/100+1)))/100;
4628
4629         seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6);
4630
4631         seq_printf(seq, " speed=%ldK/sec", db/dt);
4632 }
4633
4634 static void *md_seq_start(struct seq_file *seq, loff_t *pos)
4635 {
4636         struct list_head *tmp;
4637         loff_t l = *pos;
4638         mddev_t *mddev;
4639
4640         if (l >= 0x10000)
4641                 return NULL;
4642         if (!l--)
4643                 /* header */
4644                 return (void*)1;
4645
4646         spin_lock(&all_mddevs_lock);
4647         list_for_each(tmp,&all_mddevs)
4648                 if (!l--) {
4649                         mddev = list_entry(tmp, mddev_t, all_mddevs);
4650                         mddev_get(mddev);
4651                         spin_unlock(&all_mddevs_lock);
4652                         return mddev;
4653                 }
4654         spin_unlock(&all_mddevs_lock);
4655         if (!l--)
4656                 return (void*)2;/* tail */
4657         return NULL;
4658 }
4659
4660 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4661 {
4662         struct list_head *tmp;
4663         mddev_t *next_mddev, *mddev = v;
4664         
4665         ++*pos;
4666         if (v == (void*)2)
4667                 return NULL;
4668
4669         spin_lock(&all_mddevs_lock);
4670         if (v == (void*)1)
4671                 tmp = all_mddevs.next;
4672         else
4673                 tmp = mddev->all_mddevs.next;
4674         if (tmp != &all_mddevs)
4675                 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs));
4676         else {
4677                 next_mddev = (void*)2;
4678                 *pos = 0x10000;
4679         }               
4680         spin_unlock(&all_mddevs_lock);
4681
4682         if (v != (void*)1)
4683                 mddev_put(mddev);
4684         return next_mddev;
4685
4686 }
4687
4688 static void md_seq_stop(struct seq_file *seq, void *v)
4689 {
4690         mddev_t *mddev = v;
4691
4692         if (mddev && v != (void*)1 && v != (void*)2)
4693                 mddev_put(mddev);
4694 }
4695
4696 struct mdstat_info {
4697         int event;
4698 };
4699
4700 static int md_seq_show(struct seq_file *seq, void *v)
4701 {
4702         mddev_t *mddev = v;
4703         sector_t size;
4704         struct list_head *tmp2;
4705         mdk_rdev_t *rdev;
4706         struct mdstat_info *mi = seq->private;
4707         struct bitmap *bitmap;
4708
4709         if (v == (void*)1) {
4710                 struct mdk_personality *pers;
4711                 seq_printf(seq, "Personalities : ");
4712                 spin_lock(&pers_lock);
4713                 list_for_each_entry(pers, &pers_list, list)
4714                         seq_printf(seq, "[%s] ", pers->name);
4715
4716                 spin_unlock(&pers_lock);
4717                 seq_printf(seq, "\n");
4718                 mi->event = atomic_read(&md_event_count);
4719                 return 0;
4720         }
4721         if (v == (void*)2) {
4722                 status_unused(seq);
4723                 return 0;
4724         }
4725
4726         if (mddev_lock(mddev) < 0)
4727                 return -EINTR;
4728
4729         if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
4730                 seq_printf(seq, "%s : %sactive", mdname(mddev),
4731                                                 mddev->pers ? "" : "in");
4732                 if (mddev->pers) {
4733                         if (mddev->ro==1)
4734                                 seq_printf(seq, " (read-only)");
4735                         if (mddev->ro==2)
4736                                 seq_printf(seq, " (auto-read-only)");
4737                         seq_printf(seq, " %s", mddev->pers->name);
4738                 }
4739
4740                 size = 0;
4741                 ITERATE_RDEV(mddev,rdev,tmp2) {
4742                         char b[BDEVNAME_SIZE];
4743                         seq_printf(seq, " %s[%d]",
4744                                 bdevname(rdev->bdev,b), rdev->desc_nr);
4745                         if (test_bit(WriteMostly, &rdev->flags))
4746                                 seq_printf(seq, "(W)");
4747                         if (test_bit(Faulty, &rdev->flags)) {
4748                                 seq_printf(seq, "(F)");
4749                                 continue;
4750                         } else if (rdev->raid_disk < 0)
4751                                 seq_printf(seq, "(S)"); /* spare */
4752                         size += rdev->size;
4753                 }
4754
4755                 if (!list_empty(&mddev->disks)) {
4756                         if (mddev->pers)
4757                                 seq_printf(seq, "\n      %llu blocks",
4758                                         (unsigned long long)mddev->array_size);
4759                         else
4760                                 seq_printf(seq, "\n      %llu blocks",
4761                                         (unsigned long long)size);
4762                 }
4763                 if (mddev->persistent) {
4764                         if (mddev->major_version != 0 ||
4765                             mddev->minor_version != 90) {
4766                                 seq_printf(seq," super %d.%d",
4767                                            mddev->major_version,
4768                                            mddev->minor_version);
4769                         }
4770                 } else
4771                         seq_printf(seq, " super non-persistent");
4772
4773                 if (mddev->pers) {
4774                         mddev->pers->status (seq, mddev);
4775                         seq_printf(seq, "\n      ");
4776                         if (mddev->pers->sync_request) {
4777                                 if (mddev->curr_resync > 2) {
4778                                         status_resync (seq, mddev);
4779                                         seq_printf(seq, "\n      ");
4780                                 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
4781                                         seq_printf(seq, "\tresync=DELAYED\n      ");
4782                                 else if (mddev->recovery_cp < MaxSector)
4783                                         seq_printf(seq, "\tresync=PENDING\n      ");
4784                         }
4785                 } else
4786                         seq_printf(seq, "\n       ");
4787
4788                 if ((bitmap = mddev->bitmap)) {
4789                         unsigned long chunk_kb;
4790                         unsigned long flags;
4791                         spin_lock_irqsave(&bitmap->lock, flags);
4792                         chunk_kb = bitmap->chunksize >> 10;
4793                         seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
4794                                 "%lu%s chunk",
4795                                 bitmap->pages - bitmap->missing_pages,
4796                                 bitmap->pages,
4797                                 (bitmap->pages - bitmap->missing_pages)
4798                                         << (PAGE_SHIFT - 10),
4799                                 chunk_kb ? chunk_kb : bitmap->chunksize,
4800                                 chunk_kb ? "KB" : "B");
4801                         if (bitmap->file) {
4802                                 seq_printf(seq, ", file: ");
4803                                 seq_path(seq, bitmap->file->f_vfsmnt,
4804                                          bitmap->file->f_dentry," \t\n");
4805                         }
4806
4807                         seq_printf(seq, "\n");
4808                         spin_unlock_irqrestore(&bitmap->lock, flags);
4809                 }
4810
4811                 seq_printf(seq, "\n");
4812         }
4813         mddev_unlock(mddev);
4814         
4815         return 0;
4816 }
4817
4818 static struct seq_operations md_seq_ops = {
4819         .start  = md_seq_start,
4820         .next   = md_seq_next,
4821         .stop   = md_seq_stop,
4822         .show   = md_seq_show,
4823 };
4824
4825 static int md_seq_open(struct inode *inode, struct file *file)
4826 {
4827         int error;
4828         struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
4829         if (mi == NULL)
4830                 return -ENOMEM;
4831
4832         error = seq_open(file, &md_seq_ops);
4833         if (error)
4834                 kfree(mi);
4835         else {
4836                 struct seq_file *p = file->private_data;
4837                 p->private = mi;
4838                 mi->event = atomic_read(&md_event_count);
4839         }
4840         return error;
4841 }
4842
4843 static int md_seq_release(struct inode *inode, struct file *file)
4844 {
4845         struct seq_file *m = file->private_data;
4846         struct mdstat_info *mi = m->private;
4847         m->private = NULL;
4848         kfree(mi);
4849         return seq_release(inode, file);
4850 }
4851
4852 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
4853 {
4854         struct seq_file *m = filp->private_data;
4855         struct mdstat_info *mi = m->private;
4856         int mask;
4857
4858         poll_wait(filp, &md_event_waiters, wait);
4859
4860         /* always allow read */
4861         mask = POLLIN | POLLRDNORM;
4862
4863         if (mi->event != atomic_read(&md_event_count))
4864                 mask |= POLLERR | POLLPRI;
4865         return mask;
4866 }
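/*
 * Illustration (userspace side, not part of this driver): a monitoring tool
 * such as mdadm's monitor mode can keep /proc/mdstat open and poll()/select()
 * on it.  POLLPRI|POLLERR are reported once md_event_count has advanced past
 * the value recorded at the last full read of the file; re-reading from the
 * start records the new value and clears the condition until the next event.
 */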
4867
4868 static struct file_operations md_seq_fops = {
4869         .open           = md_seq_open,
4870         .read           = seq_read,
4871         .llseek         = seq_lseek,
4872         .release        = md_seq_release,
4873         .poll           = mdstat_poll,
4874 };
4875
4876 int register_md_personality(struct mdk_personality *p)
4877 {
4878         spin_lock(&pers_lock);
4879         list_add_tail(&p->list, &pers_list);
4880         printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
4881         spin_unlock(&pers_lock);
4882         return 0;
4883 }
4884
4885 int unregister_md_personality(struct mdk_personality *p)
4886 {
4887         printk(KERN_INFO "md: %s personality unregistered\n", p->name);
4888         spin_lock(&pers_lock);
4889         list_del_init(&p->list);
4890         spin_unlock(&pers_lock);
4891         return 0;
4892 }
4893
4894 static int is_mddev_idle(mddev_t *mddev)
4895 {
4896         mdk_rdev_t * rdev;
4897         struct list_head *tmp;
4898         int idle;
4899         unsigned long curr_events;
4900
4901         idle = 1;
4902         ITERATE_RDEV(mddev,rdev,tmp) {
4903                 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
4904                 curr_events = disk_stat_read(disk, sectors[0]) + 
4905                                 disk_stat_read(disk, sectors[1]) - 
4906                                 atomic_read(&disk->sync_io);
4907                 /* The difference between curr_events and last_events
4908                  * will be affected by any new non-sync IO (making
4909                  * curr_events bigger) and any difference in the amount of
4910                  * in-flight sync IO (making curr_events bigger or smaller)
4911                  * The amount in-flight is currently limited to
4912                  * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
4913                  * which is at most 4096 sectors.
4914                  * These numbers are fairly fragile and should be made
4915                  * more robust, probably by enforcing the
4916                  * 'window size' that md_do_sync sort-of uses.
4917                  *
4918                  * Note: the following is an unsigned comparison.
4919                  */
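                /* Example of the unsigned trick (hypothetical numbers): if
                 * completing sync IO leaves curr_events 1000 below
                 * last_events, the subtraction wraps, but adding 4096 brings
                 * the sum back to 3096, which is <= 8192, so the disk still
                 * counts as idle.  A burst of 100000 sectors of new normal
                 * IO gives 104096 > 8192 and marks it busy.
                 */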
4920                 if ((curr_events - rdev->last_events + 4096) > 8192) {
4921                         rdev->last_events = curr_events;
4922                         idle = 0;
4923                 }
4924         }
4925         return idle;
4926 }
4927
4928 void md_done_sync(mddev_t *mddev, int blocks, int ok)
4929 {
4930         /* another "blocks" 512-byte blocks have been synced */
4931         atomic_sub(blocks, &mddev->recovery_active);
4932         wake_up(&mddev->recovery_wait);
4933         if (!ok) {
4934                 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4935                 md_wakeup_thread(mddev->thread);
4936                 /* stop recovery, signal do_sync ... */
4937         }
4938 }
4939
4940
4941 /* md_write_start(mddev, bi)
4942  * If we need to update some array metadata (e.g. 'active' flag
4943  * in superblock) before writing, schedule a superblock update
4944  * and wait for it to complete.
4945  */
4946 void md_write_start(mddev_t *mddev, struct bio *bi)
4947 {
4948         if (bio_data_dir(bi) != WRITE)
4949                 return;
4950
4951         BUG_ON(mddev->ro == 1);
4952         if (mddev->ro == 2) {
4953                 /* need to switch to read/write */
4954                 mddev->ro = 0;
4955                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4956                 md_wakeup_thread(mddev->thread);
4957         }
4958         atomic_inc(&mddev->writes_pending);
4959         if (mddev->in_sync) {
4960                 spin_lock_irq(&mddev->write_lock);
4961                 if (mddev->in_sync) {
4962                         mddev->in_sync = 0;
4963                         mddev->sb_dirty = 3;
4964                         md_wakeup_thread(mddev->thread);
4965                 }
4966                 spin_unlock_irq(&mddev->write_lock);
4967         }
4968         wait_event(mddev->sb_wait, mddev->sb_dirty==0);
4969 }
4970
4971 void md_write_end(mddev_t *mddev)
4972 {
4973         if (atomic_dec_and_test(&mddev->writes_pending)) {
4974                 if (mddev->safemode == 2)
4975                         md_wakeup_thread(mddev->thread);
4976                 else if (mddev->safemode_delay)
4977                         mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
4978         }
4979 }
4980
4981 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
4982
4983 #define SYNC_MARKS      10
4984 #define SYNC_MARK_STEP  (3*HZ)
4985 void md_do_sync(mddev_t *mddev)
4986 {
4987         mddev_t *mddev2;
4988         unsigned int currspeed = 0,
4989                  window;
4990         sector_t max_sectors,j, io_sectors;
4991         unsigned long mark[SYNC_MARKS];
4992         sector_t mark_cnt[SYNC_MARKS];
4993         int last_mark,m;
4994         struct list_head *tmp;
4995         sector_t last_check;
4996         int skipped = 0;
4997         struct list_head *rtmp;
4998         mdk_rdev_t *rdev;
4999
5000         /* just in case the thread restarts... */
5001         if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
5002                 return;
5003         if (mddev->ro) /* never try to sync a read-only array */
5004                 return;
5005
5006         /* we overload curr_resync somewhat here.
5007          * 0 == not engaged in resync at all
5008          * 2 == checking that there is no conflict with another sync
5009          * 1 == like 2, but have yielded to allow conflicting resync to
5010          *              commence
5011          * other == active in resync - this many blocks
5012          *
5013          * Before starting a resync we must have set curr_resync to
5014          * 2, and then checked that every "conflicting" array has curr_resync
5015          * less than ours.  When we find one that is the same or higher
5016          * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
5017          * to 1 if we choose to yield (based arbitrarily on the address of the mddev structure).
5018          * This will mean we have to start checking from the beginning again.
5019          *
5020          */
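        /* Illustration (hypothetical arrays): if md0 and md1 share a physical
         * drive and both reach this point, each starts at curr_resync == 2;
         * the one with the lower mddev address drops to 1 and sleeps on
         * resync_wait, while the other sees a conflicting value below its own
         * and goes ahead with its resync first.
         */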
5021
5022         do {
5023                 mddev->curr_resync = 2;
5024
5025         try_again:
5026                 if (kthread_should_stop()) {
5027                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5028                         goto skip;
5029                 }
5030                 ITERATE_MDDEV(mddev2,tmp) {
5031                         if (mddev2 == mddev)
5032                                 continue;
5033                         if (mddev2->curr_resync && 
5034                             match_mddev_units(mddev,mddev2)) {
5035                                 DEFINE_WAIT(wq);
5036                                 if (mddev < mddev2 && mddev->curr_resync == 2) {
5037                                         /* arbitrarily yield */
5038                                         mddev->curr_resync = 1;
5039                                         wake_up(&resync_wait);
5040                                 }
5041                                 if (mddev > mddev2 && mddev->curr_resync == 1)
5042                                         /* no need to wait here, we can wait the next
5043                                          * time 'round when curr_resync == 2
5044                                          */
5045                                         continue;
5046                                 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
5047                                 if (!kthread_should_stop() &&
5048                                     mddev2->curr_resync >= mddev->curr_resync) {
5049                                         printk(KERN_INFO "md: delaying resync of %s"
5050                                                " until %s has finished resync (they"
5051                                                " share one or more physical units)\n",
5052                                                mdname(mddev), mdname(mddev2));
5053                                         mddev_put(mddev2);
5054                                         schedule();
5055                                         finish_wait(&resync_wait, &wq);
5056                                         goto try_again;
5057                                 }
5058                                 finish_wait(&resync_wait, &wq);
5059                         }
5060                 }
5061         } while (mddev->curr_resync < 2);
5062
5063         j = 0;
5064         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5065                 /* resync follows the size requested by the personality,
5066                  * which defaults to physical size, but can be virtual size
5067                  */
5068                 max_sectors = mddev->resync_max_sectors;
5069                 mddev->resync_mismatches = 0;
5070                 /* we don't use the checkpoint if there's a bitmap */
5071                 if (!mddev->bitmap &&
5072                     !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
5073                         j = mddev->recovery_cp;
5074         } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
5075                 max_sectors = mddev->size << 1;
5076         else {
5077                 /* recovery follows the physical size of devices */
5078                 max_sectors = mddev->size << 1;
5079                 j = MaxSector;
5080                 ITERATE_RDEV(mddev,rdev,rtmp)
5081                         if (rdev->raid_disk >= 0 &&
5082                             !test_bit(Faulty, &rdev->flags) &&
5083                             !test_bit(In_sync, &rdev->flags) &&
5084                             rdev->recovery_offset < j)
5085                                 j = rdev->recovery_offset;
5086         }
5087
5088         printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
5089         printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
5090                 " %d KB/sec/disc.\n", speed_min(mddev));
5091         printk(KERN_INFO "md: using maximum available idle IO bandwidth "
5092                "(but not more than %d KB/sec) for reconstruction.\n",
5093                speed_max(mddev));
5094
5095         is_mddev_idle(mddev); /* this also initializes IO event counters */
5096
5097         io_sectors = 0;
5098         for (m = 0; m < SYNC_MARKS; m++) {
5099                 mark[m] = jiffies;
5100                 mark_cnt[m] = io_sectors;
5101         }
5102         last_mark = 0;
5103         mddev->resync_mark = mark[last_mark];
5104         mddev->resync_mark_cnt = mark_cnt[last_mark];
5105
5106         /*
5107          * Tune reconstruction:
5108          */
5109         window = 32*(PAGE_SIZE/512);
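        /* e.g. with 4 KiB pages this is 32*8 = 256 sectors, which the
         * message below reports as a 128k window (window/2 KiB).
         */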
5110         printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
5111                 window/2,(unsigned long long) max_sectors/2);
5112
5113         atomic_set(&mddev->recovery_active, 0);
5114         init_waitqueue_head(&mddev->recovery_wait);
5115         last_check = 0;
5116
5117         if (j>2) {
5118                 printk(KERN_INFO 
5119                         "md: resuming recovery of %s from checkpoint.\n",
5120                         mdname(mddev));
5121                 mddev->curr_resync = j;
5122         }
5123
5124         while (j < max_sectors) {
5125                 sector_t sectors;
5126
5127                 skipped = 0;
5128                 sectors = mddev->pers->sync_request(mddev, j, &skipped,
5129                                             currspeed < speed_min(mddev));
5130                 if (sectors == 0) {
5131                         set_bit(MD_RECOVERY_ERR, &mddev->recovery);
5132                         goto out;
5133                 }
5134
5135                 if (!skipped) { /* actual IO requested */
5136                         io_sectors += sectors;
5137                         atomic_add(sectors, &mddev->recovery_active);
5138                 }
5139
5140                 j += sectors;
5141                 if (j>1) mddev->curr_resync = j;
5142                 if (last_check == 0)
5143                         /* this is the earliest that rebuild will be
5144                          * visible in /proc/mdstat
5145                          */
5146                         md_new_event(mddev);
5147
5148                 if (last_check + window > io_sectors || j == max_sectors)
5149                         continue;
5150
5151                 last_check = io_sectors;
5152
5153                 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
5154                     test_bit(MD_RECOVERY_ERR, &mddev->recovery))
5155                         break;
5156
5157         repeat:
5158                 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
5159                         /* step marks */
5160                         int next = (last_mark+1) % SYNC_MARKS;
5161
5162                         mddev->resync_mark = mark[next];
5163                         mddev->resync_mark_cnt = mark_cnt[next];
5164                         mark[next] = jiffies;
5165                         mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
5166                         last_mark = next;
5167                 }
5168
5169
5170                 if (kthread_should_stop()) {
5171                         /*
5172                          * got a signal, exit.
5173                          */
5174                         printk(KERN_INFO 
5175                                 "md: md_do_sync() got signal ... exiting\n");
5176                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5177                         goto out;
5178                 }
5179
5180                 /*
5181                  * this loop exits only when we are slower than
5182                  * the 'hard' speed limit, or the system was IO-idle for
5183                  * a jiffy.
5184                  * the system might be non-idle CPU-wise, but we only care
5185                  * about not overloading the IO subsystem. (things like an
5186                  * e2fsck being done on the RAID array should execute fast)
5187                  */
5188                 mddev->queue->unplug_fn(mddev->queue);
5189                 cond_resched();
5190
5191                 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
5192                         /((jiffies-mddev->resync_mark)/HZ +1) +1;
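                /* Illustrative figures: 60000 sectors completed since the
                 * current mark over ~14 seconds gives 30000/(14+1) + 1 =
                 * 2001 KB/sec, compared against the soft and hard limits
                 * below.
                 */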
5193
5194                 if (currspeed > speed_min(mddev)) {
5195                         if ((currspeed > speed_max(mddev)) ||
5196                                         !is_mddev_idle(mddev)) {
5197                                 msleep(500);
5198                                 goto repeat;
5199                         }
5200                 }
5201         }
5202         printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
5203         /*
5204          * this also signals 'finished resyncing' to md_stop
5205          */
5206  out:
5207         mddev->queue->unplug_fn(mddev->queue);
5208
5209         wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
5210
5211         /* tell personality that we are finished */
5212         mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
5213
5214         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5215             test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
5216             !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
5217             mddev->curr_resync > 2) {
5218                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
5219                         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5220                                 if (mddev->curr_resync >= mddev->recovery_cp) {
5221                                         printk(KERN_INFO
5222                                                "md: checkpointing recovery of %s.\n",
5223                                                mdname(mddev));
5224                                         mddev->recovery_cp = mddev->curr_resync;
5225                                 }
5226                         } else
5227                                 mddev->recovery_cp = MaxSector;
5228                 } else {
5229                         if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5230                                 mddev->curr_resync = MaxSector;
5231                         ITERATE_RDEV(mddev,rdev,rtmp)
5232                                 if (rdev->raid_disk >= 0 &&
5233                                     !test_bit(Faulty, &rdev->flags) &&
5234                                     !test_bit(In_sync, &rdev->flags) &&
5235                                     rdev->recovery_offset < mddev->curr_resync)
5236                                         rdev->recovery_offset = mddev->curr_resync;
5237                         mddev->sb_dirty = 1;
5238                 }
5239         }
5240
5241  skip:
5242         mddev->curr_resync = 0;
5243         wake_up(&resync_wait);
5244         set_bit(MD_RECOVERY_DONE, &mddev->recovery);
5245         md_wakeup_thread(mddev->thread);
5246 }
5247 EXPORT_SYMBOL_GPL(md_do_sync);
5248
5249
5250 /*
5251  * This routine is regularly called by all per-raid-array threads to
5252  * deal with generic issues like resync and super-block update.
5253  * Raid personalities that don't have a thread (linear/raid0) do not
5254  * need this as they never do any recovery or update the superblock.
5255  *
5256  * It does not do any resync itself, but rather "forks" off other threads
5257  * to do that as needed.
5258  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
5259  * "->recovery" and create a thread at ->sync_thread.
5260  * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
5261  * and wakes up this thread, which will reap the sync thread and finish up.
5262  * This thread also removes any faulty devices (with nr_pending == 0).
5263  *
5264  * The overall approach is:
5265  *  1/ if the superblock needs updating, update it.
5266  *  2/ If a recovery thread is running, don't do anything else.
5267  *  3/ If recovery has finished, clean up, possibly marking spares active.
5268  *  4/ If there are any faulty devices, remove them.
5269  *  5/ If array is degraded, try to add spare devices.
5270  *  6/ If array has spares or is not in-sync, start a resync thread.
5271  */
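/*
 * As a concrete illustration of the flow implemented below: when md_error()
 * marks a device Faulty it sets MD_RECOVERY_NEEDED; the next call here removes
 * the failed device (step 4), hot-adds any available spare (step 5), then sets
 * MD_RECOVERY_RUNNING and registers md_do_sync as ->sync_thread (step 6).
 * Once md_do_sync sets MD_RECOVERY_DONE, a later call reaps that thread,
 * activates the spare via ->spare_active() and writes out the superblock.
 */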
5272 void md_check_recovery(mddev_t *mddev)
5273 {
5274         mdk_rdev_t *rdev;
5275         struct list_head *rtmp;
5276
5277
5278         if (mddev->bitmap)
5279                 bitmap_daemon_work(mddev->bitmap);
5280
5281         if (mddev->ro)
5282                 return;
5283
5284         if (signal_pending(current)) {
5285                 if (mddev->pers->sync_request) {
5286                         printk(KERN_INFO "md: %s in immediate safe mode\n",
5287                                mdname(mddev));
5288                         mddev->safemode = 2;
5289                 }
5290                 flush_signals(current);
5291         }
5292
5293         if ( ! (
5294                 mddev->sb_dirty ||
5295                 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
5296                 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
5297                 (mddev->safemode == 1) ||
5298                 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
5299                  && !mddev->in_sync && mddev->recovery_cp == MaxSector)
5300                 ))
5301                 return;
5302
5303         if (mddev_trylock(mddev)) {
5304                 int spares =0;
5305
5306                 spin_lock_irq(&mddev->write_lock);
5307                 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
5308                     !mddev->in_sync && mddev->recovery_cp == MaxSector) {
5309                         mddev->in_sync = 1;
5310                         mddev->sb_dirty = 3;
5311                 }
5312                 if (mddev->safemode == 1)
5313                         mddev->safemode = 0;
5314                 spin_unlock_irq(&mddev->write_lock);
5315
5316                 if (mddev->sb_dirty)
5317                         md_update_sb(mddev);
5318
5319
5320                 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
5321                     !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
5322                         /* resync/recovery still happening */
5323                         clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5324                         goto unlock;
5325                 }
5326                 if (mddev->sync_thread) {
5327                         /* resync has finished, collect result */
5328                         md_unregister_thread(mddev->sync_thread);
5329                         mddev->sync_thread = NULL;
5330                         if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
5331                             !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
5332                                 /* success...*/
5333                                 /* activate any spares */
5334                                 mddev->pers->spare_active(mddev);
5335                         }
5336                         md_update_sb(mddev);
5337
5338                         /* if the array is no longer degraded, then any saved_raid_disk
5339                          * information must be scrapped
5340                          */
5341                         if (!mddev->degraded)
5342                                 ITERATE_RDEV(mddev,rdev,rtmp)
5343                                         rdev->saved_raid_disk = -1;
5344
5345                         mddev->recovery = 0;
5346                         /* flag recovery needed just to double check */
5347                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5348                         md_new_event(mddev);
5349                         goto unlock;
5350                 }
5351                 /* Clear some bits that don't mean anything, but
5352                  * might be left set
5353                  */
5354                 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5355                 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
5356                 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
5357                 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
5358
5359                 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
5360                         goto unlock;
5361                 /* no recovery is running.
5362                  * remove any failed drives, then
5363                  * add spares if possible.
5364                  * Spares are also removed and re-added, to allow
5365                  * the personality to fail the re-add.
5366                  */
5367                 ITERATE_RDEV(mddev,rdev,rtmp)
5368                         if (rdev->raid_disk >= 0 &&
5369                             (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
5370                             atomic_read(&rdev->nr_pending)==0) {
5371                                 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
5372                                         char nm[20];
5373                                         sprintf(nm,"rd%d", rdev->raid_disk);
5374                                         sysfs_remove_link(&mddev->kobj, nm);
5375                                         rdev->raid_disk = -1;
5376                                 }
5377                         }
5378
5379                 if (mddev->degraded) {
5380                         ITERATE_RDEV(mddev,rdev,rtmp)
5381                                 if (rdev->raid_disk < 0
5382                                     && !test_bit(Faulty, &rdev->flags)) {
5383                                         rdev->recovery_offset = 0;
5384                                         if (mddev->pers->hot_add_disk(mddev,rdev)) {
5385                                                 char nm[20];
5386                                                 sprintf(nm, "rd%d", rdev->raid_disk);
5387                                                 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
5388                                                 spares++;
5389                                                 md_new_event(mddev);
5390                                         } else
5391                                                 break;
5392                                 }
5393                 }
5394
5395                 if (spares) {
5396                         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5397                         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
5398                 } else if (mddev->recovery_cp < MaxSector) {
5399                         set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
5400                 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
5401                         /* nothing to be done ... */
5402                         goto unlock;
5403
5404                 if (mddev->pers->sync_request) {
5405                         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
5406                         if (spares && mddev->bitmap && ! mddev->bitmap->file) {
5407                                 /* We are adding a device or devices to an array
5408                                  * which has the bitmap stored on all devices.
5409                                  * So make sure all bitmap pages get written
5410                                  */
5411                                 bitmap_write_all(mddev->bitmap);
5412                         }
5413                         mddev->sync_thread = md_register_thread(md_do_sync,
5414                                                                 mddev,
5415                                                                 "%s_resync");
5416                         if (!mddev->sync_thread) {
5417                                 printk(KERN_ERR "%s: could not start resync"
5418                                         " thread...\n", 
5419                                         mdname(mddev));
5420                                 /* leave the spares where they are, it shouldn't hurt */
5421                                 mddev->recovery = 0;
5422                         } else
5423                                 md_wakeup_thread(mddev->sync_thread);
5424                         md_new_event(mddev);
5425                 }
5426         unlock:
5427                 mddev_unlock(mddev);
5428         }
5429 }
5430
5431 static int md_notify_reboot(struct notifier_block *this,
5432                             unsigned long code, void *x)
5433 {
5434         struct list_head *tmp;
5435         mddev_t *mddev;
5436
5437         if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {
5438
5439                 printk(KERN_INFO "md: stopping all md devices.\n");
5440
5441                 ITERATE_MDDEV(mddev,tmp)
5442                         if (mddev_trylock(mddev)) {
5443                                 do_md_stop (mddev, 1);
5444                                 mddev_unlock(mddev);
5445                         }
5446                 /*
5447                  * certain more exotic SCSI devices are known to be
5448                  * volatile with respect to too-early system reboots. While
5449                  * the right place to handle this issue is the individual
5450                  * driver, we do want to have a safe RAID driver ...
5451                  */
5452                 mdelay(1000*1);
5453         }
5454         return NOTIFY_DONE;
5455 }
5456
5457 static struct notifier_block md_notifier = {
5458         .notifier_call  = md_notify_reboot,
5459         .next           = NULL,
5460         .priority       = INT_MAX, /* before any real devices */
5461 };
5462
5463 static void md_geninit(void)
5464 {
5465         struct proc_dir_entry *p;
5466
5467         dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
5468
5469         p = create_proc_entry("mdstat", S_IRUGO, NULL);
5470         if (p)
5471                 p->proc_fops = &md_seq_fops;
5472 }
5473
5474 static int __init md_init(void)
5475 {
5476         int minor;
5477
5478         printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
5479                         " MD_SB_DISKS=%d\n",
5480                         MD_MAJOR_VERSION, MD_MINOR_VERSION,
5481                         MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
5482         printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
5483                         BITMAP_MINOR);
5484
5485         if (register_blkdev(MAJOR_NR, "md"))
5486                 return -1;
5487         if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
5488                 unregister_blkdev(MAJOR_NR, "md");
5489                 return -1;
5490         }
5491         devfs_mk_dir("md");
5492         blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
5493                                 md_probe, NULL, NULL);
5494         blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
5495                             md_probe, NULL, NULL);
5496
5497         for (minor=0; minor < MAX_MD_DEVS; ++minor)
5498                 devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
5499                                 S_IFBLK|S_IRUSR|S_IWUSR,
5500                                 "md/%d", minor);
5501
5502         for (minor=0; minor < MAX_MD_DEVS; ++minor)
5503                 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
5504                               S_IFBLK|S_IRUSR|S_IWUSR,
5505                               "md/mdp%d", minor);
5506
5507
5508         register_reboot_notifier(&md_notifier);
5509         raid_table_header = register_sysctl_table(raid_root_table, 1);
5510
5511         md_geninit();
5512         return (0);
5513 }
5514
5515
5516 #ifndef MODULE
5517
5518 /*
5519  * Searches all registered partitions for autorun RAID arrays
5520  * at boot time.
5521  */
5522 static dev_t detected_devices[128];
5523 static int dev_cnt;
5524
5525 void md_autodetect_dev(dev_t dev)
5526 {
5527         if (dev_cnt >= 0 && dev_cnt < 127)
5528                 detected_devices[dev_cnt++] = dev;
5529 }
5530
5531
5532 static void autostart_arrays(int part)
5533 {
5534         mdk_rdev_t *rdev;
5535         int i;
5536
5537         printk(KERN_INFO "md: Autodetecting RAID arrays.\n");
5538
5539         for (i = 0; i < dev_cnt; i++) {
5540                 dev_t dev = detected_devices[i];
5541
5542                 rdev = md_import_device(dev,0, 0);
5543                 if (IS_ERR(rdev))
5544                         continue;
5545
5546                 if (test_bit(Faulty, &rdev->flags)) {
5547                         MD_BUG();
5548                         continue;
5549                 }
5550                 list_add(&rdev->same_set, &pending_raid_disks);
5551         }
5552         dev_cnt = 0;
5553
5554         autorun_devices(part);
5555 }
5556
5557 #endif
5558
5559 static __exit void md_exit(void)
5560 {
5561         mddev_t *mddev;
5562         struct list_head *tmp;
5563         int i;
5564         blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
5565         blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
5566         for (i=0; i < MAX_MD_DEVS; i++)
5567                 devfs_remove("md/%d", i);
5568         for (i=0; i < MAX_MD_DEVS; i++)
5569                 devfs_remove("md/mdp%d", i);
5570
5571         devfs_remove("md");
5572
5573         unregister_blkdev(MAJOR_NR,"md");
5574         unregister_blkdev(mdp_major, "mdp");
5575         unregister_reboot_notifier(&md_notifier);
5576         unregister_sysctl_table(raid_table_header);
5577         remove_proc_entry("mdstat", NULL);
5578         ITERATE_MDDEV(mddev,tmp) {
5579                 struct gendisk *disk = mddev->gendisk;
5580                 if (!disk)
5581                         continue;
5582                 export_array(mddev);
5583                 del_gendisk(disk);
5584                 put_disk(disk);
5585                 mddev->gendisk = NULL;
5586                 mddev_put(mddev);
5587         }
5588 }
5589
5590 module_init(md_init)
5591 module_exit(md_exit)
5592
5593 static int get_ro(char *buffer, struct kernel_param *kp)
5594 {
5595         return sprintf(buffer, "%d", start_readonly);
5596 }
5597 static int set_ro(const char *val, struct kernel_param *kp)
5598 {
5599         char *e;
5600         int num = simple_strtoul(val, &e, 10);
5601         if (*val && (*e == '\0' || *e == '\n')) {
5602                 start_readonly = num;
5603                 return 0;
5604         }
5605         return -EINVAL;
5606 }
5607
5608 module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
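/*
 * Usage sketch (the exact paths are an assumption; they depend on how md is
 * built, md_mod is assumed here):
 *   md_mod.start_ro=1 on the kernel command line, or
 *   echo 1 > /sys/module/md_mod/parameters/start_ro
 * makes newly started arrays come up auto-read-only until the first write,
 * while start_dirty_degraded (below) allows dirty degraded arrays to start.
 */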
5609 module_param(start_dirty_degraded, int, 0644);
5610
5611
5612 EXPORT_SYMBOL(register_md_personality);
5613 EXPORT_SYMBOL(unregister_md_personality);
5614 EXPORT_SYMBOL(md_error);
5615 EXPORT_SYMBOL(md_done_sync);
5616 EXPORT_SYMBOL(md_write_start);
5617 EXPORT_SYMBOL(md_write_end);
5618 EXPORT_SYMBOL(md_register_thread);
5619 EXPORT_SYMBOL(md_unregister_thread);
5620 EXPORT_SYMBOL(md_wakeup_thread);
5621 EXPORT_SYMBOL(md_check_recovery);
5622 MODULE_LICENSE("GPL");
5623 MODULE_ALIAS("md");
5624 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);