[PATCH] md: fix is_mddev_idle calculation now that disk/sector accounting happens...
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d002b8301fc22aa4b8ed5746223719f57dc5b76f..78c7418478d63e86f55127026d227bfae18e94a3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1182,6 +1182,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
 {
        mdk_rdev_t *same_pdev;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
+       struct kobject *ko;
 
        if (rdev->mddev) {
                MD_BUG();
@@ -1221,7 +1222,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        rdev->kobj.parent = &mddev->kobj;
        kobject_add(&rdev->kobj);
 
-       sysfs_create_link(&rdev->kobj, &rdev->bdev->bd_disk->kobj, "block");
+       if (rdev->bdev->bd_part)
+               ko = &rdev->bdev->bd_part->kobj;
+       else
+               ko = &rdev->bdev->bd_disk->kobj;
+       sysfs_create_link(&rdev->kobj, ko, "block");
        return 0;
 }
 
@@ -1499,7 +1504,7 @@ struct rdev_sysfs_entry {
 };
 
 static ssize_t
-rdev_show_state(mdk_rdev_t *rdev, char *page)
+state_show(mdk_rdev_t *rdev, char *page)
 {
        char *sep = "";
        int len=0;
@@ -1520,13 +1525,11 @@ rdev_show_state(mdk_rdev_t *rdev, char *page)
        return len+sprintf(page+len, "\n");
 }
 
-static struct rdev_sysfs_entry rdev_state = {
-       .attr = {.name = "state", .mode = S_IRUGO },
-       .show = rdev_show_state,
-};
+static struct rdev_sysfs_entry
+rdev_state = __ATTR_RO(state);
 
 static ssize_t
-rdev_show_super(mdk_rdev_t *rdev, char *page)
+super_show(mdk_rdev_t *rdev, char *page)
 {
        if (rdev->sb_loaded && rdev->sb_size) {
                memcpy(page, page_address(rdev->sb_page), rdev->sb_size);
@@ -1534,10 +1537,8 @@ rdev_show_super(mdk_rdev_t *rdev, char *page)
        } else
                return 0;
 }
-static struct rdev_sysfs_entry rdev_super = {
-       .attr = {.name = "super", .mode = S_IRUGO },
-       .show = rdev_show_super,
-};
+static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super);
+
 static struct attribute *rdev_default_attrs[] = {
        &rdev_state.attr,
        &rdev_super.attr,
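
The rename from rdev_show_state()/rdev_show_super() to state_show()/super_show() is what lets the declarations collapse to __ATTR_RO(): the macro stringifies the attribute name and token-pastes "_show" onto it to find the handler. Roughly, as a sketch of the helpers in include/linux/sysfs.h of this era (exact fields, e.g. .owner, may differ):

    #define __ATTR(_name, _mode, _show, _store) {                        \
            .attr  = { .name = __stringify(_name), .mode = _mode },      \
            .show  = _show,                                               \
            .store = _store,                                              \
    }

    #define __ATTR_RO(_name) {                                            \
            .attr = { .name = __stringify(_name), .mode = 0444 },        \
            .show = _name##_show,                                         \
    }

So __ATTR_RO(state) expands to the same initializer that was previously written out by hand, provided the show routine is named state_show.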
@@ -1723,10 +1724,10 @@ static void analyze_sbs(mddev_t * mddev)
 }
 
 static ssize_t
-md_show_level(mddev_t *mddev, char *page)
+level_show(mddev_t *mddev, char *page)
 {
        mdk_personality_t *p = mddev->pers;
-       if (p == NULL)
+       if (p == NULL && mddev->raid_disks == 0)
                return 0;
        if (mddev->level >= 0)
                return sprintf(page, "RAID-%d\n", mddev->level);
@@ -1734,26 +1735,22 @@ md_show_level(mddev_t *mddev, char *page)
                return sprintf(page, "%s\n", p->name);
 }
 
-static struct md_sysfs_entry md_level = {
-       .attr = {.name = "level", .mode = S_IRUGO },
-       .show = md_show_level,
-};
+static struct md_sysfs_entry md_level = __ATTR_RO(level);
 
 static ssize_t
-md_show_rdisks(mddev_t *mddev, char *page)
+raid_disks_show(mddev_t *mddev, char *page)
 {
+       if (mddev->raid_disks == 0)
+               return 0;
        return sprintf(page, "%d\n", mddev->raid_disks);
 }
 
-static struct md_sysfs_entry md_raid_disks = {
-       .attr = {.name = "raid_disks", .mode = S_IRUGO },
-       .show = md_show_rdisks,
-};
+static struct md_sysfs_entry md_raid_disks = __ATTR_RO(raid_disks);
 
 static ssize_t
-md_show_scan(mddev_t *mddev, char *page)
+action_show(mddev_t *mddev, char *page)
 {
-       char *type = "none";
+       char *type = "idle";
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) {
                if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
@@ -1770,66 +1767,84 @@ md_show_scan(mddev_t *mddev, char *page)
 }
 
 static ssize_t
-md_store_scan(mddev_t *mddev, const char *page, size_t len)
+action_store(mddev_t *mddev, const char *page, size_t len)
 {
-       int canscan=0;
+       if (!mddev->pers || !mddev->pers->sync_request)
+               return -EINVAL;
+
+       if (strcmp(page, "idle")==0 || strcmp(page, "idle\n")==0) {
+               if (mddev->sync_thread) {
+                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+                       md_unregister_thread(mddev->sync_thread);
+                       mddev->sync_thread = NULL;
+                       mddev->recovery = 0;
+               }
+               return len;
+       }
 
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
-       down(&mddev->reconfig_sem);
-       if (mddev->pers && mddev->pers->sync_request)
-               canscan=1;
-       up(&mddev->reconfig_sem);
-       if (!canscan)
-               return -EINVAL;
-
-       if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0)
-               set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
-       else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0)
-               return -EINVAL;
-       set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
-       set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
-       set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       if (strcmp(page, "resync")==0 || strcmp(page, "resync\n")==0 ||
+           strcmp(page, "recover")==0 || strcmp(page, "recover\n")==0)
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       else {
+               if (strcmp(page, "check")==0 || strcmp(page, "check\n")==0)
+                       set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+               else if (strcmp(page, "repair")!=0 && strcmp(page, "repair\n")!=0)
+                       return -EINVAL;
+               set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+               set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+       }
        md_wakeup_thread(mddev->thread);
        return len;
 }
 
 static ssize_t
-md_show_mismatch(mddev_t *mddev, char *page)
+mismatch_cnt_show(mddev_t *mddev, char *page)
 {
        return sprintf(page, "%llu\n",
                       (unsigned long long) mddev->resync_mismatches);
 }
 
-static struct md_sysfs_entry md_scan_mode = {
-       .attr = {.name = "scan_mode", .mode = S_IRUGO|S_IWUSR },
-       .show = md_show_scan,
-       .store = md_store_scan,
-};
+static struct md_sysfs_entry
+md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
 
-static struct md_sysfs_entry md_mismatches = {
-       .attr = {.name = "mismatch_cnt", .mode = S_IRUGO },
-       .show = md_show_mismatch,
-};
+
+static struct md_sysfs_entry
+md_mismatches = __ATTR_RO(mismatch_cnt);
 
 static struct attribute *md_default_attrs[] = {
        &md_level.attr,
        &md_raid_disks.attr,
+       NULL,
+};
+
+static struct attribute *md_redundancy_attrs[] = {
        &md_scan_mode.attr,
        &md_mismatches.attr,
        NULL,
 };
+static struct attribute_group md_redundancy_group = {
+       .name = NULL,
+       .attrs = md_redundancy_attrs,
+};
+
 
 static ssize_t
 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
        struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
        mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
+       ssize_t rv;
 
        if (!entry->show)
                return -EIO;
-       return entry->show(mddev, page);
+       mddev_lock(mddev);
+       rv = entry->show(mddev, page);
+       mddev_unlock(mddev);
+       return rv;
 }
 
 static ssize_t
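
With action_store() wired up, the per-array sync_action attribute accepts exactly the strings handled above ("idle", "resync", "recover", "check", "repair"), and action_show() reports the current state. A hypothetical userspace sketch, assuming the conventional /sys/block/md0/md/ location of the mddev kobject:

    #include <stdio.h>

    int main(void)
    {
            /* Request a consistency check; mismatches found are counted
             * in mismatch_cnt (resync_mismatches) rather than corrected. */
            FILE *f = fopen("/sys/block/md0/md/sync_action", "w");
            if (!f)
                    return 1;
            fputs("check\n", f);
            return fclose(f) ? 1 : 0;
    }

Writing "idle" tears down a running sync thread via MD_RECOVERY_INTR, while "repair" behaves like "check" but omits MD_RECOVERY_CHECK, so mismatched blocks are rewritten.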
@@ -1838,10 +1853,14 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
 {
        struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
        mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
+       ssize_t rv;
 
        if (!entry->store)
                return -EIO;
-       return entry->store(mddev, page, length);
+       mddev_lock(mddev);
+       rv = entry->store(mddev, page, length);
+       mddev_unlock(mddev);
+       return rv;
 }
 
 static void md_free(struct kobject *ko)
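
For context, md_attr_show() and md_attr_store() are reached through the sysfs_ops of the mddev kobject's kobj_type; the wiring elsewhere in md.c looks roughly like this (a sketch, not part of this patch):

    static struct sysfs_ops md_sysfs_ops = {
            .show  = md_attr_show,
            .store = md_attr_store,
    };

    static struct kobj_type md_ktype = {
            .release       = md_free,
            .sysfs_ops     = &md_sysfs_ops,
            .default_attrs = md_default_attrs,
    };

Taking mddev_lock()/mddev_unlock() in these common wrappers gives every *_show/*_store handler the reconfiguration lock, which is why the explicit down()/up() of reconfig_sem inside the old md_store_scan() could be dropped.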
@@ -2048,6 +2067,11 @@ static int do_md_run(mddev_t * mddev)
                bitmap_destroy(mddev);
                return err;
        }
+       if (mddev->pers->sync_request)
+               sysfs_create_group(&mddev->kobj, &md_redundancy_group);
+       else if (mddev->ro == 2) /* auto-readonly not meaningful */
+               mddev->ro = 0;
+
        atomic_set(&mddev->writes_pending,0);
        mddev->safemode = 0;
        mddev->safemode_timer.function = md_safemode_timeout;
@@ -2156,6 +2180,9 @@ static int do_md_stop(mddev_t * mddev, int ro)
                                set_disk_ro(disk, 0);
                        blk_queue_make_request(mddev->queue, md_fail_request);
                        mddev->pers->stop(mddev);
+                       if (mddev->pers->sync_request)
+                               sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+
                        module_put(mddev->pers->owner);
                        mddev->pers = NULL;
                        if (mddev->ro)
@@ -3129,7 +3156,7 @@ static int md_ioctl(struct inode *inode, struct file *file,
                if (cnt > 0 ) {
                        printk(KERN_WARNING
                               "md: %s(pid %d) used deprecated START_ARRAY ioctl. "
-                              "This will not be supported beyond 2.6\n",
+                              "This will not be supported beyond July 2006\n",
                               current->comm, current->pid);
                        cnt--;
                }
@@ -3408,21 +3435,26 @@ static int md_thread(void * arg)
         */
 
        allow_signal(SIGKILL);
-       complete(thread->event);
        while (!kthread_should_stop()) {
-               void (*run)(mddev_t *);
 
-               wait_event_interruptible_timeout(thread->wqueue,
-                                                test_bit(THREAD_WAKEUP, &thread->flags)
-                                                || kthread_should_stop(),
-                                                thread->timeout);
+               /* We need to wait INTERRUPTIBLE so that
+                * we don't add to the load-average.
+                * That means we need to be sure no signals are
+                * pending
+                */
+               if (signal_pending(current))
+                       flush_signals(current);
+
+               wait_event_interruptible_timeout
+                       (thread->wqueue,
+                        test_bit(THREAD_WAKEUP, &thread->flags)
+                        || kthread_should_stop(),
+                        thread->timeout);
                try_to_freeze();
 
                clear_bit(THREAD_WAKEUP, &thread->flags);
 
-               run = thread->run;
-               if (run)
-                       run(thread->mddev);
+               thread->run(thread->mddev);
        }
 
        return 0;
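
The loop sleeps on thread->wqueue until THREAD_WAKEUP is set; the waking side, unchanged by this patch, is essentially (sketch, minus debug printout):

    void md_wakeup_thread(mdk_thread_t *thread)
    {
            if (thread) {
                    set_bit(THREAD_WAKEUP, &thread->flags);
                    wake_up(&thread->wqueue);
            }
    }

Because the wait is interruptible (so an idle MD thread does not count toward the load average), a pending signal would make wait_event_interruptible_timeout() return immediately; flushing signals at the top of each iteration keeps the loop from busy-spinning.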
@@ -3441,7 +3473,6 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
                                 const char *name)
 {
        mdk_thread_t *thread;
-       struct completion event;
 
        thread = kmalloc(sizeof(mdk_thread_t), GFP_KERNEL);
        if (!thread)
@@ -3450,18 +3481,14 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
        memset(thread, 0, sizeof(mdk_thread_t));
        init_waitqueue_head(&thread->wqueue);
 
-       init_completion(&event);
-       thread->event = &event;
        thread->run = run;
        thread->mddev = mddev;
-       thread->name = name;
        thread->timeout = MAX_SCHEDULE_TIMEOUT;
        thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev));
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
        }
-       wait_for_completion(&event);
        return thread;
 }
 
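
Dropping the completion handshake is safe because kthread_run() does not return until the new task exists, and stopping the thread is now the kthread API's job rather than a signal's. The teardown counterpart in md.c is roughly:

    void md_unregister_thread(mdk_thread_t *thread)
    {
            /* kthread_stop() makes kthread_should_stop() return true in
             * md_thread(), wakes the task and waits for it to exit. */
            kthread_stop(thread->tsk);
            kfree(thread);
    }

This pairs with the kthread_should_stop() checks that replace the signal_pending() tests in md_do_sync() below.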
@@ -3709,13 +3736,15 @@ static int md_seq_show(struct seq_file *seq, void *v)
                if (mddev->pers) {
                        mddev->pers->status (seq, mddev);
                        seq_printf(seq, "\n      ");
-                       if (mddev->curr_resync > 2) {
-                               status_resync (seq, mddev);
-                               seq_printf(seq, "\n      ");
-                       } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
-                               seq_printf(seq, "\tresync=DELAYED\n      ");
-                       else if (mddev->recovery_cp < MaxSector)
-                               seq_printf(seq, "\tresync=PENDING\n      ");
+                       if (mddev->pers->sync_request) {
+                               if (mddev->curr_resync > 2) {
+                                       status_resync (seq, mddev);
+                                       seq_printf(seq, "\n      ");
+                               } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
+                                       seq_printf(seq, "\tresync=DELAYED\n      ");
+                               else if (mddev->recovery_cp < MaxSector)
+                                       seq_printf(seq, "\tresync=PENDING\n      ");
+                       }
                } else
                        seq_printf(seq, "\n       ");
 
@@ -3817,11 +3846,20 @@ static int is_mddev_idle(mddev_t *mddev)
                curr_events = disk_stat_read(disk, sectors[0]) + 
                                disk_stat_read(disk, sectors[1]) - 
                                atomic_read(&disk->sync_io);
-               /* Allow some slack between valud of curr_events and last_events,
-                * as there are some uninteresting races.
+               /* The difference between curr_events and last_events
+                * will be affected by any new non-sync IO (making
+                * curr_events bigger) and any difference in the amount of
+                * in-flight sync IO (making curr_events bigger or smaller)
+                * The amount in-flight is currently limited to
+                * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
+                * which is at most 4096 sectors.
+                * These numbers are fairly fragile and should be made
+                * more robust, probably by enforcing the
+                * 'window size' that md_do_sync sort-of uses.
+                *
                 * Note: the following is an unsigned comparison.
                 */
-               if ((curr_events - rdev->last_events + 32) > 64) {
+               if ((curr_events - rdev->last_events + 4096) > 8192) {
                        rdev->last_events = curr_events;
                        idle = 0;
                }
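
The widened slack turns the idle test into a symmetric window of +/- 4096 sectors around last_events, matching the worst-case amount of in-flight sync IO quoted in the comment. A hypothetical standalone helper showing the same unsigned arithmetic (names are illustrative, not from md.c):

    #include <stdbool.h>

    /* "Idle" means curr stays within +/- 4096 sectors of last; the
     * unsigned comparison keeps the window symmetric even when curr is
     * smaller than last because in-flight sync IO completed meanwhile. */
    static bool events_within_window(unsigned long curr, unsigned long last)
    {
            return (curr - last + 4096) <= 8192;
    }

For example, curr = last - 4000 gives 96 <= 8192 and the device still counts as idle, while curr = last + 5000 gives 9096 > 8192 and resync is throttled back toward the minimum speed.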
@@ -3923,9 +3961,7 @@ static void md_do_sync(mddev_t *mddev)
                mddev->curr_resync = 2;
 
        try_again:
-               if (signal_pending(current) ||
-                   kthread_should_stop()) {
-                       flush_signals(current);
+               if (kthread_should_stop()) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto skip;
                }
@@ -3945,9 +3981,8 @@ static void md_do_sync(mddev_t *mddev)
                                         * time 'round when curr_resync == 2
                                         */
                                        continue;
-                               prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
-                               if (!signal_pending(current) &&
-                                   !kthread_should_stop() &&
+                               prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
+                               if (!kthread_should_stop() &&
                                    mddev2->curr_resync >= mddev->curr_resync) {
                                        printk(KERN_INFO "md: delaying resync of %s"
                                               " until %s has finished resync (they"
@@ -4056,13 +4091,12 @@ static void md_do_sync(mddev_t *mddev)
                }
 
 
-               if (signal_pending(current) || kthread_should_stop()) {
+               if (kthread_should_stop()) {
                        /*
                         * got a signal, exit.
                         */
                        printk(KERN_INFO 
                                "md: md_do_sync() got signal ... exiting\n");
-                       flush_signals(current);
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        goto out;
                }
@@ -4084,7 +4118,7 @@ static void md_do_sync(mddev_t *mddev)
                if (currspeed > sysctl_speed_limit_min) {
                        if ((currspeed > sysctl_speed_limit_max) ||
                                        !is_mddev_idle(mddev)) {
-                               msleep_interruptible(250);
+                               msleep(500);
                                goto repeat;
                        }
                }