/*
 * Copyright (C) 2015, SUSE
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 */
#include <linux/module.h>
#include <linux/dlm.h>
#include <linux/sched.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "bitmap.h"
#include "md-cluster.h"
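/*
 * LVB_SIZE is the size in bytes of a DLM lock value block (LVB), used below
 * to carry resync ranges and cluster messages between nodes.  NEW_DEV_TIMEOUT
 * bounds how long process_add_new_disk() waits for userspace to acknowledge
 * a NEWDISK message; it is passed to wait_for_completion_timeout(), so it is
 * in jiffies.
 */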
#define LVB_SIZE	64
#define NEW_DEV_TIMEOUT 5000
struct dlm_lock_resource {
        dlm_lockspace_t *ls;
        struct dlm_lksb lksb;
        char *name; /* lock name. */
        uint32_t flags; /* flags to pass to dlm_lock() */
        struct completion completion; /* completion for synchronized locking */
        void (*bast)(void *arg, int mode); /* blocking AST function pointer */
        struct mddev *mddev; /* pointing back to mddev. */
        int mode;
};

struct suspend_info {
        int slot;
        sector_t lo;
        sector_t hi;
        struct list_head list;
};

struct resync_info {
        __le64 lo;
        __le64 hi;
};
/* md_cluster_info flags */
#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
#define MD_CLUSTER_BEGIN_JOIN_CLUSTER 3
/* Lock the send communication. This is done through
 * bit manipulation as opposed to a mutex in order to
 * accommodate lock and hold. See next comment.
 */
#define MD_CLUSTER_SEND_LOCK 4
/* Cluster operations (such as adding a disk) may need to lock the
 * communication channel in order to perform extra work (updating the
 * metadata) while no other operation is allowed on the MD. The token
 * must be locked and held until the operation completes with a
 * md_update_sb(), which would eventually release the lock.
 */
#define MD_CLUSTER_SEND_LOCKED_ALREADY 5
struct md_cluster_info {
        /* dlm lock space and resources for clustered raid. */
        dlm_lockspace_t *lockspace;
        int slot_number;
        struct completion completion;
        struct mutex recv_mutex;
        struct dlm_lock_resource *bitmap_lockres;
        struct dlm_lock_resource **other_bitmap_lockres;
        struct dlm_lock_resource *resync_lockres;
        struct list_head suspend_list;
        spinlock_t suspend_lock;
        struct md_thread *recovery_thread;
        unsigned long recovery_map;
        /* communication lock resources */
        struct dlm_lock_resource *ack_lockres;
        struct dlm_lock_resource *message_lockres;
        struct dlm_lock_resource *token_lockres;
        struct dlm_lock_resource *no_new_dev_lockres;
        struct md_thread *recv_thread;
        struct completion newdisk_completion;
        wait_queue_head_t wait;
        unsigned long state;
};
enum msg_type {
        METADATA_UPDATED = 0,
        RESYNCING,
        NEWDISK,
        REMOVE,
        RE_ADD,
        BITMAP_NEEDS_SYNC,
};

struct cluster_msg {
        __le32 type;
        __le32 slot;
        /* TODO: Unionize this for smaller footprint */
        __le64 low;
        __le64 high;
        char uuid[16];
        __le32 raid_slot;
};
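/*
 * dlm_lock() and dlm_unlock() are asynchronous: completion is signalled
 * through the AST callback.  sync_ast() and dlm_lock_sync() below wrap
 * them into a synchronous interface by waiting on a per-resource
 * completion.
 */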
static void sync_ast(void *arg)
{
        struct dlm_lock_resource *res;

        res = arg;
        complete(&res->completion);
}
static int dlm_lock_sync(struct dlm_lock_resource *res, int mode)
{
        int ret;

        ret = dlm_lock(res->ls, mode, &res->lksb,
                        res->flags, res->name, strlen(res->name),
                        0, sync_ast, res, res->bast);
        if (ret)
                return ret;
        wait_for_completion(&res->completion);
        if (res->lksb.sb_status == 0)
                res->mode = mode;
        return res->lksb.sb_status;
}
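/*
 * There is no real "unlock" here: the resource is instead down-converted
 * to NL mode, which keeps the lock resource (and its LVB) valid for
 * later conversions.
 */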
static int dlm_unlock_sync(struct dlm_lock_resource *res)
{
        return dlm_lock_sync(res, DLM_LOCK_NL);
}
static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
                char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
{
        struct dlm_lock_resource *res = NULL;
        int ret, namelen;
        struct md_cluster_info *cinfo = mddev->cluster_info;

        res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
        if (!res)
                return NULL;
        init_completion(&res->completion);
        res->ls = cinfo->lockspace;
        res->mddev = mddev;
        res->mode = DLM_LOCK_IV;
        namelen = strlen(name);
        res->name = kzalloc(namelen + 1, GFP_KERNEL);
        if (!res->name) {
                pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name);
                goto out_err;
        }
        strlcpy(res->name, name, namelen + 1);
        if (with_lvb) {
                res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL);
                if (!res->lksb.sb_lvbptr) {
                        pr_err("md-cluster: Unable to allocate LVB for resource %s\n", name);
                        goto out_err;
                }
                res->flags = DLM_LKF_VALBLK;
        }

        if (bastfn)
                res->bast = bastfn;

        res->flags |= DLM_LKF_EXPEDITE;

        ret = dlm_lock_sync(res, DLM_LOCK_NL);
        if (ret) {
                pr_err("md-cluster: Unable to lock NL on new lock resource %s\n", name);
                goto out_err;
        }
        res->flags &= ~DLM_LKF_EXPEDITE;
        res->flags |= DLM_LKF_CONVERT;

        return res;
out_err:
        kfree(res->lksb.sb_lvbptr);
        kfree(res->name);
        kfree(res);
        return NULL;
}
static void lockres_free(struct dlm_lock_resource *res)
{
        int ret;

        if (!res)
                return;

        /* cancel a lock request or a conversion request that is blocked */
        res->flags |= DLM_LKF_CANCEL;
retry:
        ret = dlm_unlock(res->ls, res->lksb.sb_lkid, 0, &res->lksb, res);
        if (unlikely(ret != 0)) {
                pr_info("%s: failed to unlock %s return %d\n", __func__, res->name, ret);

                /* if a lock conversion is cancelled, then the lock is put
                 * back on the grant queue; we need to ensure it is unlocked */
                if (ret == -DLM_ECANCEL)
                        goto retry;
        }
        res->flags &= ~DLM_LKF_CANCEL;
        wait_for_completion(&res->completion);

        kfree(res->name);
        kfree(res->lksb.sb_lvbptr);
        kfree(res);
}
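/*
 * Each node owns a "bitmap%04d" lock resource whose LVB holds a struct
 * resync_info describing the range that node is currently resyncing.
 * The two helpers below write and read that LVB.
 */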
static void add_resync_info(struct dlm_lock_resource *lockres,
                sector_t lo, sector_t hi)
{
        struct resync_info *ri;

        ri = (struct resync_info *)lockres->lksb.sb_lvbptr;
        ri->lo = cpu_to_le64(lo);
        ri->hi = cpu_to_le64(hi);
}
static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres)
{
        struct resync_info ri;
        struct suspend_info *s = NULL;
        sector_t hi = 0;

        dlm_lock_sync(lockres, DLM_LOCK_CR);
        memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
        hi = le64_to_cpu(ri.hi);
        if (hi > 0) {
                s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
                if (!s)
                        goto out;
                s->hi = hi;
                s->lo = le64_to_cpu(ri.lo);
        }
        dlm_unlock_sync(lockres);
out:
        return s;
}
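/*
 * recover_bitmaps() runs in the "recover" thread.  For every slot set in
 * cinfo->recovery_map it drops the stale suspend ranges, takes PW on the
 * failed node's bitmap lock and merges that bitmap into our own, so the
 * failed node's dirty regions are resynced by this node.
 */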
static void recover_bitmaps(struct md_thread *thread)
{
        struct mddev *mddev = thread->mddev;
        struct md_cluster_info *cinfo = mddev->cluster_info;
        struct dlm_lock_resource *bm_lockres;
        char str[64];
        int slot, ret;
        struct suspend_info *s, *tmp;
        sector_t lo, hi;

        while (cinfo->recovery_map) {
                slot = fls64((u64)cinfo->recovery_map) - 1;

                /* Clear suspend_area associated with the bitmap */
                spin_lock_irq(&cinfo->suspend_lock);
                list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
                        if (slot == s->slot) {
                                list_del(&s->list);
                                kfree(s);
                        }
                spin_unlock_irq(&cinfo->suspend_lock);

                snprintf(str, 64, "bitmap%04d", slot);
                bm_lockres = lockres_init(mddev, str, NULL, 1);
                if (!bm_lockres) {
                        pr_err("md-cluster: Cannot initialize bitmaps\n");
                        goto clear_bit;
                }

                ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
                if (ret) {
                        pr_err("md-cluster: Could not DLM lock %s: %d\n",
                                        str, ret);
                        goto clear_bit;
                }
                ret = bitmap_copy_from_slot(mddev, slot, &lo, &hi, true);
                if (ret) {
                        pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
                        goto dlm_unlock;
                }
                if (hi > 0) {
                        if (lo < mddev->recovery_cp)
                                mddev->recovery_cp = lo;
                        /* wake up thread to continue resync in case resync
                         * is not finished */
                        if (mddev->recovery_cp != MaxSector) {
                                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                                md_wakeup_thread(mddev->thread);
                        }
                }
dlm_unlock:
                dlm_unlock_sync(bm_lockres);
clear_bit:
                lockres_free(bm_lockres);
                clear_bit(slot, &cinfo->recovery_map);
        }
}
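/*
 * DLM slot-recovery callbacks (see md_ls_ops below): recover_prep()
 * suspends read balancing while DLM recovery is in progress,
 * recover_slot() queues bitmap recovery for a failed node, and
 * recover_done() records our slot number and re-enables read balancing.
 */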
static void recover_prep(void *arg)
{
        struct mddev *mddev = arg;
        struct md_cluster_info *cinfo = mddev->cluster_info;
        set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
static void __recover_slot(struct mddev *mddev, int slot)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;

        set_bit(slot, &cinfo->recovery_map);
        if (!cinfo->recovery_thread) {
                cinfo->recovery_thread = md_register_thread(recover_bitmaps,
                                mddev, "recover");
                if (!cinfo->recovery_thread) {
                        pr_warn("md-cluster: Could not create recovery thread\n");
                        return;
                }
        }
        md_wakeup_thread(cinfo->recovery_thread);
}
static void recover_slot(void *arg, struct dlm_slot *slot)
{
        struct mddev *mddev = arg;
        struct md_cluster_info *cinfo = mddev->cluster_info;

        pr_info("md-cluster: %s Node %d/%d down. My slot: %d. Initiating recovery.\n",
                        mddev->bitmap_info.cluster_name,
                        slot->nodeid, slot->slot,
                        cinfo->slot_number);
        /* Subtract one: DLM slot numbers start at 1, while
         * cluster-md slot numbers start at 0. */
        __recover_slot(mddev, slot->slot - 1);
}
static void recover_done(void *arg, struct dlm_slot *slots,
                int num_slots, int our_slot,
                uint32_t generation)
{
        struct mddev *mddev = arg;
        struct md_cluster_info *cinfo = mddev->cluster_info;

        cinfo->slot_number = our_slot;
        /* The completion only needs completing when a node joins the
         * cluster; it does not need to run on another node's failure. */
        if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
                complete(&cinfo->completion);
                clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
        }
        clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
}
/* These ops are called when a node joins the cluster, and they drive
 * lock recovery if a node failure occurs. */
static const struct dlm_lockspace_ops md_ls_ops = {
        .recover_prep = recover_prep,
        .recover_slot = recover_slot,
        .recover_done = recover_done,
};
/*
 * The BAST function for the ack lock resource
 * This function wakes up the receive thread in
 * order to receive and process the message.
 */
static void ack_bast(void *arg, int mode)
{
        struct dlm_lock_resource *res = arg;
        struct md_cluster_info *cinfo = res->mddev->cluster_info;

        if (mode == DLM_LOCK_EX)
                md_wakeup_thread(cinfo->recv_thread);
}
static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
{
        struct suspend_info *s, *tmp;

        list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
                if (slot == s->slot) {
                        list_del(&s->list);
                        kfree(s);
                        break;
                }
}
static void remove_suspend_info(struct mddev *mddev, int slot)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        spin_lock_irq(&cinfo->suspend_lock);
        __remove_suspend_info(cinfo, slot);
        spin_unlock_irq(&cinfo->suspend_lock);
        mddev->pers->quiesce(mddev, 2);
}
static void process_suspend_info(struct mddev *mddev,
                int slot, sector_t lo, sector_t hi)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        struct suspend_info *s;

        if (!hi) {
                remove_suspend_info(mddev, slot);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
                return;
        }
        s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
        if (!s)
                return;
        s->slot = slot;
        s->lo = lo;
        s->hi = hi;
        mddev->pers->quiesce(mddev, 1);
        mddev->pers->quiesce(mddev, 0);
        spin_lock_irq(&cinfo->suspend_lock);
        /* Remove existing entry (if exists) before adding */
        __remove_suspend_info(cinfo, slot);
        list_add(&s->list, &cinfo->suspend_list);
        spin_unlock_irq(&cinfo->suspend_lock);
        mddev->pers->quiesce(mddev, 2);
}
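/*
 * A NEWDISK message is handed to userspace (typically mdadm, listening
 * for the ADD_DEVICE uevent) which validates the device and answers
 * through new_disk_ack().  We block here, bounded by NEW_DEV_TIMEOUT,
 * until that confirmation arrives.
 */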
static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
{
        char disk_uuid[64];
        struct md_cluster_info *cinfo = mddev->cluster_info;
        char event_name[] = "EVENT=ADD_DEVICE";
        char raid_slot[16];
        char *envp[] = {event_name, disk_uuid, raid_slot, NULL};
        int len;

        len = snprintf(disk_uuid, 64, "DEVICE_UUID=");
        sprintf(disk_uuid + len, "%pU", cmsg->uuid);
        snprintf(raid_slot, 16, "RAID_DISK=%d", le32_to_cpu(cmsg->raid_slot));
        pr_info("%s:%d Sending kobject change with %s and %s\n", __func__, __LINE__, disk_uuid, raid_slot);
        init_completion(&cinfo->newdisk_completion);
        set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
        kobject_uevent_env(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE, envp);
        wait_for_completion_timeout(&cinfo->newdisk_completion,
                        NEW_DEV_TIMEOUT);
        clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
}
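/*
 * Another node updated the on-disk metadata: remember a known-good
 * device number, flag the superblock for re-reading, re-take CR on the
 * no-new-dev lock (dropped when we acked the NEWDISK), and wake the MD
 * thread.
 */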
static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        mddev->good_device_nr = le32_to_cpu(msg->raid_slot);
        set_bit(MD_RELOAD_SB, &mddev->flags);
        dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
        md_wakeup_thread(mddev->thread);
}
static void process_remove_disk(struct mddev *mddev, struct cluster_msg *msg)
{
        struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
                                                   le32_to_cpu(msg->raid_slot));

        if (rdev) {
                set_bit(ClusterRemove, &rdev->flags);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        } else
                pr_warn("%s: %d Could not find disk(%d) to REMOVE\n",
                        __func__, __LINE__, le32_to_cpu(msg->raid_slot));
}
static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
{
        struct md_rdev *rdev = md_find_rdev_nr_rcu(mddev,
                                                   le32_to_cpu(msg->raid_slot));

        if (rdev && test_bit(Faulty, &rdev->flags))
                clear_bit(Faulty, &rdev->flags);
        else
                pr_warn("%s: %d Could not find disk(%d) which is faulty\n",
                        __func__, __LINE__, le32_to_cpu(msg->raid_slot));
}
static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
{
        if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
                "node %d received its own msg\n", le32_to_cpu(msg->slot)))
                return;
        switch (le32_to_cpu(msg->type)) {
        case METADATA_UPDATED:
                process_metadata_update(mddev, msg);
                break;
        case RESYNCING:
                process_suspend_info(mddev, le32_to_cpu(msg->slot),
                                le64_to_cpu(msg->low),
                                le64_to_cpu(msg->high));
                break;
        case NEWDISK:
                process_add_new_disk(mddev, msg);
                break;
        case REMOVE:
                process_remove_disk(mddev, msg);
                break;
        case RE_ADD:
                process_readd_disk(mddev, msg);
                break;
        case BITMAP_NEEDS_SYNC:
                __recover_slot(mddev, le32_to_cpu(msg->slot));
                break;
        default:
                pr_warn("%s:%d Received unknown message from %d\n",
                        __func__, __LINE__, le32_to_cpu(msg->slot));
        }
}
/*
 * thread for receiving message
 */
static void recv_daemon(struct md_thread *thread)
{
        struct md_cluster_info *cinfo = thread->mddev->cluster_info;
        struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
        struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
        struct cluster_msg msg;
        int ret;

        mutex_lock(&cinfo->recv_mutex);
        /*get CR on Message*/
        if (dlm_lock_sync(message_lockres, DLM_LOCK_CR)) {
                pr_err("md-cluster: failed to get CR on MESSAGE\n");
                mutex_unlock(&cinfo->recv_mutex);
                return;
        }

        /* read lvb and wake up thread to process this message_lockres */
        memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
        process_recvd_msg(thread->mddev, &msg);

        /*release CR on ack_lockres*/
        ret = dlm_unlock_sync(ack_lockres);
        if (unlikely(ret != 0))
                pr_info("unlock ack failed return %d\n", ret);
        /*up-convert to PR on message_lockres*/
        ret = dlm_lock_sync(message_lockres, DLM_LOCK_PR);
        if (unlikely(ret != 0))
                pr_info("lock PR on msg failed return %d\n", ret);
        /*get CR on ack_lockres again*/
        ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
        if (unlikely(ret != 0))
                pr_info("lock CR on ack failed return %d\n", ret);
        /*release CR on message_lockres*/
        ret = dlm_unlock_sync(message_lockres);
        if (unlikely(ret != 0))
                pr_info("unlock msg failed return %d\n", ret);
        mutex_unlock(&cinfo->recv_mutex);
}
/*
 * Takes the lock on the TOKEN lock resource so no other
 * node can communicate while the operation is underway.
 */
static int lock_token(struct md_cluster_info *cinfo)
{
        int error;

        error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
        if (error)
                pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
                                __func__, __LINE__, error);

        /* Lock the receive sequence */
        mutex_lock(&cinfo->recv_mutex);
        return error;
}
/* lock_comm()
 * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
 */
static int lock_comm(struct md_cluster_info *cinfo)
{
        wait_event(cinfo->wait,
                   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));

        return lock_token(cinfo);
}
static void unlock_comm(struct md_cluster_info *cinfo)
{
        WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX);
        mutex_unlock(&cinfo->recv_mutex);
        dlm_unlock_sync(cinfo->token_lockres);
        clear_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state);
        wake_up(&cinfo->wait);
}
/*
 * This function performs the actual sending of the message. It is
 * usually called after performing the encompassing operation. The
 * function:
 * 1. Grabs the message lockresource in EX mode
 * 2. Copies the message to the message LVB
 * 3. Downconverts message lockresource to CW
 * 4. Upconverts ack lock resource from CR to EX. This forces the BAST on
 *    other nodes and the other nodes read the message. The thread will
 *    wait here until all other nodes have released the ack lock resource.
 * 5. Downconverts ack lockresource to CR
 */
static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
{
        int error;
        int slot = cinfo->slot_number - 1;

        cmsg->slot = cpu_to_le32(slot);
        /*get EX on Message*/
        error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
        if (error) {
                pr_err("md-cluster: failed to get EX on MESSAGE (%d)\n", error);
                goto failed_message;
        }

        memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
                        sizeof(struct cluster_msg));
        /*down-convert EX to CW on Message*/
        error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW);
        if (error) {
                pr_err("md-cluster: failed to convert EX to CW on MESSAGE(%d)\n",
                                error);
                goto failed_ack;
        }

        /*up-convert CR to EX on Ack*/
        error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX);
        if (error) {
                pr_err("md-cluster: failed to convert CR to EX on ACK(%d)\n",
                                error);
                goto failed_ack;
        }

        /*down-convert EX to CR on Ack*/
        error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
        if (error) {
                pr_err("md-cluster: failed to convert EX to CR on ACK(%d)\n",
                                error);
                goto failed_ack;
        }

failed_ack:
        error = dlm_unlock_sync(cinfo->message_lockres);
        if (unlikely(error != 0)) {
                pr_err("md-cluster: failed convert to NL on MESSAGE(%d)\n",
                        error);
                /* in case the message can't be released due to some reason */
                goto failed_ack;
        }
failed_message:
        return error;
}
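/*
 * sendmsg() is the locked variant of __sendmsg(): it serializes senders
 * via lock_comm()/unlock_comm() (the MD_CLUSTER_SEND_LOCK bit plus the
 * TOKEN lock) around the actual transmission.
 */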
static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
{
        int ret;

        lock_comm(cinfo);
        ret = __sendmsg(cinfo, cmsg);
        unlock_comm(cinfo);
        return ret;
}
static int gather_all_resync_info(struct mddev *mddev, int total_slots)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        int i, ret = 0;
        struct dlm_lock_resource *bm_lockres;
        struct suspend_info *s;
        char str[64];
        sector_t lo, hi;

        for (i = 0; i < total_slots; i++) {
                memset(str, '\0', 64);
                snprintf(str, 64, "bitmap%04d", i);
                bm_lockres = lockres_init(mddev, str, NULL, 1);
                if (!bm_lockres)
                        return -ENOMEM;
                if (i == (cinfo->slot_number - 1)) {
                        lockres_free(bm_lockres);
                        continue;
                }

                bm_lockres->flags |= DLM_LKF_NOQUEUE;
                ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
                if (ret == -EAGAIN) {
                        memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
                        s = read_resync_info(mddev, bm_lockres);
                        if (s) {
                                pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
                                                __func__, __LINE__,
                                                (unsigned long long) s->lo,
                                                (unsigned long long) s->hi, i);
                                spin_lock_irq(&cinfo->suspend_lock);
                                s->slot = i;
                                list_add(&s->list, &cinfo->suspend_list);
                                spin_unlock_irq(&cinfo->suspend_lock);
                        }
                        ret = 0;
                        lockres_free(bm_lockres);
                        continue;
                }
                if (ret) {
                        lockres_free(bm_lockres);
                        goto out;
                }

                /* Read the disk bitmap sb and check if it needs recovery */
                ret = bitmap_copy_from_slot(mddev, i, &lo, &hi, false);
                if (ret) {
                        pr_warn("md-cluster: Could not gather bitmaps from slot %d\n", i);
                        lockres_free(bm_lockres);
                        continue;
                }
                if ((hi > 0) && (lo < mddev->recovery_cp)) {
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        mddev->recovery_cp = lo;
                        md_check_recovery(mddev);
                }

                dlm_unlock_sync(bm_lockres);
                lockres_free(bm_lockres);
        }
out:
        return ret;
}
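/*
 * join() brings the array into the cluster: it creates the DLM
 * lockspace, waits for our slot assignment via recover_done(), starts
 * the receive thread, initializes the communication lock resources
 * (message/token/ack/no-new-dev), takes PW on our own bitmap, and
 * finally gathers resync state from the other slots.
 */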
static int join(struct mddev *mddev, int nodes)
{
        struct md_cluster_info *cinfo;
        int ret, ops_rv;
        char str[64];

        cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
        if (!cinfo)
                return -ENOMEM;

        INIT_LIST_HEAD(&cinfo->suspend_list);
        spin_lock_init(&cinfo->suspend_lock);
        init_completion(&cinfo->completion);
        set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
        init_waitqueue_head(&cinfo->wait);
        mutex_init(&cinfo->recv_mutex);

        mddev->cluster_info = cinfo;

        memset(str, 0, 64);
        sprintf(str, "%pU", mddev->uuid);
        ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
                                DLM_LSFL_FS, LVB_SIZE,
                                &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
        if (ret)
                goto err;
        wait_for_completion(&cinfo->completion);
        if (nodes < cinfo->slot_number) {
                pr_err("md-cluster: Slot allotted(%d) is greater than available slots(%d).\n",
                        cinfo->slot_number, nodes);
                ret = -ERANGE;
                goto err;
        }
        /* Initiate the communication resources */
        ret = -ENOMEM;
        cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
        if (!cinfo->recv_thread) {
                pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
                goto err;
        }
        cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
        if (!cinfo->message_lockres)
                goto err;
        cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
        if (!cinfo->token_lockres)
                goto err;
        cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
        if (!cinfo->ack_lockres)
                goto err;
        cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
        if (!cinfo->no_new_dev_lockres)
                goto err;

        /* get sync CR lock on ACK. */
        if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
                pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
                                ret);
        /* get sync CR lock on no-new-dev. */
        if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
                pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ret);
        pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
        snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
        cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
        if (!cinfo->bitmap_lockres)
                goto err;
        if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
                pr_err("Failed to get bitmap lock\n");
                ret = -EINVAL;
                goto err;
        }

        cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
        if (!cinfo->resync_lockres)
                goto err;

        ret = gather_all_resync_info(mddev, nodes);
        if (ret)
                goto err;

        return 0;
err:
        md_unregister_thread(&cinfo->recovery_thread);
        md_unregister_thread(&cinfo->recv_thread);
        lockres_free(cinfo->message_lockres);
        lockres_free(cinfo->token_lockres);
        lockres_free(cinfo->ack_lockres);
        lockres_free(cinfo->no_new_dev_lockres);
        lockres_free(cinfo->resync_lockres);
        lockres_free(cinfo->bitmap_lockres);
        if (cinfo->lockspace)
                dlm_release_lockspace(cinfo->lockspace, 2);
        mddev->cluster_info = NULL;
        kfree(cinfo);
        return ret;
}
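/*
 * On leave, a node with an unfinished resync broadcasts
 * BITMAP_NEEDS_SYNC so that a surviving node takes over the resync of
 * this node's bitmap (see __recover_slot()).
 */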
static void resync_bitmap(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        struct cluster_msg cmsg = {0};
        int err;

        cmsg.type = cpu_to_le32(BITMAP_NEEDS_SYNC);
        err = sendmsg(cinfo, &cmsg);
        if (err)
                pr_err("%s:%d: failed to send BITMAP_NEEDS_SYNC message (%d)\n",
                        __func__, __LINE__, err);
}
static void unlock_all_bitmaps(struct mddev *mddev);
static int leave(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;

        if (!cinfo)
                return 0;

        /* A BITMAP_NEEDS_SYNC message should be sent when a node is
         * leaving the cluster with a dirty bitmap. Also, we can only
         * deliver it while the dlm connection is still available. */
        if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector)
                resync_bitmap(mddev);

        md_unregister_thread(&cinfo->recovery_thread);
        md_unregister_thread(&cinfo->recv_thread);
        lockres_free(cinfo->message_lockres);
        lockres_free(cinfo->token_lockres);
        lockres_free(cinfo->ack_lockres);
        lockres_free(cinfo->no_new_dev_lockres);
        lockres_free(cinfo->resync_lockres);
        lockres_free(cinfo->bitmap_lockres);
        unlock_all_bitmaps(mddev);
        dlm_release_lockspace(cinfo->lockspace, 2);
        return 0;
}
/* slot_number(): Returns the MD slot number to use
 * DLM starts the slot numbers from 1, whereas cluster-md
 * wants the numbering to start from zero, so we subtract one.
 */
static int slot_number(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;

        return cinfo->slot_number - 1;
}
/*
 * Check if the communication is already locked, else lock the
 * communication channel.
 * If it is already locked, the token is in EX mode, and hence
 * lock_token() should not be called.
 */
static int metadata_update_start(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;

        wait_event(cinfo->wait,
                   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state) ||
                   test_and_clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state));

        /* If token is already locked, return 0 */
        if (cinfo->token_lockres->mode == DLM_LOCK_EX)
                return 0;

        return lock_token(cinfo);
}
static int metadata_update_finish(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        struct cluster_msg cmsg;
        struct md_rdev *rdev;
        int ret = 0;
        int raid_slot = -1;

        memset(&cmsg, 0, sizeof(cmsg));
        cmsg.type = cpu_to_le32(METADATA_UPDATED);
        /* Pick up a good active device number to send. */
        rdev_for_each(rdev, mddev)
                if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
                        raid_slot = rdev->desc_nr;
                        break;
                }
        if (raid_slot >= 0) {
                cmsg.raid_slot = cpu_to_le32(raid_slot);
                ret = __sendmsg(cinfo, &cmsg);
        } else
                pr_warn("md-cluster: No good device id found to send\n");
        clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
        unlock_comm(cinfo);
        return ret;
}
static void metadata_update_cancel(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
        unlock_comm(cinfo);
}
static int resync_start(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
}
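/*
 * Publish the range currently being resynced: write it into our bitmap
 * lock's LVB and broadcast a RESYNCING message, so other nodes suspend
 * I/O to that range (see process_suspend_info() and area_resyncing()).
 */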
static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        struct resync_info ri;
        struct cluster_msg cmsg = {0};

        /* do not send zero again, if we have sent before */
        if (hi == 0) {
                memcpy(&ri, cinfo->bitmap_lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
                if (le64_to_cpu(ri.hi) == 0)
                        return 0;
        }

        add_resync_info(cinfo->bitmap_lockres, lo, hi);
        /* Re-acquire the lock to refresh LVB */
        dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
        cmsg.type = cpu_to_le32(RESYNCING);
        cmsg.low = cpu_to_le64(lo);
        cmsg.high = cpu_to_le64(hi);

        return sendmsg(cinfo, &cmsg);
}
static int resync_finish(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        dlm_unlock_sync(cinfo->resync_lockres);
        return resync_info_update(mddev, 0, 0);
}
static int area_resyncing(struct mddev *mddev, int direction,
                sector_t lo, sector_t hi)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        int ret = 0;
        struct suspend_info *s;

        if ((direction == READ) &&
                test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
                return 1;

        spin_lock_irq(&cinfo->suspend_lock);
        if (list_empty(&cinfo->suspend_list))
                goto out;
        list_for_each_entry(s, &cinfo->suspend_list, list)
                if (hi > s->lo && lo < s->hi) {
                        ret = 1;
                        break;
                }
out:
        spin_unlock_irq(&cinfo->suspend_lock);
        return ret;
}
/* add_new_disk() - initiates a disk add
 * However, if this fails before writing md_update_sb(),
 * add_new_disk_cancel() must be called to release the token lock.
 */
static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        struct cluster_msg cmsg;
        int ret = 0;
        struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
        char *uuid = sb->device_uuid;

        memset(&cmsg, 0, sizeof(cmsg));
        cmsg.type = cpu_to_le32(NEWDISK);
        memcpy(cmsg.uuid, uuid, 16);
        cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
        lock_comm(cinfo);
        ret = __sendmsg(cinfo, &cmsg);
        if (ret)
                return ret;
        cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
        ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
        cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
        /* Some node does not "see" the device */
        if (ret == -EAGAIN)
                ret = -ENOENT;
        if (ret)
                unlock_comm(cinfo);
        else {
                dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
                /* Since MD_CHANGE_DEVS will be set in add_bound_rdev which
                 * will run soon after add_new_disk, the below path will be
                 * invoked:
                 *   md_wakeup_thread(mddev->thread)
                 *      -> conf->thread (raid1d)
                 *      -> md_check_recovery -> md_update_sb
                 *      -> metadata_update_start/finish
                 * MD_CLUSTER_SEND_LOCKED_ALREADY will be cleared eventually.
                 *
                 * For other failure cases, metadata_update_cancel and
                 * add_new_disk_cancel also clear the bit below.
                 */
                set_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
                wake_up(&cinfo->wait);
        }
        return ret;
}
static void add_new_disk_cancel(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
        unlock_comm(cinfo);
}
static int new_disk_ack(struct mddev *mddev, bool ack)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;

        if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
                pr_warn("md-cluster(%s): Spurious cluster confirmation\n", mdname(mddev));
                return -EINVAL;
        }

        if (ack)
                dlm_unlock_sync(cinfo->no_new_dev_lockres);
        complete(&cinfo->newdisk_completion);
        return 0;
}
static int remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
        struct cluster_msg cmsg = {0};
        struct md_cluster_info *cinfo = mddev->cluster_info;
        cmsg.type = cpu_to_le32(REMOVE);
        cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
        return sendmsg(cinfo, &cmsg);
}
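/*
 * Take PW on every other node's bitmap lock (NOQUEUE, so a busy node
 * simply makes the return value negative).  This appears to be used
 * when switching an array between clustered and non-clustered mode, to
 * make sure no other node still holds a bitmap lock.
 */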
static int lock_all_bitmaps(struct mddev *mddev)
{
        int slot, my_slot, ret, held = 1, i = 0;
        char str[64];
        struct md_cluster_info *cinfo = mddev->cluster_info;

        cinfo->other_bitmap_lockres = kzalloc((mddev->bitmap_info.nodes - 1) *
                                              sizeof(struct dlm_lock_resource *),
                                              GFP_KERNEL);
        if (!cinfo->other_bitmap_lockres) {
                pr_err("md: can't alloc mem for other bitmap locks\n");
                return 0;
        }

        my_slot = slot_number(mddev);
        for (slot = 0; slot < mddev->bitmap_info.nodes; slot++) {
                if (slot == my_slot)
                        continue;

                memset(str, '\0', 64);
                snprintf(str, 64, "bitmap%04d", slot);
                cinfo->other_bitmap_lockres[i] = lockres_init(mddev, str, NULL, 1);
                if (!cinfo->other_bitmap_lockres[i])
                        return -ENOMEM;

                cinfo->other_bitmap_lockres[i]->flags |= DLM_LKF_NOQUEUE;
                ret = dlm_lock_sync(cinfo->other_bitmap_lockres[i], DLM_LOCK_PW);
                if (ret)
                        held = -ret;
                i++;
        }

        return held;
}
static void unlock_all_bitmaps(struct mddev *mddev)
{
        struct md_cluster_info *cinfo = mddev->cluster_info;
        int i;

        /* release the other nodes' bitmap locks if they exist */
        if (cinfo->other_bitmap_lockres) {
                for (i = 0; i < mddev->bitmap_info.nodes - 1; i++) {
                        if (cinfo->other_bitmap_lockres[i]) {
                                dlm_unlock_sync(cinfo->other_bitmap_lockres[i]);
                                lockres_free(cinfo->other_bitmap_lockres[i]);
                        }
                }
                kfree(cinfo->other_bitmap_lockres);
        }
}
static int gather_bitmaps(struct md_rdev *rdev)
{
        int sn, err;
        sector_t lo, hi;
        struct cluster_msg cmsg = {0};
        struct mddev *mddev = rdev->mddev;
        struct md_cluster_info *cinfo = mddev->cluster_info;

        cmsg.type = cpu_to_le32(RE_ADD);
        cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
        err = sendmsg(cinfo, &cmsg);
        if (err)
                goto out;

        for (sn = 0; sn < mddev->bitmap_info.nodes; sn++) {
                if (sn == (cinfo->slot_number - 1))
                        continue;
                err = bitmap_copy_from_slot(mddev, sn, &lo, &hi, false);
                if (err) {
                        pr_warn("md-cluster: Could not gather bitmaps from slot %d\n", sn);
                        goto out;
                }
                if ((hi > 0) && (lo < mddev->recovery_cp))
                        mddev->recovery_cp = lo;
        }
out:
        return err;
}
static struct md_cluster_operations cluster_ops = {
        .join   = join,
        .leave  = leave,
        .slot_number = slot_number,
        .resync_start = resync_start,
        .resync_finish = resync_finish,
        .resync_info_update = resync_info_update,
        .metadata_update_start = metadata_update_start,
        .metadata_update_finish = metadata_update_finish,
        .metadata_update_cancel = metadata_update_cancel,
        .area_resyncing = area_resyncing,
        .add_new_disk = add_new_disk,
        .add_new_disk_cancel = add_new_disk_cancel,
        .new_disk_ack = new_disk_ack,
        .remove_disk = remove_disk,
        .gather_bitmaps = gather_bitmaps,
        .lock_all_bitmaps = lock_all_bitmaps,
        .unlock_all_bitmaps = unlock_all_bitmaps,
};
static int __init cluster_init(void)
{
        pr_warn("md-cluster: EXPERIMENTAL. Use with caution\n");
        pr_info("Registering Cluster MD functions\n");
        register_md_cluster_operations(&cluster_ops, THIS_MODULE);
        return 0;
}
static void cluster_exit(void)
{
        unregister_md_cluster_operations();
}

module_init(cluster_init);
module_exit(cluster_exit);
MODULE_AUTHOR("SUSE");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Clustering support for MD");