/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmdomain.c
 *
 * defines domain join / leave apis
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/debugfs.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"

#include "dlmver.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
#include "cluster/masklog.h"

/*
 * ocfs2 node maps are arrays of long ints, which makes it unsafe to
 * send them freely across the wire due to endianness issues. To work
 * around this, we convert the long ints to byte arrays. The following
 * three routines are helpers to set/test/copy bits within those byte
 * arrays.
 */
static inline void byte_set_bit(u8 nr, u8 map[])
{
	map[nr >> 3] |= (1UL << (nr & 7));
}

static inline int byte_test_bit(u8 nr, u8 map[])
{
	return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
}

static inline void byte_copymap(u8 dmap[], unsigned long smap[],
				unsigned int sz)
{
	unsigned int nn;

	if (!sz)
		return;

	memset(dmap, 0, ((sz + 7) >> 3));
	for (nn = 0 ; nn < sz; nn++)
		if (test_bit(nn, smap))
			byte_set_bit(nn, dmap);
}

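/*
 * Illustrative sketch (not part of the build): packing node bits for
 * the wire with the helpers above. Nodes 0 and 9 land in byte 0 bit 0
 * and byte 1 bit 1 respectively, regardless of host endianness:
 *
 *	u8 wire_map[(O2NM_MAX_NODES + 7) >> 3] = {0};
 *
 *	byte_set_bit(0, wire_map);
 *	byte_set_bit(9, wire_map);
 *	BUG_ON(!byte_test_bit(9, wire_map));
 *
 * byte_copymap() produces the same layout starting from a kernel
 * bitmap of unsigned longs, as done in dlm_request_join() below.
 */
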
static void dlm_free_pagevec(void **vec, int pages)
{
	while (pages--)
		free_page((unsigned long)vec[pages]);
	kfree(vec);
}

static void **dlm_alloc_pagevec(int pages)
{
	void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
	int i;

	if (!vec)
		return NULL;

	for (i = 0; i < pages; i++)
		if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
			goto out_free;

	mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
	     pages, (unsigned long)DLM_HASH_PAGES,
	     (unsigned long)DLM_BUCKETS_PER_PAGE);
	return vec;

out_free:
	dlm_free_pagevec(vec, i);
	return NULL;
}

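/*
 * Sketch of how the pagevec is indexed (illustrative only; the real
 * dlm_lockres_hash() helper lives in dlmcommon.h and is assumed to
 * follow this shape): bucket i sits on page i / DLM_BUCKETS_PER_PAGE
 * at offset i % DLM_BUCKETS_PER_PAGE, so the hash table scales to
 * DLM_HASH_BUCKETS heads without one large contiguous allocation.
 */
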
/*
 *
 * spinlock lock ordering: if multiple locks are needed, obey this ordering:
 *
 * dlm_domain_lock
 * struct dlm_ctxt->spinlock
 * struct dlm_lock_resource->spinlock
 * struct dlm_ctxt->master_lock
 * struct dlm_ctxt->ast_lock
 * dlm_master_list_entry->spinlock
 * dlm_lock->spinlock
 *
 */

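/*
 * For example (illustrative only), a path that needs both the domain
 * and a lock resource's spinlock must nest them in the order above:
 *
 *	spin_lock(&dlm->spinlock);
 *	spin_lock(&res->spinlock);
 *	...
 *	spin_unlock(&res->spinlock);
 *	spin_unlock(&dlm->spinlock);
 *
 * Taking them in the opposite order on another CPU risks deadlock.
 */
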
DEFINE_SPINLOCK(dlm_domain_lock);
LIST_HEAD(dlm_domains);
static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);

/*
 * The supported protocol version for DLM communication.  Running domains
 * will have a negotiated version with the same major number and a minor
 * number equal to or smaller than ours.  The dlm_ctxt->dlm_locking_proto
 * field should be used to determine what a running domain is actually
 * using.
 */
static const struct dlm_protocol_version dlm_protocol = {
	.pv_major = 1,
	.pv_minor = 0,
};

#define DLM_DOMAIN_BACKOFF_MS 200

static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data);
static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
				     void **ret_data);
static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
				   void **ret_data);
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
				   void **ret_data);
static int dlm_protocol_compare(struct dlm_protocol_version *existing,
				struct dlm_protocol_version *request);

static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);

void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
{
	if (!hlist_unhashed(&lockres->hash_node)) {
		hlist_del_init(&lockres->hash_node);
		dlm_lockres_put(lockres);
	}
}

void __dlm_insert_lockres(struct dlm_ctxt *dlm,
			  struct dlm_lock_resource *res)
{
	struct hlist_head *bucket;
	struct qstr *q;

	assert_spin_locked(&dlm->spinlock);

	q = &res->lockname;
	bucket = dlm_lockres_hash(dlm, q->hash);

	/* get a reference for our hashtable */
	dlm_lockres_get(res);

	hlist_add_head(&res->hash_node, bucket);
}

struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
						     const char *name,
						     unsigned int len,
						     unsigned int hash)
{
	struct hlist_head *bucket;
	struct hlist_node *list;

	mlog_entry("%.*s\n", len, name);

	assert_spin_locked(&dlm->spinlock);

	bucket = dlm_lockres_hash(dlm, hash);

	hlist_for_each(list, bucket) {
		struct dlm_lock_resource *res = hlist_entry(list,
			struct dlm_lock_resource, hash_node);
		if (res->lockname.name[0] != name[0])
			continue;
		if (unlikely(res->lockname.len != len))
			continue;
		if (memcmp(res->lockname.name + 1, name + 1, len - 1))
			continue;
		dlm_lockres_get(res);
		return res;
	}
	return NULL;
}

/* intended to be called by functions which do not care about lock
 * resources which are being purged (most net _handler functions).
 * this will return NULL for any lock resource which is found but
 * currently in the process of dropping its mastery reference.
 * use __dlm_lookup_lockres_full when you need the lock resource
 * regardless (e.g. dlm_get_lock_resource) */
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len,
						unsigned int hash)
{
	struct dlm_lock_resource *res = NULL;

	mlog_entry("%.*s\n", len, name);

	assert_spin_locked(&dlm->spinlock);

	res = __dlm_lookup_lockres_full(dlm, name, len, hash);
	if (res) {
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);
			return NULL;
		}
		spin_unlock(&res->spinlock);
	}

	return res;
}

struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len)
{
	struct dlm_lock_resource *res;
	unsigned int hash = dlm_lockid_hash(name, len);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, name, len, hash);
	spin_unlock(&dlm->spinlock);

	return res;
}

static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
{
	struct dlm_ctxt *tmp = NULL;
	struct list_head *iter;

	assert_spin_locked(&dlm_domain_lock);

	/* tmp->name here is always NULL terminated,
	 * but domain may not be! */
	list_for_each(iter, &dlm_domains) {
		tmp = list_entry (iter, struct dlm_ctxt, list);
		if (strlen(tmp->name) == len &&
		    memcmp(tmp->name, domain, len)==0)
			break;
		tmp = NULL;
	}

	return tmp;
}

/* For null terminated domain strings ONLY */
static struct dlm_ctxt * __dlm_lookup_domain(const char *domain)
{
	assert_spin_locked(&dlm_domain_lock);

	return __dlm_lookup_domain_full(domain, strlen(domain));
}

/* returns true on one of two conditions:
 * 1) the domain does not exist
 * 2) the domain exists and its state is "joined" */
static int dlm_wait_on_domain_helper(const char *domain)
{
	int ret = 0;
	struct dlm_ctxt *tmp = NULL;

	spin_lock(&dlm_domain_lock);

	tmp = __dlm_lookup_domain(domain);
	if (!tmp)
		ret = 1;
	else if (tmp->dlm_state == DLM_CTXT_JOINED)
		ret = 1;

	spin_unlock(&dlm_domain_lock);
	return ret;
}

static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
{
	dlm_destroy_debugfs_subroot(dlm);

	if (dlm->lockres_hash)
		dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);

	if (dlm->master_hash)
		dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES);

	if (dlm->name)
		kfree(dlm->name);

	kfree(dlm);
}

/* A little strange - this function will be called while holding
 * dlm_domain_lock and is expected to be holding it on the way out. We
 * will however drop and reacquire it multiple times */
static void dlm_ctxt_release(struct kref *kref)
{
	struct dlm_ctxt *dlm;

	dlm = container_of(kref, struct dlm_ctxt, dlm_refs);

	BUG_ON(dlm->num_joins);
	BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);

	/* we may still be in the list if we hit an error during join. */
	list_del_init(&dlm->list);

	spin_unlock(&dlm_domain_lock);

	mlog(0, "freeing memory from domain %s\n", dlm->name);

	wake_up(&dlm_domain_events);

	dlm_free_ctxt_mem(dlm);

	spin_lock(&dlm_domain_lock);
}

void dlm_put(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm_domain_lock);
	kref_put(&dlm->dlm_refs, dlm_ctxt_release);
	spin_unlock(&dlm_domain_lock);
}

static void __dlm_get(struct dlm_ctxt *dlm)
{
	kref_get(&dlm->dlm_refs);
}

/* given a questionable reference to a dlm object, gets a reference if
 * it can find it in the list, otherwise returns NULL in which case
 * you shouldn't trust your pointer. */
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
{
	struct list_head *iter;
	struct dlm_ctxt *target = NULL;

	spin_lock(&dlm_domain_lock);

	list_for_each(iter, &dlm_domains) {
		target = list_entry (iter, struct dlm_ctxt, list);

		if (target == dlm) {
			__dlm_get(target);
			break;
		}

		target = NULL;
	}

	spin_unlock(&dlm_domain_lock);

	return target;
}

int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
{
	int ret;

	spin_lock(&dlm_domain_lock);
	ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
		(dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
	spin_unlock(&dlm_domain_lock);

	return ret;
}

static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_worker) {
		flush_workqueue(dlm->dlm_worker);
		destroy_workqueue(dlm->dlm_worker);
		dlm->dlm_worker = NULL;
	}
}

static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
{
	dlm_unregister_domain_handlers(dlm);
	dlm_debug_shutdown(dlm);
	dlm_complete_thread(dlm);
	dlm_complete_recovery_thread(dlm);
	dlm_destroy_dlm_worker(dlm);

	/* We've left the domain. Now we can take ourselves out of the
	 * list and allow the kref stuff to help us free the
	 * dlm context */
	spin_lock(&dlm_domain_lock);
	list_del_init(&dlm->list);
	spin_unlock(&dlm_domain_lock);

	/* Wake up anyone waiting for us to remove this domain */
	wake_up(&dlm_domain_events);
}

static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
{
	int i, num, n, ret = 0;
	struct dlm_lock_resource *res;
	struct hlist_node *iter;
	struct hlist_head *bucket;
	int dropped;

	mlog(0, "Migrating locks from domain %s\n", dlm->name);

	num = 0;
	spin_lock(&dlm->spinlock);
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
redo_bucket:
		n = 0;
		bucket = dlm_lockres_hash(dlm, i);
		iter = bucket->first;
		while (iter) {
			n++;
			res = hlist_entry(iter, struct dlm_lock_resource,
					  hash_node);
			dlm_lockres_get(res);
			/* migrate, if necessary. this will drop the dlm
			 * spinlock and retake it if it does migration. */
			dropped = dlm_empty_lockres(dlm, res);

			spin_lock(&res->spinlock);
			__dlm_lockres_calc_usage(dlm, res);
			iter = res->hash_node.next;
			spin_unlock(&res->spinlock);

			dlm_lockres_put(res);

			if (dropped)
				goto redo_bucket;
		}
		cond_resched_lock(&dlm->spinlock);
		num += n;
		mlog(0, "%s: touched %d lockreses in bucket %d "
		     "(tot=%d)\n", dlm->name, n, i, num);
	}
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->dlm_thread_wq);

	/* let the dlm thread take care of purging, keep scanning until
	 * nothing remains in the hash */
	if (num) {
		mlog(0, "%s: %d lock resources in hash last pass\n",
		     dlm->name, num);
		ret = -EAGAIN;
	}
	mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
	return ret;
}

static int dlm_no_joining_node(struct dlm_ctxt *dlm)
{
	int ret;

	spin_lock(&dlm->spinlock);
	ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
	spin_unlock(&dlm->spinlock);

	return ret;
}

static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
{
	/* Yikes, a double spinlock! I need domain_lock for the dlm
	 * state and the dlm spinlock for join state... Sorry! */
again:
	spin_lock(&dlm_domain_lock);
	spin_lock(&dlm->spinlock);

	if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
		mlog(0, "Node %d is joining, we wait on it.\n",
		     dlm->joining_node);
		spin_unlock(&dlm->spinlock);
		spin_unlock(&dlm_domain_lock);

		wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
		goto again;
	}

	dlm->dlm_state = DLM_CTXT_LEAVING;
	spin_unlock(&dlm->spinlock);
	spin_unlock(&dlm_domain_lock);
}

static void __dlm_print_nodes(struct dlm_ctxt *dlm)
{
	int node = -1;

	assert_spin_locked(&dlm->spinlock);

	printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name);

	while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
				     node + 1)) < O2NM_MAX_NODES) {
		printk("%d ", node);
	}
	printk("\n");
}

static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
				   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	unsigned int node;
	struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;

	mlog_entry("%p %u %p", msg, len, data);

	if (!dlm_grab(dlm))
		return 0;

	node = exit_msg->node_idx;

	printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name);

	spin_lock(&dlm->spinlock);
	clear_bit(node, dlm->domain_map);
	__dlm_print_nodes(dlm);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, node, 0);

	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);

	return 0;
}

static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
				    unsigned int node)
{
	int status;
	struct dlm_exit_domain leave_msg;

	mlog(0, "Asking node %u if we can leave the domain %s me = %u\n",
	     node, dlm->name, dlm->node_num);

	memset(&leave_msg, 0, sizeof(leave_msg));
	leave_msg.node_idx = dlm->node_num;

	status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
				    &leave_msg, sizeof(leave_msg), node,
				    NULL);
	if (status < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", status, DLM_EXIT_DOMAIN_MSG, dlm->key, node);
	mlog(0, "status return %d from o2net_send_message\n", status);

	return status;
}

static void dlm_leave_domain(struct dlm_ctxt *dlm)
{
	int node, clear_node, status;

	/* At this point we've migrated away all our locks and won't
	 * accept mastership of new ones. The dlm is responsible for
	 * almost nothing now. We make sure not to confuse any joining
	 * nodes and then commence shutdown procedure. */

	spin_lock(&dlm->spinlock);
	/* Clear ourselves from the domain map */
	clear_bit(dlm->node_num, dlm->domain_map);
	while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
				     0)) < O2NM_MAX_NODES) {
		/* Drop the dlm spinlock. This is safe wrt the domain_map.
		 * -nodes cannot be added now as the
		 *   query_join_handlers knows to respond with OK_NO_MAP
		 * -we catch the right network errors if a node is
		 *   removed from the map while we're sending him the
		 *   exit message. */
		spin_unlock(&dlm->spinlock);

		clear_node = 1;

		status = dlm_send_one_domain_exit(dlm, node);
		if (status < 0 &&
		    status != -ENOPROTOOPT &&
		    status != -ENOTCONN) {
			mlog(ML_NOTICE, "Error %d sending domain exit message "
			     "to node %d\n", status, node);

			/* Not sure what to do here but lets sleep for
			 * a bit in case this was a transient
			 * error... */
			msleep(DLM_DOMAIN_BACKOFF_MS);
			clear_node = 0;
		}

		spin_lock(&dlm->spinlock);
		/* If we're not clearing the node bit then we intend
		 * to loop back around to try again. */
		if (clear_node)
			clear_bit(node, dlm->domain_map);
	}
	spin_unlock(&dlm->spinlock);
}

int dlm_joined(struct dlm_ctxt *dlm)
{
	int ret = 0;

	spin_lock(&dlm_domain_lock);

	if (dlm->dlm_state == DLM_CTXT_JOINED)
		ret = 1;

	spin_unlock(&dlm_domain_lock);

	return ret;
}

int dlm_shutting_down(struct dlm_ctxt *dlm)
{
	int ret = 0;

	spin_lock(&dlm_domain_lock);

	if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
		ret = 1;

	spin_unlock(&dlm_domain_lock);

	return ret;
}

void dlm_unregister_domain(struct dlm_ctxt *dlm)
{
	int leave = 0;
	struct dlm_lock_resource *res;

	spin_lock(&dlm_domain_lock);
	BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
	BUG_ON(!dlm->num_joins);

	dlm->num_joins--;
	if (!dlm->num_joins) {
		/* We mark it "in shutdown" now so new register
		 * requests wait until we've completely left the
		 * domain. Don't use DLM_CTXT_LEAVING yet as we still
		 * want new domain joins to communicate with us at
		 * least until we've completed migration of our
		 * resources. */
		dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
		leave = 1;
	}
	spin_unlock(&dlm_domain_lock);

	if (leave) {
		mlog(0, "shutting down domain %s\n", dlm->name);

		/* We changed dlm state, notify the thread */
		dlm_kick_thread(dlm, NULL);

		while (dlm_migrate_all_locks(dlm)) {
			/* Give dlm_thread time to purge the lockres' */
			msleep(500);
			mlog(0, "%s: more migration to do\n", dlm->name);
		}

		/* This list should be empty. If not, print remaining lockres */
		if (!list_empty(&dlm->tracking_list)) {
			mlog(ML_ERROR, "Following lockres' are still on the "
			     "tracking list:\n");
			list_for_each_entry(res, &dlm->tracking_list, tracking)
				dlm_print_one_lock_resource(res);
		}

		dlm_mark_domain_leaving(dlm);
		dlm_leave_domain(dlm);
		dlm_force_free_mles(dlm);
		dlm_complete_dlm_shutdown(dlm);
	}
	dlm_put(dlm);
}
EXPORT_SYMBOL_GPL(dlm_unregister_domain);

static int dlm_query_join_proto_check(char *proto_type, int node,
				      struct dlm_protocol_version *ours,
				      struct dlm_protocol_version *request)
{
	int rc;
	struct dlm_protocol_version proto = *request;

	if (!dlm_protocol_compare(ours, &proto)) {
		mlog(0,
		     "node %u wanted to join with %s locking protocol "
		     "%u.%u, we respond with %u.%u\n",
		     node, proto_type,
		     request->pv_major,
		     request->pv_minor,
		     proto.pv_major, proto.pv_minor);
		request->pv_minor = proto.pv_minor;
		rc = 0;
	} else {
		mlog(ML_NOTICE,
		     "Node %u wanted to join with %s locking "
		     "protocol %u.%u, but we have %u.%u, disallowing\n",
		     node, proto_type,
		     request->pv_major,
		     request->pv_minor,
		     ours->pv_major,
		     ours->pv_minor);
		rc = 1;
	}

	return rc;
}

/*
 * struct dlm_query_join_packet is made up of four one-byte fields.  They
 * are effectively in big-endian order already.  However, little-endian
 * machines swap them before putting the packet on the wire (because
 * query_join's response is a status, and that status is treated as a u32
 * on the wire).  Thus, a big-endian and a little-endian machine will treat
 * this structure differently.
 *
 * The solution is to have little-endian machines swap the structure when
 * converting from the structure to the u32 representation.  This will
 * result in the structure having the correct format on the wire no matter
 * the host endian format.
 */
static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet,
					  u32 *wire)
{
	union dlm_query_join_response response;

	response.packet = *packet;
	*wire = cpu_to_be32(response.intval);
}

static void dlm_query_join_wire_to_packet(u32 wire,
					  struct dlm_query_join_packet *packet)
{
	union dlm_query_join_response response;

	response.intval = cpu_to_be32(wire);
	*packet = response.packet;
}

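/*
 * Worked example (illustrative only): a packet of
 * { .code = 1, .dlm_minor = 2, .fs_minor = 3 } occupies the bytes
 * 01 02 03 00 in memory on any host. A big-endian host reads those
 * bytes back as the u32 0x01020300, so the swap above is a no-op; a
 * little-endian host reads them as 0x00030201, and cpu_to_be32()
 * turns that into 0x01020300 as well. Every host therefore hands the
 * same u32 status to the net code, which keeps the packet format
 * identical on the wire.
 */
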
static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_query_join_request *query;
	struct dlm_query_join_packet packet = {
		.code = JOIN_DISALLOW,
	};
	struct dlm_ctxt *dlm = NULL;
	u32 response;
	u8 nodenum;

	query = (struct dlm_query_join_request *) msg->buf;

	mlog(0, "node %u wants to join domain %s\n", query->node_idx,
	     query->domain);

	/*
	 * If heartbeat doesn't consider the node live, tell it
	 * to back off and try again.  This gives heartbeat a chance
	 * to catch up.
	 */
	if (!o2hb_check_node_heartbeating(query->node_idx)) {
		mlog(0, "node %u is not in our live map yet\n",
		     query->node_idx);

		packet.code = JOIN_DISALLOW;
		goto respond;
	}

	packet.code = JOIN_OK_NO_MAP;

	spin_lock(&dlm_domain_lock);
	dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
	if (!dlm)
		goto unlock_respond;

	/*
	 * There is a small window where the joining node may not see the
	 * node(s) that just left but are still part of the cluster. DISALLOW
	 * the join request if the joining node has a different node map.
	 */
	nodenum = 0;
	while (nodenum < O2NM_MAX_NODES) {
		if (test_bit(nodenum, dlm->domain_map)) {
			if (!byte_test_bit(nodenum, query->node_map)) {
				mlog(0, "disallow join as node %u does not "
				     "have node %u in its nodemap\n",
				     query->node_idx, nodenum);
				packet.code = JOIN_DISALLOW;
				goto unlock_respond;
			}
		}
		nodenum++;
	}

	/* Once the dlm ctxt is marked as leaving then we don't want
	 * to be put in someone's domain map.
	 * Also, explicitly disallow joining at certain troublesome
	 * times (ie. during recovery). */
	if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
		int bit = query->node_idx;
		spin_lock(&dlm->spinlock);

		if (dlm->dlm_state == DLM_CTXT_NEW &&
		    dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* If this is a brand new context and we
			 * haven't started our join process yet, then
			 * the other node won the race. */
			packet.code = JOIN_OK_NO_MAP;
		} else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
			/* Disallow parallel joins. */
			packet.code = JOIN_DISALLOW;
		} else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
			mlog(0, "node %u trying to join, but recovery "
			     "is ongoing.\n", bit);
			packet.code = JOIN_DISALLOW;
		} else if (test_bit(bit, dlm->recovery_map)) {
			mlog(0, "node %u trying to join, but it "
			     "still needs recovery.\n", bit);
			packet.code = JOIN_DISALLOW;
		} else if (test_bit(bit, dlm->domain_map)) {
			mlog(0, "node %u trying to join, but it "
			     "is still in the domain! needs recovery?\n",
			     bit);
			packet.code = JOIN_DISALLOW;
		} else {
			/* Alright, we're fully a part of this domain
			 * so we keep some state as to who's joining
			 * and indicate to him that it needs to be
			 * fixed up. */

			/* Make sure we speak compatible locking protocols. */
			if (dlm_query_join_proto_check("DLM", bit,
						       &dlm->dlm_locking_proto,
						       &query->dlm_proto)) {
				packet.code = JOIN_PROTOCOL_MISMATCH;
			} else if (dlm_query_join_proto_check("fs", bit,
							      &dlm->fs_locking_proto,
							      &query->fs_proto)) {
				packet.code = JOIN_PROTOCOL_MISMATCH;
			} else {
				packet.dlm_minor = query->dlm_proto.pv_minor;
				packet.fs_minor = query->fs_proto.pv_minor;
				packet.code = JOIN_OK;
				__dlm_set_joining_node(dlm, query->node_idx);
			}
		}

		spin_unlock(&dlm->spinlock);
	}
unlock_respond:
	spin_unlock(&dlm_domain_lock);

respond:
	mlog(0, "We respond with %u\n", packet.code);

	dlm_query_join_packet_to_wire(&packet, &response);
	return response;
}

static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
				     void **ret_data)
{
	struct dlm_assert_joined *assert;
	struct dlm_ctxt *dlm = NULL;

	assert = (struct dlm_assert_joined *) msg->buf;

	mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
	     assert->domain);

	spin_lock(&dlm_domain_lock);
	dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
	/* XXX should we consider no dlm ctxt an error? */
	if (dlm) {
		spin_lock(&dlm->spinlock);

		/* Alright, this node has officially joined our
		 * domain. Set him in the map and clean up our
		 * leftover join state. */
		BUG_ON(dlm->joining_node != assert->node_idx);
		set_bit(assert->node_idx, dlm->domain_map);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);

		printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
		       assert->node_idx, dlm->name);
		__dlm_print_nodes(dlm);

		/* notify anything attached to the heartbeat events */
		dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);

		spin_unlock(&dlm->spinlock);
	}
	spin_unlock(&dlm_domain_lock);

	return 0;
}

static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
				   void **ret_data)
{
	struct dlm_cancel_join *cancel;
	struct dlm_ctxt *dlm = NULL;

	cancel = (struct dlm_cancel_join *) msg->buf;

	mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
	     cancel->domain);

	spin_lock(&dlm_domain_lock);
	dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);

	if (dlm) {
		spin_lock(&dlm->spinlock);

		/* Yikes, this guy wants to cancel his join. No
		 * problem, we simply cleanup our join state. */
		BUG_ON(dlm->joining_node != cancel->node_idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);

		spin_unlock(&dlm->spinlock);
	}
	spin_unlock(&dlm_domain_lock);

	return 0;
}

static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
				    unsigned int node)
{
	int status;
	struct dlm_cancel_join cancel_msg;

	memset(&cancel_msg, 0, sizeof(cancel_msg));
	cancel_msg.node_idx = dlm->node_num;
	cancel_msg.name_len = strlen(dlm->name);
	memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);

	status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
				    &cancel_msg, sizeof(cancel_msg), node,
				    NULL);
	if (status < 0) {
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", status, DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
		     node);
		goto bail;
	}

bail:
	return status;
}

/* map_size should be in bytes. */
static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
				 unsigned long *node_map,
				 unsigned int map_size)
{
	int status, tmpstat;
	unsigned int node;

	if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) *
			 sizeof(unsigned long))) {
		mlog(ML_ERROR,
		     "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n",
		     map_size, (unsigned)BITS_TO_LONGS(O2NM_MAX_NODES));
		return -EINVAL;
	}

	status = 0;
	node = -1;
	while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
				     node + 1)) < O2NM_MAX_NODES) {
		if (node == dlm->node_num)
			continue;

		tmpstat = dlm_send_one_join_cancel(dlm, node);
		if (tmpstat) {
			mlog(ML_ERROR, "Error return %d cancelling join on "
			     "node %d\n", tmpstat, node);
			if (!status)
				status = tmpstat;
		}
	}

	if (status)
		mlog_errno(status);
	return status;
}

static int dlm_request_join(struct dlm_ctxt *dlm,
			    int node,
			    enum dlm_query_join_response_code *response)
{
	int status;
	struct dlm_query_join_request join_msg;
	struct dlm_query_join_packet packet;
	u32 join_resp;

	mlog(0, "querying node %d\n", node);

	memset(&join_msg, 0, sizeof(join_msg));
	join_msg.node_idx = dlm->node_num;
	join_msg.name_len = strlen(dlm->name);
	memcpy(join_msg.domain, dlm->name, join_msg.name_len);
	join_msg.dlm_proto = dlm->dlm_locking_proto;
	join_msg.fs_proto = dlm->fs_locking_proto;

	/* copy live node map to join message */
	byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);

	status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
				    sizeof(join_msg), node, &join_resp);
	if (status < 0 && status != -ENOPROTOOPT) {
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", status, DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
		     node);
		goto bail;
	}
	dlm_query_join_wire_to_packet(join_resp, &packet);

	/* -ENOPROTOOPT from the net code means the other side isn't
	 * listening for our message type -- that's fine, it means
	 * his dlm isn't up, so we can consider him a 'yes' but not
	 * joined into the domain. */
	if (status == -ENOPROTOOPT) {
		status = 0;
		*response = JOIN_OK_NO_MAP;
	} else if (packet.code == JOIN_DISALLOW ||
		   packet.code == JOIN_OK_NO_MAP) {
		*response = packet.code;
	} else if (packet.code == JOIN_PROTOCOL_MISMATCH) {
		mlog(ML_NOTICE,
		     "This node requested DLM locking protocol %u.%u and "
		     "filesystem locking protocol %u.%u.  At least one of "
		     "the protocol versions on node %d is not compatible, "
		     "disconnecting\n",
		     dlm->dlm_locking_proto.pv_major,
		     dlm->dlm_locking_proto.pv_minor,
		     dlm->fs_locking_proto.pv_major,
		     dlm->fs_locking_proto.pv_minor,
		     node);
		status = -EPROTO;
		*response = packet.code;
	} else if (packet.code == JOIN_OK) {
		*response = packet.code;
		/* Use the same locking protocol as the remote node */
		dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
		dlm->fs_locking_proto.pv_minor = packet.fs_minor;
		mlog(0,
		     "Node %d responds JOIN_OK with DLM locking protocol "
		     "%u.%u and fs locking protocol %u.%u\n",
		     node,
		     dlm->dlm_locking_proto.pv_major,
		     dlm->dlm_locking_proto.pv_minor,
		     dlm->fs_locking_proto.pv_major,
		     dlm->fs_locking_proto.pv_minor);
	} else {
		status = -EINVAL;
		mlog(ML_ERROR, "invalid response %d from node %u\n",
		     packet.code, node);
	}

	mlog(0, "status %d, node %d response is %d\n", status, node,
	     *response);
bail:
	return status;
}

static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
				    unsigned int node)
{
	int status;
	struct dlm_assert_joined assert_msg;

	mlog(0, "Sending join assert to node %u\n", node);

	memset(&assert_msg, 0, sizeof(assert_msg));
	assert_msg.node_idx = dlm->node_num;
	assert_msg.name_len = strlen(dlm->name);
	memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);

	status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
				    &assert_msg, sizeof(assert_msg), node,
				    NULL);
	if (status < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
		     "node %u\n", status, DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
		     node);

	return status;
}

static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
				  unsigned long *node_map)
{
	int status, node, live;

	status = 0;
	node = -1;
	while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
				     node + 1)) < O2NM_MAX_NODES) {
		if (node == dlm->node_num)
			continue;

		do {
			/* It is very important that this message be
			 * received so we spin until either the node
			 * has died or it gets the message. */
			status = dlm_send_one_join_assert(dlm, node);

			spin_lock(&dlm->spinlock);
			live = test_bit(node, dlm->live_nodes_map);
			spin_unlock(&dlm->spinlock);

			if (status) {
				mlog(ML_ERROR, "Error return %d asserting "
				     "join on node %d\n", status, node);

				/* give us some time between errors... */
				if (live)
					msleep(DLM_DOMAIN_BACKOFF_MS);
			}
		} while (status && live);
	}
}

struct domain_join_ctxt {
	unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

static int dlm_should_restart_join(struct dlm_ctxt *dlm,
				   struct domain_join_ctxt *ctxt,
				   enum dlm_query_join_response_code response)
{
	int ret;

	if (response == JOIN_DISALLOW) {
		mlog(0, "Latest response of disallow -- should restart\n");
		return 1;
	}

	spin_lock(&dlm->spinlock);
	/* For now, we restart the process if the node maps have
	 * changed at all */
	ret = memcmp(ctxt->live_map, dlm->live_nodes_map,
		     sizeof(dlm->live_nodes_map));
	spin_unlock(&dlm->spinlock);

	if (ret)
		mlog(0, "Node maps changed -- should restart\n");

	return ret;
}

static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
{
	int status = 0, tmpstat, node;
	struct domain_join_ctxt *ctxt;
	enum dlm_query_join_response_code response = JOIN_DISALLOW;

	mlog_entry("%p", dlm);

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	/* group sem locking should work for us here -- we're already
	 * registered for heartbeat events so filling this should be
	 * atomic wrt getting those handlers called. */
	o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));

	spin_lock(&dlm->spinlock);
	memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));

	__dlm_set_joining_node(dlm, dlm->node_num);

	spin_unlock(&dlm->spinlock);

	node = -1;
	while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
				     node + 1)) < O2NM_MAX_NODES) {
		if (node == dlm->node_num)
			continue;

		status = dlm_request_join(dlm, node, &response);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		/* Ok, either we got a response or the node doesn't have a
		 * dlm up. */
		if (response == JOIN_OK)
			set_bit(node, ctxt->yes_resp_map);

		if (dlm_should_restart_join(dlm, ctxt, response)) {
			status = -EAGAIN;
			goto bail;
		}
	}

	mlog(0, "Yay, done querying nodes!\n");

	/* Yay, everyone agrees we can join the domain. My domain is
	 * comprised of all nodes who were put in the
	 * yes_resp_map. Copy that into our domain map and send a join
	 * assert message to clean up everyone else's state. */
	spin_lock(&dlm->spinlock);
	memcpy(dlm->domain_map, ctxt->yes_resp_map,
	       sizeof(ctxt->yes_resp_map));
	set_bit(dlm->node_num, dlm->domain_map);
	spin_unlock(&dlm->spinlock);

	dlm_send_join_asserts(dlm, ctxt->yes_resp_map);

	/* Joined state *must* be set before the joining node
	 * information, otherwise the query_join handler may read no
	 * current joiner but a state of NEW and tell joining nodes
	 * we're not in the domain. */
	spin_lock(&dlm_domain_lock);
	dlm->dlm_state = DLM_CTXT_JOINED;
	dlm->num_joins++;
	spin_unlock(&dlm_domain_lock);

bail:
	spin_lock(&dlm->spinlock);
	__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	if (!status)
		__dlm_print_nodes(dlm);
	spin_unlock(&dlm->spinlock);

	if (ctxt) {
		/* Do we need to send a cancel message to any nodes? */
		if (status == -EAGAIN) {
			tmpstat = dlm_send_join_cancels(dlm,
							ctxt->yes_resp_map,
							sizeof(ctxt->yes_resp_map));
			if (tmpstat < 0)
				mlog_errno(tmpstat);
		}
		kfree(ctxt);
	}

	mlog(0, "returning %d\n", status);
	return status;
}

static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
{
	o2hb_unregister_callback(NULL, &dlm->dlm_hb_up);
	o2hb_unregister_callback(NULL, &dlm->dlm_hb_down);
	o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
}

static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
{
	int status = 0;

	mlog(0, "registering handlers.\n");

	o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
			    dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
	status = o2hb_register_callback(NULL, &dlm->dlm_hb_down);
	if (status)
		goto bail;

	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
	status = o2hb_register_callback(NULL, &dlm->dlm_hb_up);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
					sizeof(struct dlm_master_request),
					dlm_master_request_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
					sizeof(struct dlm_assert_master),
					dlm_assert_master_handler,
					dlm, dlm_assert_master_post_handler,
					&dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
					sizeof(struct dlm_create_lock),
					dlm_create_lock_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
					DLM_CONVERT_LOCK_MAX_LEN,
					dlm_convert_lock_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
					DLM_UNLOCK_LOCK_MAX_LEN,
					dlm_unlock_lock_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
					DLM_PROXY_AST_MAX_LEN,
					dlm_proxy_ast_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
					sizeof(struct dlm_exit_domain),
					dlm_exit_domain_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
					sizeof(struct dlm_deref_lockres),
					dlm_deref_lockres_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
					sizeof(struct dlm_migrate_request),
					dlm_migrate_request_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
					DLM_MIG_LOCKRES_MAX_LEN,
					dlm_mig_lockres_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
					sizeof(struct dlm_master_requery),
					dlm_master_requery_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
					sizeof(struct dlm_lock_request),
					dlm_request_all_locks_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
					sizeof(struct dlm_reco_data_done),
					dlm_reco_data_done_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
					sizeof(struct dlm_begin_reco),
					dlm_begin_reco_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
					sizeof(struct dlm_finalize_reco),
					dlm_finalize_reco_handler,
					dlm, NULL, &dlm->dlm_domain_handlers);

bail:
	if (status)
		dlm_unregister_domain_handlers(dlm);

	return status;
}

static int dlm_join_domain(struct dlm_ctxt *dlm)
{
	int status;
	unsigned int backoff;
	unsigned int total_backoff = 0;

	BUG_ON(!dlm);

	mlog(0, "Join domain %s\n", dlm->name);

	status = dlm_register_domain_handlers(dlm);
	if (status) {
		mlog_errno(status);
		goto bail;
	}

	status = dlm_debug_init(dlm);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = dlm_launch_thread(dlm);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = dlm_launch_recovery_thread(dlm);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	dlm->dlm_worker = create_singlethread_workqueue("dlm_wq");
	if (!dlm->dlm_worker) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	do {
		status = dlm_try_to_join_domain(dlm);

		/* If we're racing another node to the join, then we
		 * need to back off temporarily and let them
		 * complete. */
#define	DLM_JOIN_TIMEOUT_MSECS	90000
		if (status == -EAGAIN) {
			if (signal_pending(current)) {
				status = -ERESTARTSYS;
				goto bail;
			}

			/* total_backoff is tracked in msecs */
			if (total_backoff > DLM_JOIN_TIMEOUT_MSECS) {
				status = -ERESTARTSYS;
				mlog(ML_NOTICE, "Timed out joining dlm domain "
				     "%s after %u msecs\n", dlm->name,
				     total_backoff);
				goto bail;
			}

			/*
			 * <chip> After you!
			 * <dale> No, after you!
			 * <chip> I insist!
			 * <dale> But you first!
			 * ...
			 */
			backoff = (unsigned int)(jiffies & 0x3);
			backoff *= DLM_DOMAIN_BACKOFF_MS;
			total_backoff += backoff;
			mlog(0, "backoff %d\n", backoff);
			msleep(backoff);
		}
	} while (status == -EAGAIN);

	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	status = 0;
bail:
	wake_up(&dlm_domain_events);

	if (status) {
		dlm_unregister_domain_handlers(dlm);
		dlm_debug_shutdown(dlm);
		dlm_complete_thread(dlm);
		dlm_complete_recovery_thread(dlm);
		dlm_destroy_dlm_worker(dlm);
	}

	return status;
}

static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
				       u32 key)
{
	int i;
	int ret;
	struct dlm_ctxt *dlm = NULL;

	dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
	if (!dlm) {
		mlog_errno(-ENOMEM);
		goto leave;
	}

	dlm->name = kstrdup(domain, GFP_KERNEL);
	if (dlm->name == NULL) {
		mlog_errno(-ENOMEM);
		kfree(dlm);
		dlm = NULL;
		goto leave;
	}

	dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
	if (!dlm->lockres_hash) {
		mlog_errno(-ENOMEM);
		kfree(dlm->name);
		kfree(dlm);
		dlm = NULL;
		goto leave;
	}

	for (i = 0; i < DLM_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));

	dlm->master_hash = (struct hlist_head **)
				dlm_alloc_pagevec(DLM_HASH_PAGES);
	if (!dlm->master_hash) {
		mlog_errno(-ENOMEM);
		dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
		kfree(dlm->name);
		kfree(dlm);
		dlm = NULL;
		goto leave;
	}

	for (i = 0; i < DLM_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(dlm_master_hash(dlm, i));

	dlm->key = key;
	dlm->node_num = o2nm_this_node();

	ret = dlm_create_debugfs_subroot(dlm);
	if (ret < 0) {
		dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES);
		dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
		kfree(dlm->name);
		kfree(dlm);
		dlm = NULL;
		goto leave;
	}

	spin_lock_init(&dlm->spinlock);
	spin_lock_init(&dlm->master_lock);
	spin_lock_init(&dlm->ast_lock);
	spin_lock_init(&dlm->track_lock);
	INIT_LIST_HEAD(&dlm->list);
	INIT_LIST_HEAD(&dlm->dirty_list);
	INIT_LIST_HEAD(&dlm->reco.resources);
	INIT_LIST_HEAD(&dlm->reco.received);
	INIT_LIST_HEAD(&dlm->reco.node_data);
	INIT_LIST_HEAD(&dlm->purge_list);
	INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
	INIT_LIST_HEAD(&dlm->tracking_list);
	dlm->reco.state = 0;

	INIT_LIST_HEAD(&dlm->pending_asts);
	INIT_LIST_HEAD(&dlm->pending_basts);

	mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
	     dlm->recovery_map, &(dlm->recovery_map[0]));

	memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map));
	memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map));
	memset(dlm->domain_map, 0, sizeof(dlm->domain_map));

	dlm->dlm_thread_task = NULL;
	dlm->dlm_reco_thread_task = NULL;
	dlm->dlm_worker = NULL;
	init_waitqueue_head(&dlm->dlm_thread_wq);
	init_waitqueue_head(&dlm->dlm_reco_thread_wq);
	init_waitqueue_head(&dlm->reco.event);
	init_waitqueue_head(&dlm->ast_wq);
	init_waitqueue_head(&dlm->migration_wq);
	INIT_LIST_HEAD(&dlm->mle_hb_events);

	dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
	init_waitqueue_head(&dlm->dlm_join_events);

	dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;

	atomic_set(&dlm->res_tot_count, 0);
	atomic_set(&dlm->res_cur_count, 0);
	for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) {
		atomic_set(&dlm->mle_tot_count[i], 0);
		atomic_set(&dlm->mle_cur_count[i], 0);
	}

	spin_lock_init(&dlm->work_lock);
	INIT_LIST_HEAD(&dlm->work_list);
	INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);

	kref_init(&dlm->dlm_refs);
	dlm->dlm_state = DLM_CTXT_NEW;

	INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);

	mlog(0, "context init: refcount %u\n",
	     atomic_read(&dlm->dlm_refs.refcount));

leave:
	return dlm;
}

/*
 * Compare a requested locking protocol version against the current one.
 *
 * If the major numbers are different, they are incompatible.
 * If the current minor is greater than the request, they are incompatible.
 * If the current minor is less than or equal to the request, they are
 * compatible, and the requester should run at the current minor version.
 */
static int dlm_protocol_compare(struct dlm_protocol_version *existing,
				struct dlm_protocol_version *request)
{
	if (existing->pv_major != request->pv_major)
		return 1;

	if (existing->pv_minor > request->pv_minor)
		return 1;

	if (existing->pv_minor < request->pv_minor)
		request->pv_minor = existing->pv_minor;

	return 0;
}

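/*
 * For example (illustrative only): existing 1.1 vs. requested 1.4 is
 * compatible and rewrites the request down to 1.1; existing 1.4 vs.
 * requested 1.1 is rejected because the running minor is greater; and
 * any major-number mismatch (2.0 vs. 1.x) always fails.
 */
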
/*
 * dlm_register_domain: one-time setup per "domain".
 *
 * The filesystem passes in the requested locking version via proto.
 * If registration was successful, proto will contain the negotiated
 * locking protocol.
 */
struct dlm_ctxt * dlm_register_domain(const char *domain,
				      u32 key,
				      struct dlm_protocol_version *fs_proto)
{
	int ret;
	struct dlm_ctxt *dlm = NULL;
	struct dlm_ctxt *new_ctxt = NULL;

	if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
		ret = -ENAMETOOLONG;
		mlog(ML_ERROR, "domain name length too long\n");
		goto leave;
	}

	if (!o2hb_check_local_node_heartbeating()) {
		mlog(ML_ERROR, "the local node has not been configured, or is "
		     "not heartbeating\n");
		ret = -EPROTO;
		goto leave;
	}

	mlog(0, "register called for domain \"%s\"\n", domain);

retry:
	dlm = NULL;
	if (signal_pending(current)) {
		ret = -ERESTARTSYS;
		mlog_errno(ret);
		goto leave;
	}

	spin_lock(&dlm_domain_lock);

	dlm = __dlm_lookup_domain(domain);
	if (dlm) {
		if (dlm->dlm_state != DLM_CTXT_JOINED) {
			spin_unlock(&dlm_domain_lock);

			mlog(0, "This ctxt is not joined yet!\n");
			wait_event_interruptible(dlm_domain_events,
						 dlm_wait_on_domain_helper(
							 domain));
			goto retry;
		}

		if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
			spin_unlock(&dlm_domain_lock);
			mlog(ML_ERROR,
			     "Requested locking protocol version is not "
			     "compatible with already registered domain "
			     "\"%s\"\n", domain);
			ret = -EPROTO;
			goto leave;
		}

		__dlm_get(dlm);
		dlm->num_joins++;

		spin_unlock(&dlm_domain_lock);

		ret = 0;
		goto leave;
	}

	/* doesn't exist */
	if (!new_ctxt) {
		spin_unlock(&dlm_domain_lock);

		new_ctxt = dlm_alloc_ctxt(domain, key);
		if (new_ctxt)
			goto retry;

		ret = -ENOMEM;
		mlog_errno(ret);
		goto leave;
	}

	/* a little variable switch-a-roo here... */
	dlm = new_ctxt;
	new_ctxt = NULL;

	/* add the new domain */
	list_add_tail(&dlm->list, &dlm_domains);
	spin_unlock(&dlm_domain_lock);

	/*
	 * Pass the locking protocol version into the join.  If the join
	 * succeeds, it will have the negotiated protocol set.
	 */
	dlm->dlm_locking_proto = dlm_protocol;
	dlm->fs_locking_proto = *fs_proto;

	ret = dlm_join_domain(dlm);
	if (ret) {
		mlog_errno(ret);
		dlm_put(dlm);
		goto leave;
	}

	/* Tell the caller what locking protocol we negotiated */
	*fs_proto = dlm->fs_locking_proto;

	ret = 0;
leave:
	if (new_ctxt)
		dlm_free_ctxt_mem(new_ctxt);

	if (ret < 0)
		dlm = ERR_PTR(ret);

	return dlm;
}
EXPORT_SYMBOL_GPL(dlm_register_domain);

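/*
 * Illustrative caller sketch (hypothetical, not part of the build):
 *
 *	struct dlm_protocol_version fs_version = {
 *		.pv_major = 1,
 *		.pv_minor = 0,
 *	};
 *	struct dlm_ctxt *dlm;
 *
 *	dlm = dlm_register_domain("my-domain-uuid", key, &fs_version);
 *	if (IS_ERR(dlm))
 *		return PTR_ERR(dlm);
 *	... fs_version now holds the negotiated minor number ...
 *	dlm_unregister_domain(dlm);
 */
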
static LIST_HEAD(dlm_join_handlers);

static void dlm_unregister_net_handlers(void)
{
	o2net_unregister_handler_list(&dlm_join_handlers);
}

static int dlm_register_net_handlers(void)
{
	int status = 0;

	status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
					sizeof(struct dlm_query_join_request),
					dlm_query_join_handler,
					NULL, NULL, &dlm_join_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
					sizeof(struct dlm_assert_joined),
					dlm_assert_joined_handler,
					NULL, NULL, &dlm_join_handlers);
	if (status)
		goto bail;

	status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
					sizeof(struct dlm_cancel_join),
					dlm_cancel_join_handler,
					NULL, NULL, &dlm_join_handlers);

bail:
	if (status < 0)
		dlm_unregister_net_handlers();

	return status;
}

/* Domain eviction callback handling.
 *
 * The file system requires notification of node death *before* the
 * dlm completes its recovery work, otherwise it may be able to
 * acquire locks on resources requiring recovery. Since the dlm can
 * evict a node from its domain *before* heartbeat fires, a similar
 * mechanism is required. */

/* Eviction is not expected to happen often, so a per-domain lock is
 * not necessary. Eviction callbacks are allowed to sleep for short
 * periods of time. */
static DECLARE_RWSEM(dlm_callback_sem);

void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
					int node_num)
{
	struct list_head *iter;
	struct dlm_eviction_cb *cb;

	down_read(&dlm_callback_sem);
	list_for_each(iter, &dlm->dlm_eviction_callbacks) {
		cb = list_entry(iter, struct dlm_eviction_cb, ec_item);

		cb->ec_func(node_num, cb->ec_data);
	}
	up_read(&dlm_callback_sem);
}

void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
			   dlm_eviction_func *f,
			   void *data)
{
	INIT_LIST_HEAD(&cb->ec_item);

	cb->ec_func = f;
	cb->ec_data = data;
}
EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);

void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
			      struct dlm_eviction_cb *cb)
{
	down_write(&dlm_callback_sem);
	list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
	up_write(&dlm_callback_sem);
}
EXPORT_SYMBOL_GPL(dlm_register_eviction_cb);

void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
{
	down_write(&dlm_callback_sem);
	list_del_init(&cb->ec_item);
	up_write(&dlm_callback_sem);
}
EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb);

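/*
 * Illustrative usage sketch (hypothetical names, not part of the
 * build): a file system wanting eviction notification would do
 * something like:
 *
 *	static void myfs_evict_cb(int node_num, void *data)
 *	{
 *		... invalidate state that depended on node_num ...
 *	}
 *
 *	dlm_setup_eviction_cb(&myfs_cb, myfs_evict_cb, myfs_data);
 *	dlm_register_eviction_cb(dlm, &myfs_cb);
 *	...
 *	dlm_unregister_eviction_cb(&myfs_cb);
 */
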
static int __init dlm_init(void)
{
	int status;

	dlm_print_version();

	status = dlm_init_mle_cache();
	if (status) {
		mlog(ML_ERROR, "Could not create o2dlm_mle slabcache\n");
		goto error;
	}

	status = dlm_init_master_caches();
	if (status) {
		mlog(ML_ERROR, "Could not create o2dlm_lockres and "
		     "o2dlm_lockname slabcaches\n");
		goto error;
	}

	status = dlm_init_lock_cache();
	if (status) {
		mlog(ML_ERROR, "Could not create o2dlm_lock slabcache\n");
		goto error;
	}

	status = dlm_register_net_handlers();
	if (status) {
		mlog(ML_ERROR, "Unable to register network handlers\n");
		goto error;
	}

	status = dlm_create_debugfs_root();
	if (status)
		goto error;

	return 0;
error:
	dlm_unregister_net_handlers();
	dlm_destroy_lock_cache();
	dlm_destroy_master_caches();
	dlm_destroy_mle_cache();
	return status;
}

static void __exit dlm_exit (void)
{
	dlm_destroy_debugfs_root();
	dlm_unregister_net_handlers();
	dlm_destroy_lock_cache();
	dlm_destroy_master_caches();
	dlm_destroy_mle_cache();
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");

module_init(dlm_init);
module_exit(dlm_exit);