1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/smp_lock.h>
31 #include <linux/crc32.h>
32 #include <linux/kthread.h>
33 #include <linux/pagemap.h>
34 #include <linux/debugfs.h>
35 #include <linux/seq_file.h>
37 #include <cluster/heartbeat.h>
38 #include <cluster/nodemanager.h>
39 #include <cluster/tcp.h>
41 #include <dlm/dlmapi.h>
43 #define MLOG_MASK_PREFIX ML_DLM_GLUE
44 #include <cluster/masklog.h>
51 #include "extent_map.h"
52 #include "heartbeat.h"
60 #include "buffer_head_io.h"
62 struct ocfs2_mask_waiter {
63 struct list_head mw_item;
65 struct completion mw_complete;
66 unsigned long mw_mask;
67 unsigned long mw_goal;
70 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
71 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
74 * Return value from ->downconvert_worker functions.
76 * These control the precise actions of ocfs2_unblock_lock()
77 * and ocfs2_process_blocked_lock()
80 enum ocfs2_unblock_action {
81 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
82 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
83 * ->post_unlock callback */
84 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
85 * ->post_unlock() callback. */
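/*
 * Illustrative sketch (not part of the original file): a minimal
 * ->downconvert_worker could pick between these return values roughly
 * as follows.  example_can_downconvert() is a hypothetical helper;
 * real workers, e.g. ocfs2_data_convert_worker() below, first do the
 * actual work of syncing and truncating pages.
 *
 *	static int example_convert_worker(struct ocfs2_lock_res *lockres,
 *					  int blocking)
 *	{
 *		if (!example_can_downconvert(lockres))
 *			return UNBLOCK_STOP_POST;
 *		return UNBLOCK_CONTINUE;
 *	}
 */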
88 struct ocfs2_unblock_ctl {
90 enum ocfs2_unblock_action unblock_action;
93 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
95 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
97 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
100 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
103 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
104 struct ocfs2_lock_res *lockres);
107 * OCFS2 Lock Resource Operations
109 * These fine tune the behavior of the generic dlmglue locking infrastructure.
111 struct ocfs2_lock_res_ops {
113 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
114 * this callback if ->l_priv is not an ocfs2_super pointer
116 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
118 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
121 * Allow a lock type to add checks to determine whether it is
122 * safe to downconvert a lock. Return 0 to re-queue the
123 * downconvert at a later time, nonzero to continue.
125 * For most locks, the default checks that there are no
126 * incompatible holders are sufficient.
128 * Called with the lockres spinlock held.
130 int (*check_downconvert)(struct ocfs2_lock_res *, int);
133 * Allows a lock type to populate the lock value block. This
134 * is called on downconvert, and when we drop a lock.
136 * Locks that want to use this should set LOCK_TYPE_USES_LVB
137 * in the flags field.
139 * Called with the lockres spinlock held.
141 void (*set_lvb)(struct ocfs2_lock_res *);
144 * Called from the downconvert thread when it is determined
145 * that a lock will be downconverted. This is called without
146 * any locks held so the function can do work that might
147 * schedule (syncing out data, etc).
149 * This should return any one of the ocfs2_unblock_action
150 * values, depending on what it wants the thread to do.
152 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
155 * LOCK_TYPE_* flags which describe the specific requirements
156 * of a lock type. Descriptions of each individual flag follow.
162 * Some locks want to "refresh" potentially stale data when a
163 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
164 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
165 * individual lockres l_flags member from the ast function. It is
166 * expected that the locking wrapper will clear the
167 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
169 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
172 * Indicate that a lock type makes use of the lock value block. The
173 * ->set_lvb lock type callback must be defined.
175 #define LOCK_TYPE_USES_LVB 0x2
177 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
178 .get_osb = ocfs2_get_inode_osb,
182 static struct ocfs2_lock_res_ops ocfs2_inode_meta_lops = {
183 .get_osb = ocfs2_get_inode_osb,
184 .check_downconvert = ocfs2_check_meta_downconvert,
185 .set_lvb = ocfs2_set_meta_lvb,
186 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
189 static struct ocfs2_lock_res_ops ocfs2_inode_data_lops = {
190 .get_osb = ocfs2_get_inode_osb,
191 .downconvert_worker = ocfs2_data_convert_worker,
195 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
196 .flags = LOCK_TYPE_REQUIRES_REFRESH,
199 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
203 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
204 .get_osb = ocfs2_get_dentry_osb,
205 .post_unlock = ocfs2_dentry_post_unlock,
206 .downconvert_worker = ocfs2_dentry_convert_worker,
210 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
212 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
213 lockres->l_type == OCFS2_LOCK_TYPE_DATA ||
214 lockres->l_type == OCFS2_LOCK_TYPE_RW;
217 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
219 BUG_ON(!ocfs2_is_inode_lock(lockres));
221 return (struct inode *) lockres->l_priv;
224 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
226 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
228 return (struct ocfs2_dentry_lock *)lockres->l_priv;
231 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
233 if (lockres->l_ops->get_osb)
234 return lockres->l_ops->get_osb(lockres);
236 return (struct ocfs2_super *)lockres->l_priv;
239 static int ocfs2_lock_create(struct ocfs2_super *osb,
240 struct ocfs2_lock_res *lockres,
243 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
245 static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
246 struct ocfs2_lock_res *lockres,
248 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
249 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
250 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
251 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
252 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
253 struct ocfs2_lock_res *lockres);
254 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
256 #define ocfs2_log_dlm_error(_func, _stat, _lockres) do { \
257 mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \
258 "resource %s: %s\n", dlm_errname(_stat), _func, \
259 _lockres->l_name, dlm_errmsg(_stat)); \
261 static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
262 struct ocfs2_lock_res *lockres);
263 static int ocfs2_meta_lock_update(struct inode *inode,
264 struct buffer_head **bh);
265 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
266 static inline int ocfs2_highest_compat_lock_level(int level);
268 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
277 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
279 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
280 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
281 (long long)blkno, generation);
283 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
285 mlog(0, "built lock resource with name: %s\n", name);
290 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
292 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
293 struct ocfs2_dlm_debug *dlm_debug)
295 mlog(0, "Add tracking for lockres %s\n", res->l_name);
297 spin_lock(&ocfs2_dlm_tracking_lock);
298 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
299 spin_unlock(&ocfs2_dlm_tracking_lock);
302 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
304 spin_lock(&ocfs2_dlm_tracking_lock);
305 if (!list_empty(&res->l_debug_list))
306 list_del_init(&res->l_debug_list);
307 spin_unlock(&ocfs2_dlm_tracking_lock);
310 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
311 struct ocfs2_lock_res *res,
312 enum ocfs2_lock_type type,
313 struct ocfs2_lock_res_ops *ops,
320 res->l_level = LKM_IVMODE;
321 res->l_requested = LKM_IVMODE;
322 res->l_blocking = LKM_IVMODE;
323 res->l_action = OCFS2_AST_INVALID;
324 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
326 res->l_flags = OCFS2_LOCK_INITIALIZED;
328 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
331 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
333 /* This also clears out the lock status block */
334 memset(res, 0, sizeof(struct ocfs2_lock_res));
335 spin_lock_init(&res->l_lock);
336 init_waitqueue_head(&res->l_event);
337 INIT_LIST_HEAD(&res->l_blocked_list);
338 INIT_LIST_HEAD(&res->l_mask_waiters);
341 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
342 enum ocfs2_lock_type type,
343 unsigned int generation,
346 struct ocfs2_lock_res_ops *ops;
349 case OCFS2_LOCK_TYPE_RW:
350 ops = &ocfs2_inode_rw_lops;
352 case OCFS2_LOCK_TYPE_META:
353 ops = &ocfs2_inode_meta_lops;
355 case OCFS2_LOCK_TYPE_DATA:
356 ops = &ocfs2_inode_data_lops;
359 mlog_bug_on_msg(1, "type: %d\n", type);
360 ops = NULL; /* thanks, gcc */
364 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
365 generation, res->l_name);
366 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
369 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
371 struct inode *inode = ocfs2_lock_res_inode(lockres);
373 return OCFS2_SB(inode->i_sb);
376 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
378 __be64 inode_blkno_be;
380 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
383 return be64_to_cpu(inode_blkno_be);
386 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
388 struct ocfs2_dentry_lock *dl = lockres->l_priv;
390 return OCFS2_SB(dl->dl_inode->i_sb);
393 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
394 u64 parent, struct inode *inode)
397 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
398 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
399 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
401 ocfs2_lock_res_init_once(lockres);
404 * Unfortunately, the standard lock naming scheme won't work
405 * here because we have two 16 byte values to use. Instead,
406 * we'll stuff the inode number as a binary value. We still
407 * want error prints to show something without garbling the
408 * display, so drop a null byte in there before the inode
409 * number. A future version of OCFS2 will likely use all
410 * binary lock names. The stringified names have been a
411 * tremendous aid in debugging, but now that the debugfs
412 * interface exists, we can mangle things there if need be.
414 * NOTE: We also drop the standard "pad" value (the total lock
415 * name size stays the same though - the last part is all
416 * zeros due to the memset in ocfs2_lock_res_init_once()
418 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
420 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
423 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
425 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
428 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
429 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
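/*
 * Illustrative layout of the resulting name (not from the original
 * code): the first OCFS2_DENTRY_LOCK_INO_START bytes hold the usual
 * printable prefix (type character plus parent block number) and its
 * terminating NUL, and the bytes from OCFS2_DENTRY_LOCK_INO_START
 * onward hold the child inode's block number as a raw big-endian u64,
 * which ocfs2_get_dentry_lock_ino() above reads back with
 * be64_to_cpu().
 */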
433 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
434 struct ocfs2_super *osb)
436 /* Superblock lockres doesn't come from a slab so we call init
437 * once on it manually. */
438 ocfs2_lock_res_init_once(res);
439 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
441 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
442 &ocfs2_super_lops, osb);
445 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
446 struct ocfs2_super *osb)
448 /* Rename lockres doesn't come from a slab so we call init
449 * once on it manually. */
450 ocfs2_lock_res_init_once(res);
451 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
452 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
453 &ocfs2_rename_lops, osb);
456 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
460 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
463 ocfs2_remove_lockres_tracking(res);
465 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
466 "Lockres %s is on the blocked list\n",
468 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
469 "Lockres %s has mask waiters pending\n",
471 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
472 "Lockres %s is locked\n",
474 mlog_bug_on_msg(res->l_ro_holders,
475 "Lockres %s has %u ro holders\n",
476 res->l_name, res->l_ro_holders);
477 mlog_bug_on_msg(res->l_ex_holders,
478 "Lockres %s has %u ex holders\n",
479 res->l_name, res->l_ex_holders);
481 /* Need to clear out the lock status block for the dlm */
482 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
488 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
497 lockres->l_ex_holders++;
500 lockres->l_ro_holders++;
509 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
518 BUG_ON(!lockres->l_ex_holders);
519 lockres->l_ex_holders--;
522 BUG_ON(!lockres->l_ro_holders);
523 lockres->l_ro_holders--;
531 /* WARNING: This function lives in a world where the only three lock
532 * levels are EX, PR, and NL. It *will* have to be adjusted when more
533 * lock types are added. */
534 static inline int ocfs2_highest_compat_lock_level(int level)
536 int new_level = LKM_EXMODE;
538 if (level == LKM_EXMODE)
539 new_level = LKM_NLMODE;
540 else if (level == LKM_PRMODE)
541 new_level = LKM_PRMODE;
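/*
 * Example (illustrative): a node blocking us at EX leaves NL as the
 * highest level we may keep; a node blocking us at PR still allows us
 * to hold PR, so only our EX requests have to wait.  This is the
 * value ocfs2_may_continue_on_blocked_lock() compares the wanted
 * level against.
 */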
545 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
546 unsigned long newflags)
548 struct list_head *pos, *tmp;
549 struct ocfs2_mask_waiter *mw;
551 assert_spin_locked(&lockres->l_lock);
553 lockres->l_flags = newflags;
555 list_for_each_safe(pos, tmp, &lockres->l_mask_waiters) {
556 mw = list_entry(pos, struct ocfs2_mask_waiter, mw_item);
557 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
560 list_del_init(&mw->mw_item);
562 complete(&mw->mw_complete);
565 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
567 lockres_set_flags(lockres, lockres->l_flags | or);
569 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
572 lockres_set_flags(lockres, lockres->l_flags & ~clear);
575 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
579 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
580 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
581 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
582 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
584 lockres->l_level = lockres->l_requested;
585 if (lockres->l_level <=
586 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
587 lockres->l_blocking = LKM_NLMODE;
588 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
590 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
595 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
599 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
600 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
602 /* Convert from RO to EX doesn't really need anything as our
603 * information is already up to date. Convert from NL to
604 * *anything* however should mark ourselves as needing an
606 if (lockres->l_level == LKM_NLMODE &&
607 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
608 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
610 lockres->l_level = lockres->l_requested;
611 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
616 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
620 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
621 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
623 if (lockres->l_requested > LKM_NLMODE &&
624 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
625 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
626 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
628 lockres->l_level = lockres->l_requested;
629 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
630 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
635 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
638 int needs_downconvert = 0;
641 assert_spin_locked(&lockres->l_lock);
643 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
645 if (level > lockres->l_blocking) {
646 /* only schedule a downconvert if we haven't already scheduled
647 * one that goes low enough to satisfy the level we're
648 * blocking. this also catches the case where we get
650 if (ocfs2_highest_compat_lock_level(level) <
651 ocfs2_highest_compat_lock_level(lockres->l_blocking))
652 needs_downconvert = 1;
654 lockres->l_blocking = level;
657 mlog_exit(needs_downconvert);
658 return needs_downconvert;
661 static void ocfs2_blocking_ast(void *opaque, int level)
663 struct ocfs2_lock_res *lockres = opaque;
664 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
665 int needs_downconvert;
668 BUG_ON(level <= LKM_NLMODE);
670 mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
671 lockres->l_name, level, lockres->l_level,
672 ocfs2_lock_type_string(lockres->l_type));
674 spin_lock_irqsave(&lockres->l_lock, flags);
675 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
676 if (needs_downconvert)
677 ocfs2_schedule_blocked_lock(osb, lockres);
678 spin_unlock_irqrestore(&lockres->l_lock, flags);
680 wake_up(&lockres->l_event);
682 ocfs2_kick_vote_thread(osb);
685 static void ocfs2_locking_ast(void *opaque)
687 struct ocfs2_lock_res *lockres = opaque;
688 struct dlm_lockstatus *lksb = &lockres->l_lksb;
691 spin_lock_irqsave(&lockres->l_lock, flags);
693 if (lksb->status != DLM_NORMAL) {
694 mlog(ML_ERROR, "lockres %s: lksb status value of %u!\n",
695 lockres->l_name, lksb->status);
696 spin_unlock_irqrestore(&lockres->l_lock, flags);
700 switch(lockres->l_action) {
701 case OCFS2_AST_ATTACH:
702 ocfs2_generic_handle_attach_action(lockres);
703 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
705 case OCFS2_AST_CONVERT:
706 ocfs2_generic_handle_convert_action(lockres);
708 case OCFS2_AST_DOWNCONVERT:
709 ocfs2_generic_handle_downconvert_action(lockres);
712 mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
713 "lockres flags = 0x%lx, unlock action: %u\n",
714 lockres->l_name, lockres->l_action, lockres->l_flags,
715 lockres->l_unlock_action);
719 /* set it to something invalid so if we get called again we
721 lockres->l_action = OCFS2_AST_INVALID;
723 wake_up(&lockres->l_event);
724 spin_unlock_irqrestore(&lockres->l_lock, flags);
727 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
733 spin_lock_irqsave(&lockres->l_lock, flags);
734 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
736 lockres->l_action = OCFS2_AST_INVALID;
738 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
739 spin_unlock_irqrestore(&lockres->l_lock, flags);
741 wake_up(&lockres->l_event);
745 /* Note: If we detect another process working on the lock (i.e.,
746 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
747 * to do the right thing in that case.
749 static int ocfs2_lock_create(struct ocfs2_super *osb,
750 struct ocfs2_lock_res *lockres,
755 enum dlm_status status;
760 mlog(0, "lock %s, level = %d, flags = %d\n", lockres->l_name, level,
763 spin_lock_irqsave(&lockres->l_lock, flags);
764 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
765 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
766 spin_unlock_irqrestore(&lockres->l_lock, flags);
770 lockres->l_action = OCFS2_AST_ATTACH;
771 lockres->l_requested = level;
772 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
773 spin_unlock_irqrestore(&lockres->l_lock, flags);
775 status = dlmlock(osb->dlm,
780 OCFS2_LOCK_ID_MAX_LEN - 1,
784 if (status != DLM_NORMAL) {
785 ocfs2_log_dlm_error("dlmlock", status, lockres);
787 ocfs2_recover_from_dlm_error(lockres, 1);
790 mlog(0, "lock %s, successfull return from dlmlock\n", lockres->l_name);
797 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
803 spin_lock_irqsave(&lockres->l_lock, flags);
804 ret = lockres->l_flags & flag;
805 spin_unlock_irqrestore(&lockres->l_lock, flags);
810 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
813 wait_event(lockres->l_event,
814 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
817 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
820 wait_event(lockres->l_event,
821 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
824 /* predict what lock level we'll be dropping down to on behalf
825 * of another node, and return true if the currently wanted
826 * level will be compatible with it. */
827 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
830 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
832 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
835 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
837 INIT_LIST_HEAD(&mw->mw_item);
838 init_completion(&mw->mw_complete);
841 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
843 wait_for_completion(&mw->mw_complete);
844 /* Re-arm the completion in case we want to wait on it again */
845 INIT_COMPLETION(mw->mw_complete);
846 return mw->mw_status;
849 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
850 struct ocfs2_mask_waiter *mw,
854 BUG_ON(!list_empty(&mw->mw_item));
856 assert_spin_locked(&lockres->l_lock);
858 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
863 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
864 * if the mask still hadn't reached its goal */
865 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
866 struct ocfs2_mask_waiter *mw)
871 spin_lock_irqsave(&lockres->l_lock, flags);
872 if (!list_empty(&mw->mw_item)) {
873 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
876 list_del_init(&mw->mw_item);
877 init_completion(&mw->mw_complete);
879 spin_unlock_irqrestore(&lockres->l_lock, flags);
885 static int ocfs2_cluster_lock(struct ocfs2_super *osb,
886 struct ocfs2_lock_res *lockres,
891 struct ocfs2_mask_waiter mw;
892 enum dlm_status status;
893 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
894 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
899 ocfs2_init_mask_waiter(&mw);
901 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
902 lkm_flags |= LKM_VALBLK;
907 if (catch_signals && signal_pending(current)) {
912 spin_lock_irqsave(&lockres->l_lock, flags);
914 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
915 "Cluster lock called on freeing lockres %s! flags "
916 "0x%lx\n", lockres->l_name, lockres->l_flags);
918 /* We only compare against the currently granted level
919 * here. If the lock is blocked waiting on a downconvert,
920 * we'll get caught below. */
921 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
922 level > lockres->l_level) {
923 /* is someone sitting in dlm_lock? If so, wait on
925 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
930 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
931 /* lock has not been created yet. */
932 spin_unlock_irqrestore(&lockres->l_lock, flags);
934 ret = ocfs2_lock_create(osb, lockres, LKM_NLMODE, 0);
942 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
943 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
944 /* is the lock currently blocked on behalf of
946 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
951 if (level > lockres->l_level) {
952 if (lockres->l_action != OCFS2_AST_INVALID)
953 mlog(ML_ERROR, "lockres %s has action %u pending\n",
954 lockres->l_name, lockres->l_action);
956 lockres->l_action = OCFS2_AST_CONVERT;
957 lockres->l_requested = level;
958 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
959 spin_unlock_irqrestore(&lockres->l_lock, flags);
961 BUG_ON(level == LKM_IVMODE);
962 BUG_ON(level == LKM_NLMODE);
964 mlog(0, "lock %s, convert from %d to level = %d\n",
965 lockres->l_name, lockres->l_level, level);
967 /* call dlm_lock to upgrade lock now */
968 status = dlmlock(osb->dlm,
971 lkm_flags|LKM_CONVERT,
973 OCFS2_LOCK_ID_MAX_LEN - 1,
977 if (status != DLM_NORMAL) {
978 if ((lkm_flags & LKM_NOQUEUE) &&
979 (status == DLM_NOTQUEUED))
982 ocfs2_log_dlm_error("dlmlock", status,
986 ocfs2_recover_from_dlm_error(lockres, 1);
990 mlog(0, "lock %s, successfull return from dlmlock\n",
993 /* At this point we've gone inside the dlm and need to
994 * complete our work regardless. */
997 /* wait for busy to clear and carry on */
1001 /* Ok, if we get here then we're good to go. */
1002 ocfs2_inc_holders(lockres, level);
1006 spin_unlock_irqrestore(&lockres->l_lock, flags);
1009 * This is helping work around a lock inversion between the page lock
1010 * and dlm locks. One path holds the page lock while calling aops
1011 * which block acquiring dlm locks. The voting thread holds dlm
1012 * locks while acquiring page locks while down converting data locks.
1013 * This block is helping an aop path notice the inversion and back
1014 * off to unlock its page lock before trying the dlm lock again.
1016 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1017 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1019 if (lockres_remove_mask_waiter(lockres, &mw))
1025 ret = ocfs2_wait_for_mask(&mw);
1035 static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
1036 struct ocfs2_lock_res *lockres,
1039 unsigned long flags;
1042 spin_lock_irqsave(&lockres->l_lock, flags);
1043 ocfs2_dec_holders(lockres, level);
1044 ocfs2_vote_on_unlock(osb, lockres);
1045 spin_unlock_irqrestore(&lockres->l_lock, flags);
1049 int ocfs2_create_new_lock(struct ocfs2_super *osb,
1050 struct ocfs2_lock_res *lockres,
1054 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1055 unsigned long flags;
1056 int lkm_flags = local ? LKM_LOCAL : 0;
1058 spin_lock_irqsave(&lockres->l_lock, flags);
1059 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1060 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1061 spin_unlock_irqrestore(&lockres->l_lock, flags);
1063 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1066 /* Grants us an EX lock on the data and metadata resources, skipping
1067 * the normal cluster directory lookup. Use this ONLY on newly created
1068 * inodes which other nodes can't possibly see, and which haven't been
1069 * hashed in the inode hash yet. This can give us a good performance
1070 * increase as it'll skip the network broadcast normally associated
1071 * with creating a new lock resource. */
1072 int ocfs2_create_new_inode_locks(struct inode *inode)
1075 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1078 BUG_ON(!ocfs2_inode_is_new(inode));
1082 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1084 /* Note that we don't increment any of the holder counts, nor
1085 * do we add anything to a journal handle. Since this is
1086 * supposed to be a new inode which the cluster doesn't know
1087 * about yet, there is no need to. As far as the LVB handling
1088 * is concerned, this is basically like acquiring an EX lock
1089 * on a resource which has an invalid one -- we'll set it
1090 * valid when we release the EX. */
1092 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1099 * We don't want to use LKM_LOCAL on a meta data lock as they
1100 * don't use a generation in their lock names.
1102 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_meta_lockres, 1, 0);
1108 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_data_lockres, 1, 1);
1119 int ocfs2_rw_lock(struct inode *inode, int write)
1122 struct ocfs2_lock_res *lockres;
1128 mlog(0, "inode %llu take %s RW lock\n",
1129 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1130 write ? "EXMODE" : "PRMODE");
1132 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1134 level = write ? LKM_EXMODE : LKM_PRMODE;
1136 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1145 void ocfs2_rw_unlock(struct inode *inode, int write)
1147 int level = write ? LKM_EXMODE : LKM_PRMODE;
1148 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1152 mlog(0, "inode %llu drop %s RW lock\n",
1153 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1154 write ? "EXMODE" : "PRMODE");
1156 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1161 int ocfs2_data_lock_full(struct inode *inode,
1165 int status = 0, level;
1166 struct ocfs2_lock_res *lockres;
1172 mlog(0, "inode %llu take %s DATA lock\n",
1173 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1174 write ? "EXMODE" : "PRMODE");
1176 /* We'll allow faking a readonly data lock for
1178 if (ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb))) {
1186 lockres = &OCFS2_I(inode)->ip_data_lockres;
1188 level = write ? LKM_EXMODE : LKM_PRMODE;
1190 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level,
1192 if (status < 0 && status != -EAGAIN)
1200 /* see ocfs2_meta_lock_with_page() */
1201 int ocfs2_data_lock_with_page(struct inode *inode,
1207 ret = ocfs2_data_lock_full(inode, write, OCFS2_LOCK_NONBLOCK);
1208 if (ret == -EAGAIN) {
1210 if (ocfs2_data_lock(inode, write) == 0)
1211 ocfs2_data_unlock(inode, write);
1212 ret = AOP_TRUNCATED_PAGE;
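/*
 * Illustrative caller sketch (hypothetical, not from the original
 * file): an aop method holding the page lock would use this roughly
 * as follows, passing the positive AOP_TRUNCATED_PAGE value straight
 * back to the VFS so the operation is retried (see the longer
 * discussion above ocfs2_meta_lock_with_page() below).  The argument
 * list is abbreviated here.
 *
 *	ret = ocfs2_data_lock_with_page(inode, ...);
 *	if (ret != 0) {
 *		if (ret < 0)
 *			mlog_errno(ret);
 *		unlock_page(page);
 *		return ret;
 *	}
 */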
1218 static void ocfs2_vote_on_unlock(struct ocfs2_super *osb,
1219 struct ocfs2_lock_res *lockres)
1225 /* If we know that another node is waiting on our lock, kick
1226 * the vote thread pre-emptively when we reach a release
1228 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
1229 switch(lockres->l_blocking) {
1231 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
1235 if (!lockres->l_ex_holders)
1244 ocfs2_kick_vote_thread(osb);
1249 void ocfs2_data_unlock(struct inode *inode,
1252 int level = write ? LKM_EXMODE : LKM_PRMODE;
1253 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_data_lockres;
1257 mlog(0, "inode %llu drop %s DATA lock\n",
1258 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1259 write ? "EXMODE" : "PRMODE");
1261 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)))
1262 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1267 #define OCFS2_SEC_BITS 34
1268 #define OCFS2_SEC_SHIFT (64 - 34)
1269 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
1271 /* LVB only has room for 64 bits of time here so we pack it for
1273 static u64 ocfs2_pack_timespec(struct timespec *spec)
1276 u64 sec = spec->tv_sec;
1277 u32 nsec = spec->tv_nsec;
1279 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
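/*
 * Worked example (illustrative): with OCFS2_SEC_SHIFT = 64 - 34 = 30,
 * the seconds value lands in the upper 34 bits and the nanoseconds in
 * the lower 30.  Since tv_nsec is always below 10^9 < 2^30, no bits
 * are lost and ocfs2_unpack_timespec() below reverses the packing
 * exactly:
 *
 *	pack(sec = 5, nsec = 7)  ->  (5 << 30) | 7
 *	unpack((5 << 30) | 7)    ->  sec = 5, nsec = 7
 */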
1284 /* Call this with the lockres locked. I am reasonably sure we don't
1285 * need ip_lock in this function as anyone who would be changing those
1286 * values is supposed to be blocked in ocfs2_meta_lock right now. */
1287 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
1289 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1290 struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
1291 struct ocfs2_meta_lvb *lvb;
1295 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1298 * Invalidate the LVB of a deleted inode - this way other
1299 * nodes are forced to go to disk and discover the new inode
1302 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1303 lvb->lvb_version = 0;
1307 lvb->lvb_version = OCFS2_LVB_VERSION;
1308 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
1309 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
1310 lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
1311 lvb->lvb_igid = cpu_to_be32(inode->i_gid);
1312 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
1313 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
1314 lvb->lvb_iatime_packed =
1315 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
1316 lvb->lvb_ictime_packed =
1317 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
1318 lvb->lvb_imtime_packed =
1319 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
1320 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
1321 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
1324 mlog_meta_lvb(0, lockres);
1329 static void ocfs2_unpack_timespec(struct timespec *spec,
1332 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
1333 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
1336 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
1338 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1339 struct ocfs2_lock_res *lockres = &oi->ip_meta_lockres;
1340 struct ocfs2_meta_lvb *lvb;
1344 mlog_meta_lvb(0, lockres);
1346 lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1348 /* We're safe here without the lockres lock... */
1349 spin_lock(&oi->ip_lock);
1350 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
1351 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
1353 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
1354 ocfs2_set_inode_flags(inode);
1356 /* fast-symlinks are a special case */
1357 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
1358 inode->i_blocks = 0;
1361 ocfs2_align_bytes_to_sectors(i_size_read(inode));
1363 inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
1364 inode->i_gid = be32_to_cpu(lvb->lvb_igid);
1365 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
1366 inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
1367 ocfs2_unpack_timespec(&inode->i_atime,
1368 be64_to_cpu(lvb->lvb_iatime_packed));
1369 ocfs2_unpack_timespec(&inode->i_mtime,
1370 be64_to_cpu(lvb->lvb_imtime_packed));
1371 ocfs2_unpack_timespec(&inode->i_ctime,
1372 be64_to_cpu(lvb->lvb_ictime_packed));
1373 spin_unlock(&oi->ip_lock);
1378 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
1379 struct ocfs2_lock_res *lockres)
1381 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
1383 if (lvb->lvb_version == OCFS2_LVB_VERSION
1384 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
1389 /* Determine whether a lock resource needs to be refreshed, and
1390 * arbitrate who gets to refresh it.
1392 * 0 means no refresh needed.
1394 * > 0 means you need to refresh this and you MUST call
1395 * ocfs2_complete_lock_res_refresh afterwards. */
1396 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
1398 unsigned long flags;
1404 spin_lock_irqsave(&lockres->l_lock, flags);
1405 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
1406 spin_unlock_irqrestore(&lockres->l_lock, flags);
1410 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
1411 spin_unlock_irqrestore(&lockres->l_lock, flags);
1413 ocfs2_wait_on_refreshing_lock(lockres);
1417 /* Ok, I'll be the one to refresh this lock. */
1418 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
1419 spin_unlock_irqrestore(&lockres->l_lock, flags);
1427 /* If status is nonzero, I'll mark it as not being in refresh
1428 * anymore, but I won't clear the needs refresh flag. */
1429 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
1432 unsigned long flags;
1435 spin_lock_irqsave(&lockres->l_lock, flags);
1436 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
1438 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
1439 spin_unlock_irqrestore(&lockres->l_lock, flags);
1441 wake_up(&lockres->l_event);
1446 /* may or may not return a bh if it went to disk. */
1447 static int ocfs2_meta_lock_update(struct inode *inode,
1448 struct buffer_head **bh)
1451 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1452 struct ocfs2_lock_res *lockres;
1453 struct ocfs2_dinode *fe;
1457 spin_lock(&oi->ip_lock);
1458 if (oi->ip_flags & OCFS2_INODE_DELETED) {
1459 mlog(0, "Orphaned inode %llu was deleted while we "
1460 "were waiting on a lock. ip_flags = 0x%x\n",
1461 (unsigned long long)oi->ip_blkno, oi->ip_flags);
1462 spin_unlock(&oi->ip_lock);
1466 spin_unlock(&oi->ip_lock);
1468 lockres = &oi->ip_meta_lockres;
1470 if (!ocfs2_should_refresh_lock_res(lockres))
1473 /* This will discard any caching information we might have had
1474 * for the inode metadata. */
1475 ocfs2_metadata_cache_purge(inode);
1477 /* will do nothing for inode types that don't use the extent
1478 * map (directories, bitmap files, etc) */
1479 ocfs2_extent_map_trunc(inode, 0);
1481 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
1482 mlog(0, "Trusting LVB on inode %llu\n",
1483 (unsigned long long)oi->ip_blkno);
1484 ocfs2_refresh_inode_from_lvb(inode);
1486 /* Boo, we have to go to disk. */
1487 /* read bh, cast, ocfs2_refresh_inode */
1488 status = ocfs2_read_block(OCFS2_SB(inode->i_sb), oi->ip_blkno,
1489 bh, OCFS2_BH_CACHED, inode);
1494 fe = (struct ocfs2_dinode *) (*bh)->b_data;
1496 /* This is a good chance to make sure we're not
1497 * locking an invalid object.
1499 * We bug on a stale inode here because we checked
1500 * above whether it was wiped from disk. The wiping
1501 * node provides a guarantee that we receive that
1502 * message and can mark the inode before dropping any
1503 * locks associated with it. */
1504 if (!OCFS2_IS_VALID_DINODE(fe)) {
1505 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
1509 mlog_bug_on_msg(inode->i_generation !=
1510 le32_to_cpu(fe->i_generation),
1511 "Invalid dinode %llu disk generation: %u "
1512 "inode->i_generation: %u\n",
1513 (unsigned long long)oi->ip_blkno,
1514 le32_to_cpu(fe->i_generation),
1515 inode->i_generation);
1516 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
1517 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
1518 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
1519 (unsigned long long)oi->ip_blkno,
1520 (unsigned long long)le64_to_cpu(fe->i_dtime),
1521 le32_to_cpu(fe->i_flags));
1523 ocfs2_refresh_inode(inode, fe);
1528 ocfs2_complete_lock_res_refresh(lockres, status);
1534 static int ocfs2_assign_bh(struct inode *inode,
1535 struct buffer_head **ret_bh,
1536 struct buffer_head *passed_bh)
1541 /* Ok, the update went to disk for us, use the
1543 *ret_bh = passed_bh;
1549 status = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1550 OCFS2_I(inode)->ip_blkno,
1561 * returns < 0 error if the callback will never be called, otherwise
1562 * the result of the lock will be communicated via the callback.
1564 int ocfs2_meta_lock_full(struct inode *inode,
1565 struct ocfs2_journal_handle *handle,
1566 struct buffer_head **ret_bh,
1570 int status, level, dlm_flags, acquired;
1571 struct ocfs2_lock_res *lockres;
1572 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1573 struct buffer_head *local_bh = NULL;
1579 mlog(0, "inode %llu, take %s META lock\n",
1580 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1581 ex ? "EXMODE" : "PRMODE");
1585 /* We'll allow faking a readonly metadata lock for
1587 if (ocfs2_is_hard_readonly(osb)) {
1593 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1594 wait_event(osb->recovery_event,
1595 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1598 lockres = &OCFS2_I(inode)->ip_meta_lockres;
1599 level = ex ? LKM_EXMODE : LKM_PRMODE;
1601 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
1602 dlm_flags |= LKM_NOQUEUE;
1604 status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags);
1606 if (status != -EAGAIN && status != -EIOCBRETRY)
1611 /* Notify the error cleanup path to drop the cluster lock. */
1614 /* We wait twice because a node may have died while we were in
1615 * the lower dlm layers. The second time though, we've
1616 * committed to owning this lock so we don't allow signals to
1617 * abort the operation. */
1618 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
1619 wait_event(osb->recovery_event,
1620 ocfs2_node_map_is_empty(osb, &osb->recovery_map));
1623 * We only see this flag if we're being called from
1624 * ocfs2_read_locked_inode(). It means we're locking an inode
1625 * which hasn't been populated yet, so clear the refresh flag
1626 * and let the caller handle it.
1628 if (inode->i_state & I_NEW) {
1630 ocfs2_complete_lock_res_refresh(lockres, 0);
1634 /* This is fun. The caller may want a bh back, or it may
1635 * not. ocfs2_meta_lock_update definitely wants one in, but
1636 * may or may not read one, depending on what's in the
1637 * LVB. The result of all of this is that we've *only* gone to
1638 * disk if we have to, so the complexity is worthwhile. */
1639 status = ocfs2_meta_lock_update(inode, &local_bh);
1641 if (status != -ENOENT)
1647 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
1655 status = ocfs2_handle_add_lock(handle, inode);
1662 if (ret_bh && (*ret_bh)) {
1667 ocfs2_meta_unlock(inode, ex);
1678 * This is working around a lock inversion between tasks acquiring DLM locks
1679 * while holding a page lock and the vote thread which blocks dlm lock acquiry
1680 * while acquiring page locks.
1682 * ** These _with_page variants are only intended to be called from aop
1683 * methods that hold page locks and return a very specific *positive* error
1684 * code that aop methods pass up to the VFS -- test for errors with != 0. **
1686 * The DLM is called such that it returns -EAGAIN if it would have blocked
1687 * waiting for the vote thread. In that case we unlock our page so the vote
1688 * thread can make progress. Once we've done this we have to return
1689 * AOP_TRUNCATED_PAGE so the aop method that called us can bubble that back up
1690 * into the VFS who will then immediately retry the aop call.
1692 * We do a blocking lock and immediate unlock before returning, though, so that
1693 * the lock has a great chance of being cached on this node by the time the VFS
1694 * calls back to retry the aop. This has a potential to livelock as nodes
1695 * ping locks back and forth, but that's a risk we're willing to take to avoid
1696 * the lock inversion simply.
1698 int ocfs2_meta_lock_with_page(struct inode *inode,
1699 struct ocfs2_journal_handle *handle,
1700 struct buffer_head **ret_bh,
1706 ret = ocfs2_meta_lock_full(inode, handle, ret_bh, ex,
1707 OCFS2_LOCK_NONBLOCK);
1708 if (ret == -EAGAIN) {
1710 if (ocfs2_meta_lock(inode, handle, ret_bh, ex) == 0)
1711 ocfs2_meta_unlock(inode, ex);
1712 ret = AOP_TRUNCATED_PAGE;
1718 void ocfs2_meta_unlock(struct inode *inode,
1721 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1722 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_meta_lockres;
1726 mlog(0, "inode %llu drop %s META lock\n",
1727 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1728 ex ? "EXMODE" : "PRMODE");
1730 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)))
1731 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1736 int ocfs2_super_lock(struct ocfs2_super *osb,
1740 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1741 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1742 struct buffer_head *bh;
1743 struct ocfs2_slot_info *si = osb->slot_info;
1747 if (ocfs2_is_hard_readonly(osb))
1750 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
1756 /* The super block lock path is really in the best position to
1757 * know when resources covered by the lock need to be
1758 * refreshed, so we do it here. Of course, making sense of
1759 * everything is up to the caller :) */
1760 status = ocfs2_should_refresh_lock_res(lockres);
1767 status = ocfs2_read_block(osb, bh->b_blocknr, &bh, 0,
1770 ocfs2_update_slot_info(si);
1772 ocfs2_complete_lock_res_refresh(lockres, status);
1782 void ocfs2_super_unlock(struct ocfs2_super *osb,
1785 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1786 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
1788 ocfs2_cluster_unlock(osb, lockres, level);
1791 int ocfs2_rename_lock(struct ocfs2_super *osb)
1794 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1796 if (ocfs2_is_hard_readonly(osb))
1799 status = ocfs2_cluster_lock(osb, lockres, LKM_EXMODE, 0, 0);
1806 void ocfs2_rename_unlock(struct ocfs2_super *osb)
1808 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
1810 ocfs2_cluster_unlock(osb, lockres, LKM_EXMODE);
1813 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
1816 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1817 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
1818 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
1822 if (ocfs2_is_hard_readonly(osb))
1825 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
1832 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
1834 int level = ex ? LKM_EXMODE : LKM_PRMODE;
1835 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
1836 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
1838 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
1841 /* Reference counting of the dlm debug structure. We want this because
1842 * open references on the debug inodes can live on after a mount, so
1843 * we can't rely on the ocfs2_super to always exist. */
1844 static void ocfs2_dlm_debug_free(struct kref *kref)
1846 struct ocfs2_dlm_debug *dlm_debug;
1848 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
1853 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
1856 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
1859 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
1861 kref_get(&debug->d_refcnt);
1864 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
1866 struct ocfs2_dlm_debug *dlm_debug;
1868 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
1870 mlog_errno(-ENOMEM);
1874 kref_init(&dlm_debug->d_refcnt);
1875 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
1876 dlm_debug->d_locking_state = NULL;
1881 /* Access to this is arbitrated for us via seq_file->sem. */
1882 struct ocfs2_dlm_seq_priv {
1883 struct ocfs2_dlm_debug *p_dlm_debug;
1884 struct ocfs2_lock_res p_iter_res;
1885 struct ocfs2_lock_res p_tmp_res;
1888 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
1889 struct ocfs2_dlm_seq_priv *priv)
1891 struct ocfs2_lock_res *iter, *ret = NULL;
1892 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
1894 assert_spin_locked(&ocfs2_dlm_tracking_lock);
1896 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
1897 /* discover the head of the list */
1898 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
1899 mlog(0, "End of list found, %p\n", ret);
1903 /* We track our "dummy" iteration lockres' by a NULL
1905 if (iter->l_ops != NULL) {
1914 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
1916 struct ocfs2_dlm_seq_priv *priv = m->private;
1917 struct ocfs2_lock_res *iter;
1919 spin_lock(&ocfs2_dlm_tracking_lock);
1920 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
1922 /* Since lockres' have the lifetime of their container
1923 * (which can be inodes, ocfs2_supers, etc) we want to
1924 * copy this out to a temporary lockres while still
1925 * under the spinlock. Obviously after this we can't
1926 * trust any pointers on the copy returned, but that's
1927 * ok as the information we want isn't typically held
1929 priv->p_tmp_res = *iter;
1930 iter = &priv->p_tmp_res;
1932 spin_unlock(&ocfs2_dlm_tracking_lock);
1937 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
1941 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
1943 struct ocfs2_dlm_seq_priv *priv = m->private;
1944 struct ocfs2_lock_res *iter = v;
1945 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
1947 spin_lock(&ocfs2_dlm_tracking_lock);
1948 iter = ocfs2_dlm_next_res(iter, priv);
1949 list_del_init(&dummy->l_debug_list);
1951 list_add(&dummy->l_debug_list, &iter->l_debug_list);
1952 priv->p_tmp_res = *iter;
1953 iter = &priv->p_tmp_res;
1955 spin_unlock(&ocfs2_dlm_tracking_lock);
1960 /* So that debugfs.ocfs2 can determine which format is being used */
1961 #define OCFS2_DLM_DEBUG_STR_VERSION 1
1962 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
1966 struct ocfs2_lock_res *lockres = v;
1971 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
1973 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
1974 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
1976 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
1978 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
1980 seq_printf(m, "%d\t"
1991 lockres->l_unlock_action,
1992 lockres->l_ro_holders,
1993 lockres->l_ex_holders,
1994 lockres->l_requested,
1995 lockres->l_blocking);
1997 /* Dump the raw LVB */
1998 lvb = lockres->l_lksb.lvb;
1999 for(i = 0; i < DLM_LVB_LEN; i++)
2000 seq_printf(m, "0x%x\t", lvb[i]);
2003 seq_printf(m, "\n");
2007 static struct seq_operations ocfs2_dlm_seq_ops = {
2008 .start = ocfs2_dlm_seq_start,
2009 .stop = ocfs2_dlm_seq_stop,
2010 .next = ocfs2_dlm_seq_next,
2011 .show = ocfs2_dlm_seq_show,
2014 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2016 struct seq_file *seq = (struct seq_file *) file->private_data;
2017 struct ocfs2_dlm_seq_priv *priv = seq->private;
2018 struct ocfs2_lock_res *res = &priv->p_iter_res;
2020 ocfs2_remove_lockres_tracking(res);
2021 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2022 return seq_release_private(inode, file);
2025 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2028 struct ocfs2_dlm_seq_priv *priv;
2029 struct seq_file *seq;
2030 struct ocfs2_super *osb;
2032 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
2038 osb = (struct ocfs2_super *) inode->u.generic_ip;
2039 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2040 priv->p_dlm_debug = osb->osb_dlm_debug;
2041 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2043 ret = seq_open(file, &ocfs2_dlm_seq_ops);
2050 seq = (struct seq_file *) file->private_data;
2051 seq->private = priv;
2053 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2060 static const struct file_operations ocfs2_dlm_debug_fops = {
2061 .open = ocfs2_dlm_debug_open,
2062 .release = ocfs2_dlm_debug_release,
2064 .llseek = seq_lseek,
2067 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2070 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2072 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2074 osb->osb_debug_root,
2076 &ocfs2_dlm_debug_fops);
2077 if (!dlm_debug->d_locking_state) {
2080 "Unable to create locking state debugfs file.\n");
2084 ocfs2_get_dlm_debug(dlm_debug);
2089 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2091 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2094 debugfs_remove(dlm_debug->d_locking_state);
2095 ocfs2_put_dlm_debug(dlm_debug);
2099 int ocfs2_dlm_init(struct ocfs2_super *osb)
2103 struct dlm_ctxt *dlm;
2107 status = ocfs2_dlm_init_debug(osb);
2113 /* launch vote thread */
2114 osb->vote_task = kthread_run(ocfs2_vote_thread, osb, "ocfs2vote");
2115 if (IS_ERR(osb->vote_task)) {
2116 status = PTR_ERR(osb->vote_task);
2117 osb->vote_task = NULL;
2122 /* used by the dlm code to make message headers unique, each
2123 * node in this domain must agree on this. */
2124 dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
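/*
 * Note (illustrative, not from the original code): the key is derived
 * deterministically from the uuid string, so every node mounting the
 * same filesystem computes the same dlm_key and joins the same
 * message domain below.
 */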
2126 /* for now, uuid == domain */
2127 dlm = dlm_register_domain(osb->uuid_str, dlm_key);
2129 status = PTR_ERR(dlm);
2134 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
2135 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
2137 dlm_register_eviction_cb(dlm, &osb->osb_eviction_cb);
2144 ocfs2_dlm_shutdown_debug(osb);
2146 kthread_stop(osb->vote_task);
2153 void ocfs2_dlm_shutdown(struct ocfs2_super *osb)
2157 dlm_unregister_eviction_cb(&osb->osb_eviction_cb);
2159 ocfs2_drop_osb_locks(osb);
2161 if (osb->vote_task) {
2162 kthread_stop(osb->vote_task);
2163 osb->vote_task = NULL;
2166 ocfs2_lock_res_free(&osb->osb_super_lockres);
2167 ocfs2_lock_res_free(&osb->osb_rename_lockres);
2169 dlm_unregister_domain(osb->dlm);
2172 ocfs2_dlm_shutdown_debug(osb);
2177 static void ocfs2_unlock_ast(void *opaque, enum dlm_status status)
2179 struct ocfs2_lock_res *lockres = opaque;
2180 unsigned long flags;
2184 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name,
2185 lockres->l_unlock_action);
2187 spin_lock_irqsave(&lockres->l_lock, flags);
2188 /* We tried to cancel a convert request, but it was already
2189 * granted. All we want to do here is clear our unlock
2190 * state. The wake_up call done at the bottom is redundant
2191 * (ocfs2_prepare_cancel_convert doesn't sleep on this) but doesn't
2192 * hurt anything anyway */
2193 if (status == DLM_CANCELGRANT &&
2194 lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2195 mlog(0, "Got cancelgrant for %s\n", lockres->l_name);
2197 /* We don't clear the busy flag in this case as it
2198 * should have been cleared by the ast which the dlm
2200 goto complete_unlock;
2203 if (status != DLM_NORMAL) {
2204 mlog(ML_ERROR, "Dlm passes status %d for lock %s, "
2205 "unlock_action %d\n", status, lockres->l_name,
2206 lockres->l_unlock_action);
2207 spin_unlock_irqrestore(&lockres->l_lock, flags);
2211 switch(lockres->l_unlock_action) {
2212 case OCFS2_UNLOCK_CANCEL_CONVERT:
2213 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
2214 lockres->l_action = OCFS2_AST_INVALID;
2216 case OCFS2_UNLOCK_DROP_LOCK:
2217 lockres->l_level = LKM_IVMODE;
2223 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
2225 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
2226 spin_unlock_irqrestore(&lockres->l_lock, flags);
2228 wake_up(&lockres->l_event);
2233 typedef void (ocfs2_pre_drop_cb_t)(struct ocfs2_lock_res *, void *);
2235 struct drop_lock_cb {
2236 ocfs2_pre_drop_cb_t *drop_func;
2240 static int ocfs2_drop_lock(struct ocfs2_super *osb,
2241 struct ocfs2_lock_res *lockres,
2242 struct drop_lock_cb *dcb)
2244 enum dlm_status status;
2245 unsigned long flags;
2248 /* We didn't get anywhere near actually using this lockres. */
2249 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
2252 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
2253 lkm_flags |= LKM_VALBLK;
2255 spin_lock_irqsave(&lockres->l_lock, flags);
2257 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
2258 "lockres %s, flags 0x%lx\n",
2259 lockres->l_name, lockres->l_flags);
2261 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
2262 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
2263 "%u, unlock_action = %u\n",
2264 lockres->l_name, lockres->l_flags, lockres->l_action,
2265 lockres->l_unlock_action);
2267 spin_unlock_irqrestore(&lockres->l_lock, flags);
2269 /* XXX: Today we just wait on any busy
2270 * locks... Perhaps we need to cancel converts in the
2272 ocfs2_wait_on_busy_lock(lockres);
2274 spin_lock_irqsave(&lockres->l_lock, flags);
2278 dcb->drop_func(lockres, dcb->drop_data);
2280 if (lockres->l_flags & OCFS2_LOCK_BUSY)
2281 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
2283 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
2284 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
2286 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
2287 spin_unlock_irqrestore(&lockres->l_lock, flags);
2291 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
2293 /* make sure we never get here while waiting for an ast to
2295 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
2297 /* is this necessary? */
2298 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2299 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
2300 spin_unlock_irqrestore(&lockres->l_lock, flags);
2302 mlog(0, "lock %s\n", lockres->l_name);
2304 status = dlmunlock(osb->dlm, &lockres->l_lksb, lkm_flags,
2305 ocfs2_unlock_ast, lockres);
2306 if (status != DLM_NORMAL) {
2307 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2308 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
2309 dlm_print_one_lock(lockres->l_lksb.lockid);
2312 mlog(0, "lock %s, successfull return from dlmunlock\n",
2315 ocfs2_wait_on_busy_lock(lockres);
2321 /* Mark the lockres as being dropped. It will no longer be
2322 * queued if blocking, but we still may have to wait on it
2323 * being dequeued from the vote thread before we can consider
2326 * You can *not* attempt to call cluster_lock on this lockres anymore. */
2327 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
2330 struct ocfs2_mask_waiter mw;
2331 unsigned long flags;
2333 ocfs2_init_mask_waiter(&mw);
2335 spin_lock_irqsave(&lockres->l_lock, flags);
2336 lockres->l_flags |= OCFS2_LOCK_FREEING;
2337 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
2338 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
2339 spin_unlock_irqrestore(&lockres->l_lock, flags);
2341 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
2343 status = ocfs2_wait_for_mask(&mw);
2347 spin_lock_irqsave(&lockres->l_lock, flags);
2349 spin_unlock_irqrestore(&lockres->l_lock, flags);
2352 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
2353 struct ocfs2_lock_res *lockres)
2354 {
2355 int ret;
2357 ocfs2_mark_lockres_freeing(lockres);
2358 ret = ocfs2_drop_lock(osb, lockres, NULL);
2359 if (ret)
2360 mlog_errno(ret);
2361 }
2363 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
2364 {
2365 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
2366 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
2367 }
2369 static void ocfs2_meta_pre_drop(struct ocfs2_lock_res *lockres, void *data)
2370 {
2371 struct inode *inode = data;
2373 /* the metadata lock requires a bit more work as we have an
2374 * LVB to worry about. */
2375 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
2376 lockres->l_level == LKM_EXMODE &&
2377 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2378 __ocfs2_stuff_meta_lvb(inode);
2379 }
2381 int ocfs2_drop_inode_locks(struct inode *inode)
2382 {
2383 int status, err;
2384 struct drop_lock_cb meta_dcb = { ocfs2_meta_pre_drop, inode, };
2388 /* No need to call ocfs2_mark_lockres_freeing here -
2389 * ocfs2_clear_inode has done it for us. */
2391 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2392 &OCFS2_I(inode)->ip_data_lockres,
2393 NULL);
2394 if (err < 0)
2395 mlog_errno(err);
2397 status = err;
2399 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2400 &OCFS2_I(inode)->ip_meta_lockres,
2401 &meta_dcb);
2402 if (err < 0)
2403 mlog_errno(err);
2404 if (err < 0 && !status)
2405 status = err;
2407 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
2408 &OCFS2_I(inode)->ip_rw_lockres,
2409 NULL);
2410 if (err < 0)
2411 mlog_errno(err);
2412 if (err < 0 && !status)
2413 status = err;
2415 mlog_exit(status);
2416 return status;
2417 }
2419 static void ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
2420 int new_level)
2421 {
2422 assert_spin_locked(&lockres->l_lock);
2424 BUG_ON(lockres->l_blocking <= LKM_NLMODE);
2426 if (lockres->l_level <= new_level) {
2427 mlog(ML_ERROR, "lockres->l_level (%u) <= new_level (%u)\n",
2428 lockres->l_level, new_level);
2429 BUG();
2430 }
2432 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n",
2433 lockres->l_name, new_level, lockres->l_blocking);
2435 lockres->l_action = OCFS2_AST_DOWNCONVERT;
2436 lockres->l_requested = new_level;
2437 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
2438 }
2440 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
2441 struct ocfs2_lock_res *lockres,
2442 int new_level,
2443 int lvb)
2444 {
2445 int ret, dlm_flags = LKM_CONVERT;
2446 enum dlm_status status;
2450 if (lvb)
2451 dlm_flags |= LKM_VALBLK;
2453 status = dlmlock(osb->dlm,
2454 new_level,
2455 &lockres->l_lksb,
2456 dlm_flags,
2457 lockres->l_name,
2458 OCFS2_LOCK_ID_MAX_LEN - 1,
2459 ocfs2_locking_ast,
2460 lockres,
2461 ocfs2_blocking_ast);
2462 if (status != DLM_NORMAL) {
2463 ocfs2_log_dlm_error("dlmlock", status, lockres);
2464 ret = -EINVAL;
2465 ocfs2_recover_from_dlm_error(lockres, 1);
2466 goto bail;
2467 }
2469 ret = 0;
2470 bail:
2472 return ret;
2473 }
2475 /* returns 1 when the caller should unlock and call dlmunlock */
2476 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
2477 struct ocfs2_lock_res *lockres)
2478 {
2479 assert_spin_locked(&lockres->l_lock);
2482 mlog(0, "lock %s\n", lockres->l_name);
2484 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
2485 /* If we're already trying to cancel a lock conversion
2486 * then just drop the spinlock and allow the caller to
2487 * requeue this lock. */
2489 mlog(0, "Lockres %s, skip convert\n", lockres->l_name);
2493 /* were we in a convert when we got the bast fire? */
2494 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
2495 lockres->l_action != OCFS2_AST_DOWNCONVERT);
2496 /* set things up for the unlockast to know to just
2497 * clear out the ast_action and unset busy, etc. */
2498 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
2500 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
2501 "lock %s, invalid flags: 0x%lx\n",
2502 lockres->l_name, lockres->l_flags);
2504 return 1;
2505 }
2507 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
2508 struct ocfs2_lock_res *lockres)
2509 {
2510 int ret = 0;
2511 enum dlm_status status;
2514 mlog(0, "lock %s\n", lockres->l_name);
2517 status = dlmunlock(osb->dlm,
2518 &lockres->l_lksb,
2519 LKM_CANCEL,
2520 ocfs2_unlock_ast,
2521 lockres);
2522 if (status != DLM_NORMAL) {
2523 ocfs2_log_dlm_error("dlmunlock", status, lockres);
2525 ocfs2_recover_from_dlm_error(lockres, 0);
2528 mlog(0, "lock %s return from dlmunlock\n", lockres->l_name);
2534 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
2535 struct ocfs2_lock_res *lockres,
2536 struct ocfs2_unblock_ctl *ctl)
2537 {
2538 unsigned long flags;
2539 int blocking;
2540 int new_level;
2541 int set_lvb = 0;
2542 int ret = 0;
2546 spin_lock_irqsave(&lockres->l_lock, flags);
2548 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
2550 recheck:
2551 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
2552 ctl->requeue = 1;
2553 ret = ocfs2_prepare_cancel_convert(osb, lockres);
2554 spin_unlock_irqrestore(&lockres->l_lock, flags);
2555 if (ret)
2556 ret = ocfs2_cancel_convert(osb, lockres);
2560 goto leave;
2561 }
2563 /* if we're blocking an exclusive and we have *any* holders,
2564 * then requeue. */
2565 if ((lockres->l_blocking == LKM_EXMODE)
2566 && (lockres->l_ex_holders || lockres->l_ro_holders))
2567 goto leave_requeue;
2569 /* If it's a PR we're blocking, then only
2570 * requeue if we've got any EX holders */
2571 if (lockres->l_blocking == LKM_PRMODE &&
2572 lockres->l_ex_holders)
2573 goto leave_requeue;
2575 /*
2576 * Can we get a lock in this state if the holder counts are
2577 * zero? The meta data unblock code used to check this.
2578 */
2579 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
2580 && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
2581 goto leave_requeue;
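/* Editor's note: pick the most permissive level we can retain that is still
 * compatible with the blocked request - drop to NL when another node wants
 * EX, and to PR when it only wants PR. */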
2583 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
2585 if (lockres->l_ops->check_downconvert
2586 && !lockres->l_ops->check_downconvert(lockres, new_level))
2587 goto leave_requeue;
2589 /* If we get here, then we know that there are no more
2590 * incompatible holders (and anyone asking for an incompatible
2591 * lock is blocked). We can now downconvert the lock */
2592 if (!lockres->l_ops->downconvert_worker)
2593 goto downconvert;
2595 /* Some lockres types want to do a bit of work before
2596 * downconverting a lock. Allow that here. The worker function
2597 * may sleep, so we save off a copy of what we're blocking as
2598 * it may change while we're not holding the spin lock. */
2599 blocking = lockres->l_blocking;
2600 spin_unlock_irqrestore(&lockres->l_lock, flags);
2602 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
2604 if (ctl->unblock_action == UNBLOCK_STOP_POST)
2605 goto leave;
2607 spin_lock_irqsave(&lockres->l_lock, flags);
2608 if (blocking != lockres->l_blocking) {
2609 /* If this changed underneath us, then we can't drop
2610 * it just yet, so just go back to square one. */
2611 goto recheck;
2612 }
2614 downconvert:
2615 ctl->requeue = 0;
2617 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
2618 if (lockres->l_level == LKM_EXMODE)
2619 set_lvb = 1;
2621 /*
2622 * We only set the lvb if the lock has been fully
2623 * refreshed - otherwise we risk setting stale
2624 * data. If we're not setting it, there's no need to actually
2625 * clear out the lvb here as its value is still valid.
2626 */
2627 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
2628 lockres->l_ops->set_lvb(lockres);
2629 }
2631 ocfs2_prepare_downconvert(lockres, new_level);
2632 spin_unlock_irqrestore(&lockres->l_lock, flags);
2633 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb);
2634 goto leave;
2638 leave_requeue:
2639 spin_unlock_irqrestore(&lockres->l_lock, flags);
2640 ctl->requeue = 1;
2642 leave:
2643 return ret;
2644 }
2646 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
2647 int blocking)
2648 {
2649 struct inode *inode;
2650 struct address_space *mapping;
2652 inode = ocfs2_lock_res_inode(lockres);
2653 mapping = inode->i_mapping;
2655 if (filemap_fdatawrite(mapping)) {
2656 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
2657 (unsigned long long)OCFS2_I(inode)->ip_blkno);
2658 }
2659 sync_mapping_buffers(mapping);
2660 if (blocking == LKM_EXMODE) {
2661 truncate_inode_pages(mapping, 0);
2662 unmap_mapping_range(mapping, 0, 0, 0);
2663 } else {
2664 /* We only need to wait on the I/O if we're not also
2665 * truncating pages because truncate_inode_pages waits
2666 * for us above. We don't truncate pages if we're
2667 * blocking anything < EXMODE because we want to keep
2668 * them around in that case. */
2669 filemap_fdatawait(mapping);
2670 }
2672 return UNBLOCK_CONTINUE;
2673 }
2675 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
2676 int new_level)
2677 {
2678 struct inode *inode = ocfs2_lock_res_inode(lockres);
2679 int checkpointed = ocfs2_inode_fully_checkpointed(inode);
2681 BUG_ON(new_level != LKM_NLMODE && new_level != LKM_PRMODE);
2682 BUG_ON(lockres->l_level != LKM_EXMODE && !checkpointed);
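/* Editor's note: another node granted PR (or us dropping to NL) will re-read
 * this inode's metadata from disk, so the downconvert can only proceed once
 * the journal has checkpointed the inode; otherwise kick a checkpoint and
 * return 0 so the downconvert is requeued. */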
2684 if (checkpointed)
2685 return 1;
2687 ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
2688 return 0;
2689 }
2691 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
2692 {
2693 struct inode *inode = ocfs2_lock_res_inode(lockres);
2695 __ocfs2_stuff_meta_lvb(inode);
2696 }
2698 /*
2699 * Does the final reference drop on our dentry lock. Right now this
2700 * happens in the vote thread, but we could choose to simplify the
2701 * dlmglue API and push these off to the ocfs2_wq in the future.
2702 */
2703 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
2704 struct ocfs2_lock_res *lockres)
2705 {
2706 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2707 ocfs2_dentry_lock_put(osb, dl);
2708 }
2710 /*
2711 * d_delete() matching dentries before the lock downconvert.
2713 * At this point, any process waiting to destroy the
2714 * dentry_lock due to last ref count is stopped by the
2715 * OCFS2_LOCK_QUEUED flag.
2717 * We have two potential problems:
2719 * 1) If we do the last reference drop on our dentry_lock (via dput)
2720 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
2721 * the downconvert to finish. Instead we take an elevated
2722 * reference and push the drop until after we've completed our
2723 * unblock processing.
2725 * 2) There might be another process with a final reference,
2726 * waiting on us to finish processing. If this is the case, we
2727 * detect it and exit out - there are no more dentries anyway.
2728 */
2729 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
2730 int blocking)
2731 {
2732 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
2733 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
2734 struct dentry *dentry;
2735 unsigned long flags;
2736 int extra_ref = 0;
2738 /*
2739 * This node is blocking another node from getting a read
2740 * lock. This happens when we've renamed within a
2741 * directory. We've forced the other nodes to d_delete(), but
2742 * we never actually dropped our lock because it's still
2743 * valid. The downconvert code will retain a PR for this node,
2744 * so there's no further work to do.
2745 */
2746 if (blocking == LKM_PRMODE)
2747 return UNBLOCK_CONTINUE;
2749 /*
2750 * Mark this inode as potentially orphaned. The code in
2751 * ocfs2_delete_inode() will figure out whether it actually
2752 * needs to be freed or not.
2753 */
2754 spin_lock(&oi->ip_lock);
2755 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
2756 spin_unlock(&oi->ip_lock);
2758 /*
2759 * Yuck. We need to make sure, however, that the check of
2760 * OCFS2_LOCK_FREEING and the extra reference are atomic with
2761 * respect to a reference decrement or the setting of that
2762 * flag.
2763 */
2764 spin_lock_irqsave(&lockres->l_lock, flags);
2765 spin_lock(&dentry_attach_lock);
2766 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
2767 && dl->dl_count) {
2768 dl->dl_count++;
2769 extra_ref = 1;
2770 }
2771 spin_unlock(&dentry_attach_lock);
2772 spin_unlock_irqrestore(&lockres->l_lock, flags);
2774 mlog(0, "extra_ref = %d\n", extra_ref);
2776 /*
2777 * We have a process waiting on us in ocfs2_dentry_iput(),
2778 * which means we can't have any more outstanding
2779 * aliases. There's no need to do any more work.
2780 */
2781 if (!extra_ref)
2782 return UNBLOCK_CONTINUE;
2784 spin_lock(&dentry_attach_lock);
2785 while (1) {
2786 dentry = ocfs2_find_local_alias(dl->dl_inode,
2787 dl->dl_parent_blkno, 1);
2788 if (!dentry)
2789 break;
2790 spin_unlock(&dentry_attach_lock);
2792 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
2793 dentry->d_name.name);
2795 /*
2796 * The following dcache calls may do an
2797 * iput(). Normally we don't want that from the
2798 * downconverting thread, but in this case it's ok
2799 * because the requesting node already has an
2800 * exclusive lock on the inode, so it can't be queued
2801 * for a downconvert.
2802 */
2803 d_delete(dentry);
2804 dput(dentry);
2806 spin_lock(&dentry_attach_lock);
2807 }
2808 spin_unlock(&dentry_attach_lock);
2810 /*
2811 * If we are the last holder of this dentry lock, there is no
2812 * reason to downconvert so skip straight to the unlock.
2813 */
2814 if (dl->dl_count == 1)
2815 return UNBLOCK_STOP_POST;
2817 return UNBLOCK_CONTINUE_POST;
2818 }
2820 void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
2821 struct ocfs2_lock_res *lockres)
2822 {
2823 int status;
2824 struct ocfs2_unblock_ctl ctl = {0, 0,};
2825 unsigned long flags;
2827 /* Our reference to the lockres in this function can be
2828 * considered valid until we remove the OCFS2_LOCK_QUEUED
2829 * flag. */
2834 BUG_ON(!lockres->l_ops);
2836 mlog(0, "lockres %s blocked.\n", lockres->l_name);
2838 /* Detect whether a lock has been marked as going away while
2839 * the vote thread was processing other things. A lock can
2840 * still be marked with OCFS2_LOCK_FREEING after this check,
2841 * but short circuiting here will still save us some
2842 * performance. */
2843 spin_lock_irqsave(&lockres->l_lock, flags);
2844 if (lockres->l_flags & OCFS2_LOCK_FREEING)
2845 goto unqueue;
2846 spin_unlock_irqrestore(&lockres->l_lock, flags);
2848 status = ocfs2_unblock_lock(osb, lockres, &ctl);
2849 if (status < 0)
2850 mlog_errno(status);
2852 spin_lock_irqsave(&lockres->l_lock, flags);
2853 unqueue:
2854 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
2855 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
2856 } else
2857 ocfs2_schedule_blocked_lock(osb, lockres);
2859 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name,
2860 ctl.requeue ? "yes" : "no");
2861 spin_unlock_irqrestore(&lockres->l_lock, flags);
2863 if (ctl.unblock_action != UNBLOCK_CONTINUE
2864 && lockres->l_ops->post_unlock)
2865 lockres->l_ops->post_unlock(osb, lockres);
2868 }
2870 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
2871 struct ocfs2_lock_res *lockres)
2872 {
2875 assert_spin_locked(&lockres->l_lock);
2877 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
2878 /* Do not schedule a lock for downconvert when it's on
2879 * the way to destruction - any nodes wanting access
2880 * to the resource will get it soon. */
2881 mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
2882 lockres->l_name, lockres->l_flags);
2883 return;
2884 }
2886 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
2888 spin_lock(&osb->vote_task_lock);
2889 if (list_empty(&lockres->l_blocked_list)) {
2890 list_add_tail(&lockres->l_blocked_list,
2891 &osb->blocked_lock_list);
2892 osb->blocked_lock_count++;
2893 }
2894 spin_unlock(&osb->vote_task_lock);
2897 }
2899 /* This aids in debugging situations where a bad LVB might be involved. */
2900 void ocfs2_dump_meta_lvb_info(u64 level,
2901 const char *function,
2902 unsigned int line,
2903 struct ocfs2_lock_res *lockres)
2904 {
2905 struct ocfs2_meta_lvb *lvb = (struct ocfs2_meta_lvb *) lockres->l_lksb.lvb;
2907 mlog(level, "LVB information for %s (called from %s:%u):\n",
2908 lockres->l_name, function, line);
2909 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
2910 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
2911 be32_to_cpu(lvb->lvb_igeneration));
2912 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
2913 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
2914 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
2915 be16_to_cpu(lvb->lvb_imode));
2916 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
2917 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
2918 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
2919 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
2920 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
2921 be32_to_cpu(lvb->lvb_iattr));
2922 }
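/*
 * Editor's addition, illustration only (not part of the original file): a
 * direct call to ocfs2_dump_meta_lvb_info() records the call site by hand,
 * as sketched below. The tree also carries a convenience wrapper for this in
 * dlmglue.h (mlog_meta_lvb() in the versions this editor has seen - treat
 * that name as an assumption), which callers should prefer.
 */
static inline void ocfs2_example_dump_meta_lvb(struct ocfs2_lock_res *lockres)
{
/* ML_ERROR keeps the dump visible with the default mask settings. */
ocfs2_dump_meta_lvb_info(ML_ERROR, __PRETTY_FUNCTION__, __LINE__, lockres);
}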