]> git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - fs/ocfs2/dlmglue.c
ocfs2/dlmglue: prepare tracking logic to avoid recursive cluster lock
[karo-tx-linux.git] / fs / ocfs2 / dlmglue.c
index 77d1632e905d8b1ec249cf86c7ece183c38f2746..8dce4099a6cae277690c1da505270c17477496ff 100644 (file)
@@ -532,6 +532,7 @@ void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
        init_waitqueue_head(&res->l_event);
        INIT_LIST_HEAD(&res->l_blocked_list);
        INIT_LIST_HEAD(&res->l_mask_waiters);
+       INIT_LIST_HEAD(&res->l_holders);
 }
 
 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
@@ -749,6 +750,50 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
        res->l_flags = 0UL;
 }
 
+/*
+ * Keep a list of processes who have interest in a lockres.
+ * Note: this is now only used to check for recursive cluster locking.
+ */
+static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
+                                  struct ocfs2_lock_holder *oh)
+{
+       INIT_LIST_HEAD(&oh->oh_list);
+       oh->oh_owner_pid = get_pid(task_pid(current));
+
+       spin_lock(&lockres->l_lock);
+       list_add_tail(&oh->oh_list, &lockres->l_holders);
+       spin_unlock(&lockres->l_lock);
+}
+
+static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
+                                      struct ocfs2_lock_holder *oh)
+{
+       spin_lock(&lockres->l_lock);
+       list_del(&oh->oh_list);
+       spin_unlock(&lockres->l_lock);
+
+       put_pid(oh->oh_owner_pid);
+}
+
+static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
+{
+       struct ocfs2_lock_holder *oh;
+       struct pid *pid;
+
+       /* look in the list of holders for one with the current task as owner */
+       spin_lock(&lockres->l_lock);
+       pid = task_pid(current);
+       list_for_each_entry(oh, &lockres->l_holders, oh_list) {
+               if (oh->oh_owner_pid == pid) {
+                       spin_unlock(&lockres->l_lock);
+                       return 1;
+               }
+       }
+       spin_unlock(&lockres->l_lock);
+
+       return 0;
+}
+
 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
                                     int level)
 {
@@ -2333,8 +2378,9 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
                goto getbh;
        }
 
-       if (ocfs2_mount_local(osb))
-               goto local;
+       if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
+           ocfs2_mount_local(osb))
+               goto update;
 
        if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
                ocfs2_wait_for_recovery(osb);
@@ -2363,7 +2409,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
        if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
                ocfs2_wait_for_recovery(osb);
 
-local:
+update:
        /*
         * We only see this flag if we're being called from
         * ocfs2_read_locked_inode(). It means we're locking an inode
@@ -2497,6 +2543,59 @@ void ocfs2_inode_unlock(struct inode *inode,
                ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
 }
 
+/*
+ * These _tracker variants are introduced to deal with the recursive cluster
+ * locking issue. The idea is to keep track of a lock holder on the stack of
+ * the current process. If there's a lock holder on the stack, we know the
+ * task context is already protected by cluster locking. Currently, they're
+ * used in some VFS entry routines.
+ *
+ * return < 0 on error, return == 0 if there's no lock holder on the stack
+ * before this call, return == 1 if this call would be a recursive locking.
+ */
+int ocfs2_inode_lock_tracker(struct inode *inode,
+                            struct buffer_head **ret_bh,
+                            int ex,
+                            struct ocfs2_lock_holder *oh)
+{
+       int status;
+       int arg_flags = 0, has_locked;
+       struct ocfs2_lock_res *lockres;
+
+       lockres = &OCFS2_I(inode)->ip_inode_lockres;
+       has_locked = ocfs2_is_locked_by_me(lockres);
+       /* Just get buffer head if the cluster lock has been taken */
+       if (has_locked)
+               arg_flags = OCFS2_META_LOCK_GETBH;
+
+       if (likely(!has_locked || ret_bh)) {
+               status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
+               if (status < 0) {
+                       if (status != -ENOENT)
+                               mlog_errno(status);
+                       return status;
+               }
+       }
+       if (!has_locked)
+               ocfs2_add_holder(lockres, oh);
+
+       return has_locked;
+}
+
+void ocfs2_inode_unlock_tracker(struct inode *inode,
+                               int ex,
+                               struct ocfs2_lock_holder *oh,
+                               int had_lock)
+{
+       struct ocfs2_lock_res *lockres;
+
+       lockres = &OCFS2_I(inode)->ip_inode_lockres;
+       if (!had_lock) {
+               ocfs2_remove_holder(lockres, oh);
+               ocfs2_inode_unlock(inode, ex);
+       }
+}
+
 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
 {
        struct ocfs2_lock_res *lockres;