diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 125cf76fcd086803d35bd257bdb54168486e0e0b..66e4f29505a33dbecd45b5d6a80e878c87818bc0 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -101,6 +101,11 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                return -1;
        if (ref1->type > ref2->type)
                return 1;
+       /* merging of sequenced refs is not allowed */
+       if (ref1->seq < ref2->seq)
+               return -1;
+       if (ref1->seq > ref2->seq)
+               return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
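For context: tree_insert() only collapses two delayed refs into one node when comp_entry() returns 0, so the new seq comparison is what prevents sequenced refs from being merged. A rough illustration of the resulting invariant for non-head refs; refs_may_merge() is a hypothetical helper, not part of this patch, and it omits the type-specific payload comparison that comp_entry() still performs:

/*
 * Hypothetical, for illustration only: after this change, two non-head
 * refs can only be merged when they agree on bytenr, type and seq
 * (and on the tree/data payload, not shown here).
 */
static bool refs_may_merge(struct btrfs_delayed_ref_node *ref1,
			   struct btrfs_delayed_ref_node *ref2)
{
	return ref1->bytenr == ref2->bytenr &&
	       ref1->type == ref2->type &&
	       ref1->seq == ref2->seq;
}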
@@ -150,16 +155,22 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
 
 /*
  * find a head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot
+ * head if it was able to find one, or NULL if nothing was in that spot.
+ * If return_bigger is given, the next bigger entry is returned if no exact
+ * match is found.
  */
 static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                                  u64 bytenr,
-                                 struct btrfs_delayed_ref_node **last)
+                                 struct btrfs_delayed_ref_node **last,
+                                 int return_bigger)
 {
-       struct rb_node *n = root->rb_node;
+       struct rb_node *n;
        struct btrfs_delayed_ref_node *entry;
-       int cmp;
+       int cmp = 0;
 
+again:
+       n = root->rb_node;
+       entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                WARN_ON(!entry->in_tree);
@@ -182,6 +193,19 @@ static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                else
                        return entry;
        }
+       if (entry && return_bigger) {
+               if (cmp > 0) {
+                       n = rb_next(&entry->rb_node);
+                       if (!n)
+                               n = rb_first(root);
+                       entry = rb_entry(n, struct btrfs_delayed_ref_node,
+                                        rb_node);
+                       bytenr = entry->bytenr;
+                       return_bigger = 0;
+                       goto again;
+               }
+               return entry;
+       }
        return NULL;
 }
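With return_bigger set, find_ref_head() becomes a circular "first head at or after bytenr" lookup: if the search loop ends on an entry smaller than the requested bytenr (cmp > 0), the next tree entry is the answer, wrapping to rb_first() when there is none, and the goto re-runs the lookup with that entry's bytenr so its head node is returned; if the loop ends on a bigger entry, that entry is returned directly. The btrfs_find_ref_cluster() hunk further down relies on this. A userspace-only sketch of the wrap-around selection, using a sorted array in place of the rbtree:

#include <stddef.h>
#include <stdio.h>

/* Illustration only: pick the first bytenr >= target, else wrap around. */
static size_t next_at_or_after(const unsigned long long *bytenr, size_t n,
			       unsigned long long target)
{
	for (size_t i = 0; i < n; i++)
		if (bytenr[i] >= target)
			return i;
	return 0;	/* nothing bigger: wrap to the smallest entry */
}

int main(void)
{
	unsigned long long heads[] = { 4096, 8192, 12288 };

	printf("%llu\n", heads[next_at_or_after(heads, 3, 9000)]);  /* 12288 */
	printf("%llu\n", heads[next_at_or_after(heads, 3, 13000)]); /* 4096 (wrapped) */
	return 0;
}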
 
@@ -209,6 +233,24 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                           u64 seq)
+{
+       struct seq_list *elem;
+
+       assert_spin_locked(&delayed_refs->lock);
+       if (list_empty(&delayed_refs->seq_head))
+               return 0;
+
+       elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
+       if (seq >= elem->seq) {
+               pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
+                        seq, elem->seq, delayed_refs);
+               return 1;
+       }
+       return 0;
+}
+
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 start)
 {
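btrfs_check_delayed_seq() answers, under delayed_refs->lock, whether processing of a ref with the given seq has to be deferred because an older sequence number is still registered on seq_head (the registration side lives elsewhere in this series). A rough sketch of how a caller might combine it with the seq_wait waitqueue; wait_on_delayed_seq() is a hypothetical helper, the real consumer is the delayed-ref running code in extent-tree.c:

/*
 * Hypothetical helper, sketch only: sleep on seq_wait until no registered
 * sequence number holds this ref back.  Assumes the caller holds
 * delayed_refs->lock, as btrfs_check_delayed_seq() requires.
 */
static void wait_on_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
				u64 seq)
{
	DEFINE_WAIT(wait);

	while (btrfs_check_delayed_seq(delayed_refs, seq)) {
		prepare_to_wait(&delayed_refs->seq_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		spin_unlock(&delayed_refs->lock);
		schedule();
		spin_lock(&delayed_refs->lock);
		finish_wait(&delayed_refs->seq_wait, &wait);
	}
}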
@@ -223,20 +265,8 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                node = rb_first(&delayed_refs->root);
        } else {
                ref = NULL;
-               find_ref_head(&delayed_refs->root, start, &ref);
+               find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
                if (ref) {
-                       struct btrfs_delayed_ref_node *tmp;
-
-                       node = rb_prev(&ref->rb_node);
-                       while (node) {
-                               tmp = rb_entry(node,
-                                              struct btrfs_delayed_ref_node,
-                                              rb_node);
-                               if (tmp->bytenr < start)
-                                       break;
-                               ref = tmp;
-                               node = rb_prev(&ref->rb_node);
-                       }
                        node = &ref->rb_node;
                } else
                        node = rb_first(&delayed_refs->root);
@@ -390,7 +420,8 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
  * this does all the dirty work in terms of maintaining the correct
  * overall modification count.
  */
-static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+                                       struct btrfs_trans_handle *trans,
                                        struct btrfs_delayed_ref_node *ref,
                                        u64 bytenr, u64 num_bytes,
                                        int action, int is_data)
@@ -437,6 +468,7 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
        ref->action  = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
+       ref->seq = 0;
 
        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
@@ -468,14 +500,17 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
 /*
  * helper to insert a delayed tree ref into the rbtree.
  */
-static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                                        struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
-                                        u64 ref_root, int level, int action)
+                                        u64 ref_root, int level, int action,
+                                        int for_cow)
 {
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
+       u64 seq = 0;
 
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;
@@ -491,14 +526,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
        ref->is_head = 0;
        ref->in_tree = 1;
 
+       if (need_ref_seq(for_cow, ref_root))
+               seq = inc_delayed_seq(delayed_refs);
+       ref->seq = seq;
+
        full_ref = btrfs_delayed_node_to_tree_ref(ref);
-       if (parent) {
-               full_ref->parent = parent;
+       full_ref->parent = parent;
+       full_ref->root = ref_root;
+       if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
-       } else {
-               full_ref->root = ref_root;
+       else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
-       }
        full_ref->level = level;
 
        trace_btrfs_delayed_tree_ref(ref, full_ref, action);
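inc_delayed_seq() is not visible in this file; it is introduced in delayed-ref.h by the same series. A minimal sketch of what it is assumed to do here, namely hand out a monotonically increasing number while delayed_refs->lock is held:

/*
 * Sketch only -- the real definition lives in delayed-ref.h of this series.
 * delayed_refs->seq is assumed to be a u64 counter protected by
 * delayed_refs->lock.
 */
static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
{
	assert_spin_locked(&delayed_refs->lock);
	return ++delayed_refs->seq;
}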
@@ -522,15 +560,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 /*
  * helper to insert a delayed data ref into the rbtree.
  */
-static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                                        struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, u64 owner, u64 offset,
-                                        int action)
+                                        int action, int for_cow)
 {
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
+       u64 seq = 0;
 
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;
@@ -546,14 +586,18 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
        ref->is_head = 0;
        ref->in_tree = 1;
 
+       if (need_ref_seq(for_cow, ref_root))
+               seq = inc_delayed_seq(delayed_refs);
+       ref->seq = seq;
+
        full_ref = btrfs_delayed_node_to_data_ref(ref);
-       if (parent) {
-               full_ref->parent = parent;
+       full_ref->parent = parent;
+       full_ref->root = ref_root;
+       if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
-       } else {
-               full_ref->root = ref_root;
+       else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;
-       }
+
        full_ref->objectid = owner;
        full_ref->offset = offset;
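The data-ref path draws its sequence number from the same counter, so tree and data modifications share one global ordering. need_ref_seq() also comes from delayed-ref.h; roughly, refs queued as part of a COW operation never need to be held back, and only refs against fs/subvolume trees are sequenced at all. A hedged sketch of that gating, not the verbatim helper:

/*
 * Sketch of the intent only: COW-originated refs never get a seq, and
 * only fs/subvolume trees need sequencing for backref walking.
 */
static inline int need_ref_seq(int for_cow, u64 rootid)
{
	if (for_cow)
		return 0;
	return rootid == BTRFS_FS_TREE_OBJECTID ||
	       (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID;
}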
 
@@ -580,10 +624,12 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
  * to make sure the delayed ref is eventually processed before this
  * transaction commits.
  */
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root,  int level, int action,
-                              struct btrfs_delayed_extent_op *extent_op)
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow)
 {
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
@@ -610,13 +656,17 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
-                                  action, 0);
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+                                  num_bytes, action, 0);
        BUG_ON(ret);
 
-       ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
-                                  parent, ref_root, level, action);
+       ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
+                                  num_bytes, parent, ref_root, level, action,
+                                  for_cow);
        BUG_ON(ret);
+       if (!need_ref_seq(for_cow, ref_root) &&
+           waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
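The wake_up at the end is the producer side of the seq_wait queue: presumably a waiter blocked on held-back refs gets a chance to pick up the newly added, unsequenced ref. The waitqueue_active() shortcut avoids a wakeup when nobody sleeps and is safe here because both the waker and a sleeper that registers itself and re-checks its condition do so under delayed_refs->lock. Condensed into a sketch (notify_seq_waiters() is hypothetical, the patch open-codes it):

/*
 * Illustration only: cheap "wake only if somebody is waiting" idiom,
 * valid because both sides serialize on delayed_refs->lock.
 */
static void notify_seq_waiters(struct btrfs_delayed_ref_root *delayed_refs)
{
	assert_spin_locked(&delayed_refs->lock);
	if (waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
}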
@@ -624,11 +674,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 /*
  * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
  */
-int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
-                              struct btrfs_delayed_extent_op *extent_op)
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow)
 {
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
@@ -655,18 +707,23 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
-                                  action, 1);
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+                                  num_bytes, action, 1);
        BUG_ON(ret);
 
-       ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
-                                  parent, ref_root, owner, offset, action);
+       ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
+                                  num_bytes, parent, ref_root, owner, offset,
+                                  action, for_cow);
        BUG_ON(ret);
+       if (!need_ref_seq(for_cow, ref_root) &&
+           waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
 
-int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+                               struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
 {
@@ -683,11 +740,13 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
 
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                                   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                                   extent_op->is_data);
        BUG_ON(ret);
 
+       if (waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
@@ -704,7 +763,7 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
        struct btrfs_delayed_ref_root *delayed_refs;
 
        delayed_refs = &trans->transaction->delayed_refs;
-       ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
+       ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
        if (ref)
                return btrfs_delayed_node_to_head(ref);
        return NULL;