diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 1aff676f0e5b5b6c63efd32eee44958b40968e2a..8ae409b5a61d7186f050780beced1bf72cd572ca 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -52,7 +52,7 @@ static inline void btrfs_init_delayed_node(
 {
        delayed_node->root = root;
        delayed_node->inode_id = inode_id;
-       atomic_set(&delayed_node->refs, 0);
+       refcount_set(&delayed_node->refs, 0);
        delayed_node->ins_root = RB_ROOT;
        delayed_node->del_root = RB_ROOT;
        mutex_init(&delayed_node->mutex);
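For context on the API being swapped in (editorial note, not part of the patch): refcount_t, declared in <linux/refcount.h>, wraps the same underlying counter as atomic_t but saturates instead of wrapping on overflow, and WARNs on suspicious transitions such as incrementing from zero or decrementing below zero. A minimal get/put sketch with a hypothetical struct obj, showing only the helpers this conversion relies on:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct obj {
		refcount_t refs;
	};

	static void obj_get(struct obj *o)
	{
		refcount_inc(&o->refs);			/* WARNs if refs was 0 */
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->refs))	/* true on the 1 -> 0 drop */
			kfree(o);
	}
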
@@ -81,7 +81,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 
        node = READ_ONCE(btrfs_inode->delayed_node);
        if (node) {
-               atomic_inc(&node->refs);
+               refcount_inc(&node->refs);
                return node;
        }
 
@@ -89,14 +89,14 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
        node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
        if (node) {
                if (btrfs_inode->delayed_node) {
-                       atomic_inc(&node->refs);        /* can be accessed */
+                       refcount_inc(&node->refs);      /* can be accessed */
                        BUG_ON(btrfs_inode->delayed_node != node);
                        spin_unlock(&root->inode_lock);
                        return node;
                }
                btrfs_inode->delayed_node = node;
                /* can be accessed and cached in the inode */
-               atomic_add(2, &node->refs);
+               refcount_add(2, &node->refs);
                spin_unlock(&root->inode_lock);
                return node;
        }
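One caveat worth flagging in this hunk (editorial commentary, not part of the patch): an increment here can in principle race with the final put, since the release side drops the count before it takes inode_lock to delete the radix-tree entry, so a lookup may find a node whose count has already reached zero. refcount_inc() WARNs on that 0 -> 1 transition; the usual lookup-side idiom for such races is refcount_inc_not_zero(), which fails cleanly instead, along these lines:

	/* Hypothetical variant of the lookup above using the
	 * tolerant-increment idiom: */
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node && !refcount_inc_not_zero(&node->refs))
		node = NULL;	/* lost the race with the final put */
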
@@ -125,7 +125,7 @@ again:
        btrfs_init_delayed_node(node, root, ino);
 
        /* cached in the btrfs inode and can be accessed */
-       atomic_add(2, &node->refs);
+       refcount_set(&node->refs, 2);
 
        ret = radix_tree_preload(GFP_NOFS);
        if (ret) {
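This hunk contains the one non-mechanical change of the conversion: atomic_add(2, ...) becomes refcount_set(..., 2) rather than refcount_add(2, ...). That is deliberate. The node was just initialized with a count of zero, and refcount_add(), like refcount_inc(), WARNs when it finds the counter at zero, so a fresh object has to be initialized through refcount_set(). Side by side:

	/* Fresh node: the counter is known to be 0, so it must be set,
	 * not added to; refcount_add() on 0 trips the use-after-free
	 * warning. */
	refcount_set(&node->refs, 2);	/* inode-cache ref + caller ref */

	/* Node found live in the radix tree (lookup path earlier in the
	 * function): the counter is expected to be nonzero there, so
	 * taking both references at once stays refcount_add(). */
	refcount_add(2, &node->refs);
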
@@ -166,7 +166,7 @@ static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
        } else {
                list_add_tail(&node->n_list, &root->node_list);
                list_add_tail(&node->p_list, &root->prepare_list);
-               atomic_inc(&node->refs);        /* inserted into list */
+               refcount_inc(&node->refs);      /* inserted into list */
                root->nodes++;
                set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
        }
@@ -180,7 +180,7 @@ static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
        spin_lock(&root->lock);
        if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
                root->nodes--;
-               atomic_dec(&node->refs);        /* not in the list */
+               refcount_dec(&node->refs);      /* not in the list */
                list_del_init(&node->n_list);
                if (!list_empty(&node->p_list))
                        list_del_init(&node->p_list);
@@ -201,7 +201,7 @@ static struct btrfs_delayed_node *btrfs_first_delayed_node(
 
        p = delayed_root->node_list.next;
        node = list_entry(p, struct btrfs_delayed_node, n_list);
-       atomic_inc(&node->refs);
+       refcount_inc(&node->refs);
 out:
        spin_unlock(&delayed_root->lock);
 
@@ -228,7 +228,7 @@ static struct btrfs_delayed_node *btrfs_next_delayed_node(
                p = node->n_list.next;
 
        next = list_entry(p, struct btrfs_delayed_node, n_list);
-       atomic_inc(&next->refs);
+       refcount_inc(&next->refs);
 out:
        spin_unlock(&delayed_root->lock);
 
@@ -253,11 +253,11 @@ static void __btrfs_release_delayed_node(
                btrfs_dequeue_delayed_node(delayed_root, delayed_node);
        mutex_unlock(&delayed_node->mutex);
 
-       if (atomic_dec_and_test(&delayed_node->refs)) {
+       if (refcount_dec_and_test(&delayed_node->refs)) {
                bool free = false;
                struct btrfs_root *root = delayed_node->root;
                spin_lock(&root->inode_lock);
-               if (atomic_read(&delayed_node->refs) == 0) {
+               if (refcount_read(&delayed_node->refs) == 0) {
                        radix_tree_delete(&root->delayed_nodes_tree,
                                          delayed_node->inode_id);
                        free = true;
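This hunk is the other half of the lookup race flagged above: even after refcount_dec_and_test() reports the last reference gone, the counter is re-read under root->inode_lock before the radix-tree entry is deleted, because a concurrent lookup holding that lock may have revived the node in the meantime. The shape of the pattern, reduced to its essentials (names as in the hunk; the real code frees through the delayed node's kmem_cache, kfree() stands in here):

	if (refcount_dec_and_test(&delayed_node->refs)) {
		bool free = false;

		spin_lock(&root->inode_lock);
		/* Re-check under the lock that guards lookups: another
		 * thread may have re-raised the count since the test. */
		if (refcount_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kfree(delayed_node);
	}
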
@@ -286,7 +286,7 @@ static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
        p = delayed_root->prepare_list.next;
        list_del_init(p);
        node = list_entry(p, struct btrfs_delayed_node, p_list);
-       atomic_inc(&node->refs);
+       refcount_inc(&node->refs);
 out:
        spin_unlock(&delayed_root->lock);
 
@@ -308,7 +308,7 @@ static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
                item->ins_or_del = 0;
                item->bytes_reserved = 0;
                item->delayed_node = NULL;
-               atomic_set(&item->refs, 1);
+               refcount_set(&item->refs, 1);
        }
        return item;
 }
@@ -483,7 +483,7 @@ static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 {
        if (item) {
                __btrfs_remove_delayed_item(item);
-               if (atomic_dec_and_test(&item->refs))
+               if (refcount_dec_and_test(&item->refs))
                        kfree(item);
        }
 }
@@ -1600,14 +1600,14 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
        while (item) {
-               atomic_inc(&item->refs);
+               refcount_inc(&item->refs);
                list_add_tail(&item->readdir_list, ins_list);
                item = __btrfs_next_delayed_item(item);
        }
 
        item = __btrfs_first_delayed_deletion_item(delayed_node);
        while (item) {
-               atomic_inc(&item->refs);
+               refcount_inc(&item->refs);
                list_add_tail(&item->readdir_list, del_list);
                item = __btrfs_next_delayed_item(item);
        }
@@ -1621,7 +1621,7 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
         * insert/delete delayed items in this period. So we also needn't
         * requeue or dequeue this delayed node.
         */
-       atomic_dec(&delayed_node->refs);
+       refcount_dec(&delayed_node->refs);
 
        return true;
 }
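The readdir snapshot works by pinning: every item moved onto ins_list or del_list takes one extra reference here, and each consumer that later unlinks an item (the put helper and the two readdir paths below) drops it with refcount_dec_and_test() plus kfree(). The node itself is dropped with a bare refcount_dec() because, as the comment above it says, the inode still holds its cached reference, so this cannot be the final put. A hypothetical defensive spelling of that invariant, not in the patch:

	/* Same decrement, but loudly verifying that the inode's cached
	 * reference really does keep the count above zero. */
	WARN_ON(refcount_dec_and_test(&delayed_node->refs));
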
@@ -1634,13 +1634,13 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
 
        list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
                list_del(&curr->readdir_list);
-               if (atomic_dec_and_test(&curr->refs))
+               if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);
        }
 
        list_for_each_entry_safe(curr, next, del_list, readdir_list) {
                list_del(&curr->readdir_list);
-               if (atomic_dec_and_test(&curr->refs))
+               if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);
        }
 
@@ -1667,7 +1667,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
                list_del(&curr->readdir_list);
                ret = (curr->key.offset == index);
 
-               if (atomic_dec_and_test(&curr->refs))
+               if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);
 
                if (ret)
@@ -1705,7 +1705,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                list_del(&curr->readdir_list);
 
                if (curr->key.offset < ctx->pos) {
-                       if (atomic_dec_and_test(&curr->refs))
+                       if (refcount_dec_and_test(&curr->refs))
                                kfree(curr);
                        continue;
                }
@@ -1722,7 +1722,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
                over = !dir_emit(ctx, name, name_len,
                               location.objectid, d_type);
 
-               if (atomic_dec_and_test(&curr->refs))
+               if (refcount_dec_and_test(&curr->refs))
                        kfree(curr);
 
                if (over)
@@ -1963,7 +1963,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
                inode_id = delayed_nodes[n - 1]->inode_id + 1;
 
                for (i = 0; i < n; i++)
-                       atomic_inc(&delayed_nodes[i]->refs);
+                       refcount_inc(&delayed_nodes[i]->refs);
                spin_unlock(&root->inode_lock);
 
                for (i = 0; i < n; i++) {