diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index b5c5da215d051e2d3ef13673917b2f86b042edc8..da1096eb1a406f648b1bb0c7f3ee1da0e3013646 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -92,10 +92,10 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
  * If an existing record is found the defrag item you
  * pass in is freed
  */
-static int __btrfs_add_inode_defrag(struct inode *inode,
+static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
                                    struct inode_defrag *defrag)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        struct inode_defrag *entry;
        struct rb_node **p;
        struct rb_node *parent = NULL;
@@ -123,7 +123,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
                        return -EEXIST;
                }
        }
-       set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+       set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
        rb_link_node(&defrag->rb_node, parent, p);
        rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
        return 0;
@@ -145,10 +145,10 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
  * enabled
  */
 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
-                          struct inode *inode)
+                          struct btrfs_inode *inode)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+       struct btrfs_root *root = inode->root;
        struct inode_defrag *defrag;
        u64 transid;
        int ret;
@@ -156,13 +156,13 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        if (!__need_auto_defrag(fs_info))
                return 0;
 
-       if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
+       if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
                return 0;
 
        if (trans)
                transid = trans->transid;
        else
-               transid = BTRFS_I(inode)->root->last_trans;
+               transid = inode->root->last_trans;
 
        defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
        if (!defrag)
@@ -173,7 +173,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
        defrag->root = root->root_key.objectid;
 
        spin_lock(&fs_info->defrag_inodes_lock);
-       if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
+       if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
                /*
                 * If we set IN_DEFRAG flag and evict the inode from memory,
                 * and then re-read this inode, this new inode doesn't have
@@ -194,10 +194,10 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
  * the same inode in the tree, we will merge them together (by
  * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
  */
-static void btrfs_requeue_inode_defrag(struct inode *inode,
+static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
                                       struct inode_defrag *defrag)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        int ret;
 
        if (!__need_auto_defrag(fs_info))
@@ -334,7 +334,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
         */
        if (num_defrag == BTRFS_DEFRAG_BATCH) {
                defrag->last_offset = range.start;
-               btrfs_requeue_inode_defrag(inode, defrag);
+               btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
        } else if (defrag->last_offset && !defrag->cycled) {
                /*
                 * we didn't fill our defrag batch, but
@@ -343,7 +343,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
                 */
                defrag->last_offset = 0;
                defrag->cycled = 1;
-               btrfs_requeue_inode_defrag(inode, defrag);
+               btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
        } else {
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
        }
@@ -529,13 +529,13 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
  * this drops all the extents in the cache that intersect the range
  * [start, end].  Existing extents are split as required.
  */
-void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
                             int skip_pinned)
 {
        struct extent_map *em;
        struct extent_map *split = NULL;
        struct extent_map *split2 = NULL;
-       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       struct extent_map_tree *em_tree = &inode->extent_tree;
        u64 len = end - start + 1;
        u64 gen;
        int ret;
@@ -702,7 +702,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
        struct btrfs_file_extent_item *fi;
        struct btrfs_key key;
        struct btrfs_key new_key;
-       u64 ino = btrfs_ino(inode);
+       u64 ino = btrfs_ino(BTRFS_I(inode));
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
@@ -720,7 +720,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
        int leafs_visited = 0;
 
        if (drop_cache)
-               btrfs_drop_extent_cache(inode, start, end - 1, 0);
+               btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
 
        if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
                modify_tree = 0;
@@ -1082,10 +1082,10 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
  * two or three.
  */
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
-                             struct inode *inode, u64 start, u64 end)
+                             struct btrfs_inode *inode, u64 start, u64 end)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+       struct btrfs_root *root = inode->root;
        struct extent_buffer *leaf;
        struct btrfs_path *path;
        struct btrfs_file_extent_item *fi;
@@ -1404,6 +1404,47 @@ fail:
 
 }
 
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+                                        const u64 start,
+                                        const u64 len,
+                                        struct extent_state **cached_state)
+{
+       u64 search_start = start;
+       const u64 end = start + len - 1;
+
+       while (search_start < end) {
+               const u64 search_len = end - search_start + 1;
+               struct extent_map *em;
+               u64 em_len;
+               int ret = 0;
+
+               em = btrfs_get_extent(inode, NULL, 0, search_start,
+                                     search_len, 0);
+               if (IS_ERR(em))
+                       return PTR_ERR(em);
+
+               if (em->block_start != EXTENT_MAP_HOLE)
+                       goto next;
+
+               em_len = em->len;
+               if (em->start < search_start)
+                       em_len -= search_start - em->start;
+               if (em_len > search_len)
+                       em_len = search_len;
+
+               ret = set_extent_bit(&inode->io_tree, search_start,
+                                    search_start + em_len - 1,
+                                    EXTENT_DELALLOC_NEW,
+                                    NULL, cached_state, GFP_NOFS);
+next:
+               search_start = extent_map_end(em);
+               free_extent_map(em);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
 /*
  * This function locks the extent and properly waits for data=ordered extents
  * to finish before allowing the pages to be modified if need.
@@ -1415,13 +1456,13 @@ fail:
  * the other < 0 number - Something wrong happens
  */
 static noinline int
-lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
+lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                                size_t num_pages, loff_t pos,
                                size_t write_bytes,
                                u64 *lockstart, u64 *lockend,
                                struct extent_state **cached_state)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        u64 start_pos;
        u64 last_pos;
        int i;
@@ -1432,33 +1473,44 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
                + round_up(pos + write_bytes - start_pos,
                           fs_info->sectorsize) - 1;
 
-       if (start_pos < inode->i_size) {
+       if (start_pos < inode->vfs_inode.i_size ||
+           (inode->flags & BTRFS_INODE_PREALLOC)) {
                struct btrfs_ordered_extent *ordered;
-               lock_extent_bits(&BTRFS_I(inode)->io_tree,
-                                start_pos, last_pos, cached_state);
+               unsigned int clear_bits;
+
+               lock_extent_bits(&inode->io_tree, start_pos, last_pos,
+                               cached_state);
                ordered = btrfs_lookup_ordered_range(inode, start_pos,
                                                     last_pos - start_pos + 1);
                if (ordered &&
                    ordered->file_offset + ordered->len > start_pos &&
                    ordered->file_offset <= last_pos) {
-                       unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-                                            start_pos, last_pos,
-                                            cached_state, GFP_NOFS);
+                       unlock_extent_cached(&inode->io_tree, start_pos,
+                                       last_pos, cached_state, GFP_NOFS);
                        for (i = 0; i < num_pages; i++) {
                                unlock_page(pages[i]);
                                put_page(pages[i]);
                        }
-                       btrfs_start_ordered_extent(inode, ordered, 1);
+                       btrfs_start_ordered_extent(&inode->vfs_inode,
+                                       ordered, 1);
                        btrfs_put_ordered_extent(ordered);
                        return -EAGAIN;
                }
                if (ordered)
                        btrfs_put_ordered_extent(ordered);
-
-               clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
-                                 last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
-                                 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
-                                 0, 0, cached_state, GFP_NOFS);
+               ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
+                                                   last_pos - start_pos + 1,
+                                                   cached_state);
+               clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
+                       EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
+               if (ret)
+                       clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
+               clear_extent_bit(&inode->io_tree, start_pos,
+                                last_pos, clear_bits,
+                                (clear_bits & EXTENT_LOCKED) ? 1 : 0,
+                                0, cached_state, GFP_NOFS);
+               if (ret)
+                       return ret;
                *lockstart = start_pos;
                *lockend = last_pos;
                ret = 1;
@@ -1474,11 +1526,11 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
        return ret;
 }
 
-static noinline int check_can_nocow(struct inode *inode, loff_t pos,
+static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
                                    size_t *write_bytes)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+       struct btrfs_root *root = inode->root;
        struct btrfs_ordered_extent *ordered;
        u64 lockstart, lockend;
        u64 num_bytes;
@@ -1493,19 +1545,20 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
                           fs_info->sectorsize) - 1;
 
        while (1) {
-               lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+               lock_extent(&inode->io_tree, lockstart, lockend);
                ordered = btrfs_lookup_ordered_range(inode, lockstart,
                                                     lockend - lockstart + 1);
                if (!ordered) {
                        break;
                }
-               unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
-               btrfs_start_ordered_extent(inode, ordered, 1);
+               unlock_extent(&inode->io_tree, lockstart, lockend);
+               btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
                btrfs_put_ordered_extent(ordered);
        }
 
        num_bytes = lockend - lockstart + 1;
-       ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
+       ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
+                       NULL, NULL, NULL);
        if (ret <= 0) {
                ret = 0;
                btrfs_end_write_no_snapshoting(root);
@@ -1514,7 +1567,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
                                     num_bytes - pos + lockstart);
        }
 
-       unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+       unlock_extent(&inode->io_tree, lockstart, lockend);
 
        return ret;
 }
@@ -1579,7 +1632,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                if (ret < 0) {
                        if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                                      BTRFS_INODE_PREALLOC)) &&
-                           check_can_nocow(inode, pos, &write_bytes) > 0) {
+                           check_can_nocow(BTRFS_I(inode), pos,
+                                       &write_bytes) > 0) {
                                /*
                                 * For nodata cow case, no need to reserve
                                 * data space.
@@ -1599,7 +1653,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                        }
                }
 
-               ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
+               ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
+                               reserve_bytes);
                if (ret) {
                        if (!only_release_metadata)
                                btrfs_free_reserved_data_space(inode, pos,
@@ -1623,9 +1678,9 @@ again:
                if (ret)
                        break;
 
-               ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
-                                               pos, write_bytes, &lockstart,
-                                               &lockend, &cached_state);
+               ret = lock_and_cleanup_extent_if_need(BTRFS_I(inode), pages,
+                               num_pages, pos, write_bytes, &lockstart,
+                               &lockend, &cached_state);
                if (ret < 0) {
                        if (ret == -EAGAIN)
                                goto again;
@@ -1677,7 +1732,7 @@ again:
                                spin_unlock(&BTRFS_I(inode)->lock);
                        }
                        if (only_release_metadata) {
-                               btrfs_delalloc_release_metadata(inode,
+                               btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                                release_bytes);
                        } else {
                                u64 __pos;
@@ -1738,7 +1793,8 @@ again:
        if (release_bytes) {
                if (only_release_metadata) {
                        btrfs_end_write_no_snapshoting(root);
-                       btrfs_delalloc_release_metadata(inode, release_bytes);
+                       btrfs_delalloc_release_metadata(BTRFS_I(inode),
+                                       release_bytes);
                } else {
                        btrfs_delalloc_release_space(inode,
                                                round_down(pos, fs_info->sectorsize),
@@ -2062,7 +2118,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         * commit does not start nor waits for ordered extents to complete.
         */
        smp_mb();
-       if (btrfs_inode_in_log(inode, fs_info->generation) ||
+       if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation) ||
            (full_sync && BTRFS_I(inode)->last_trans <=
             fs_info->last_trans_committed) ||
            (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
@@ -2193,7 +2249,7 @@ static int btrfs_file_mmap(struct file    *filp, struct vm_area_struct *vma)
        return 0;
 }
 
-static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
+static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
                          int slot, u64 start, u64 end)
 {
        struct btrfs_file_extent_item *fi;
@@ -2222,15 +2278,16 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
        return 0;
 }
 
-static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
-                     struct btrfs_path *path, u64 offset, u64 end)
+static int fill_holes(struct btrfs_trans_handle *trans,
+               struct btrfs_inode *inode,
+               struct btrfs_path *path, u64 offset, u64 end)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+       struct btrfs_root *root = inode->root;
        struct extent_buffer *leaf;
        struct btrfs_file_extent_item *fi;
        struct extent_map *hole_em;
-       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       struct extent_map_tree *em_tree = &inode->extent_tree;
        struct btrfs_key key;
        int ret;
 
@@ -2253,7 +2310,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
        }
 
        leaf = path->nodes[0];
-       if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
+       if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
                u64 num_bytes;
 
                path->slots[0]--;
@@ -2285,9 +2342,8 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
        }
        btrfs_release_path(path);
 
-       ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
-                                      0, 0, end - offset, 0, end - offset,
-                                      0, 0, 0);
+       ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
+                       offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
        if (ret)
                return ret;
 
@@ -2297,8 +2353,7 @@ out:
        hole_em = alloc_extent_map();
        if (!hole_em) {
                btrfs_drop_extent_cache(inode, offset, end - 1, 0);
-               set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-                       &BTRFS_I(inode)->runtime_flags);
+               set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
        } else {
                hole_em->start = offset;
                hole_em->len = end - offset;
@@ -2321,7 +2376,7 @@ out:
                free_extent_map(hole_em);
                if (ret)
                        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-                               &BTRFS_I(inode)->runtime_flags);
+                                       &inode->runtime_flags);
        }
 
        return 0;
@@ -2338,14 +2393,9 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
        struct extent_map *em;
        int ret = 0;
 
-       em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
-       if (IS_ERR_OR_NULL(em)) {
-               if (!em)
-                       ret = -ENOMEM;
-               else
-                       ret = PTR_ERR(em);
-               return ret;
-       }
+       em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0);
+       if (IS_ERR(em))
+               return PTR_ERR(em);
 
        /* Hole or vacuum extent(only exists in no-hole mode) */
        if (em->block_start == EXTENT_MAP_HOLE) {
@@ -2551,8 +2601,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                trans->block_rsv = &fs_info->trans_block_rsv;
 
                if (cur_offset < drop_end && cur_offset < ino_size) {
-                       ret = fill_holes(trans, inode, path, cur_offset,
-                                        drop_end);
+                       ret = fill_holes(trans, BTRFS_I(inode), path,
+                                       cur_offset, drop_end);
                        if (ret) {
                                /*
                                 * If we failed then we didn't insert our hole
@@ -2623,7 +2673,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
         * cur_offset == drop_end).
         */
        if (cur_offset < ino_size && cur_offset < drop_end) {
-               ret = fill_holes(trans, inode, path, cur_offset, drop_end);
+               ret = fill_holes(trans, BTRFS_I(inode), path,
+                               cur_offset, drop_end);
                if (ret) {
                        /* Same comment as above. */
                        btrfs_abort_transaction(trans, ret);
@@ -2748,7 +2799,8 @@ static long btrfs_fallocate(struct file *file, int mode,
         *
         * For qgroup space, it will be checked later.
         */
-       ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
+       ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
+                       alloc_end - alloc_start);
        if (ret < 0)
                return ret;
 
@@ -2828,13 +2880,10 @@ static long btrfs_fallocate(struct file *file, int mode,
        /* First, check if we exceed the qgroup limit */
        INIT_LIST_HEAD(&reserve_list);
        while (1) {
-               em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+               em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
                                      alloc_end - cur_offset, 0);
-               if (IS_ERR_OR_NULL(em)) {
-                       if (!em)
-                               ret = -ENOMEM;
-                       else
-                               ret = PTR_ERR(em);
+               if (IS_ERR(em)) {
+                       ret = PTR_ERR(em);
                        break;
                }
                last_byte = min(extent_map_end(em), alloc_end);
@@ -2851,8 +2900,10 @@ static long btrfs_fallocate(struct file *file, int mode,
                        }
                        ret = btrfs_qgroup_reserve_data(inode, cur_offset,
                                        last_byte - cur_offset);
-                       if (ret < 0)
+                       if (ret < 0) {
+                               free_extent_map(em);
                                break;
+                       }
                } else {
                        /*
                         * Do not need to reserve unwritten extent for this
@@ -2876,7 +2927,7 @@ static long btrfs_fallocate(struct file *file, int mode,
                if (!ret)
                        ret = btrfs_prealloc_file_range(inode, mode,
                                        range->start,
-                                       range->len, 1 << inode->i_blkbits,
+                                       range->len, i_blocksize(inode),
                                        offset + len, &alloc_hint);
                else
                        btrfs_free_reserved_data_space(inode, range->start,
@@ -2955,7 +3006,8 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
                         &cached_state);
 
        while (start < inode->i_size) {
-               em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
+               em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
+                               start, len, 0);
                if (IS_ERR(em)) {
                        ret = PTR_ERR(em);
                        em = NULL;
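
Note: the most substantive addition in this diff is btrfs_find_new_delalloc_bytes(), which walks the extent maps covering a write range and tags each hole it overlaps with EXTENT_DELALLOC_NEW before the caller clears the usual delalloc bits. The standalone program below is a minimal sketch of just that clamp-and-advance walk; struct em, mark_range() and the local find_new_delalloc_bytes() are simplified stand-ins invented for illustration, not kernel APIs.

/*
 * Standalone illustration of the range walk in the new
 * btrfs_find_new_delalloc_bytes() helper.  Types are stand-ins,
 * not the kernel's extent_map machinery.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

/* Minimal stand-in for one extent map entry. */
struct em {
	u64 start;    /* logical start of the mapping   */
	u64 len;      /* length of the mapping in bytes */
	int is_hole;  /* stands in for EXTENT_MAP_HOLE  */
};

/* Placeholder for set_extent_bit(..., EXTENT_DELALLOC_NEW, ...). */
static void mark_range(u64 start, u64 len)
{
	printf("mark [%llu, %llu] as new delalloc\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + len - 1));
}

/*
 * Walk the maps covering [start, start + len - 1] and mark the part
 * of every hole that overlaps the window, mirroring the clamping in
 * the kernel helper: trim the front of the hole if it starts before
 * the current search position, then cap the length at what is left
 * of the window.
 */
static void find_new_delalloc_bytes(const struct em *maps, int nr,
				    u64 start, u64 len)
{
	u64 search_start = start;
	const u64 end = start + len - 1;
	int i = 0;

	while (search_start < end && i < nr) {
		const struct em *em = &maps[i++];
		const u64 search_len = end - search_start + 1;
		u64 em_len = em->len;

		if (em->is_hole) {
			if (em->start < search_start)
				em_len -= search_start - em->start;
			if (em_len > search_len)
				em_len = search_len;
			mark_range(search_start, em_len);
		}
		/* Same role as extent_map_end(em) in the kernel loop. */
		search_start = em->start + em->len;
	}
}

int main(void)
{
	/* One mapped extent followed by a hole larger than the window. */
	const struct em maps[] = {
		{ 0, 4096, 0 },
		{ 4096, 65536, 1 },
	};

	find_new_delalloc_bytes(maps, 2, 0, 16384);
	return 0;
}

In the patch itself, lock_and_cleanup_extent_if_need() calls the real helper for the locked write range and, if it fails, clears EXTENT_DELALLOC_NEW and EXTENT_LOCKED together with the other delalloc bits, as the hunk above shows.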