Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/josef/btrfs...
index b06d289f998f310390527324368513fceadafb19..9f67e623206d90c7ef279a6291b116f5211007b4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
 #include <linux/statfs.h>
 #include <linux/compat.h>
 #include <linux/slab.h>
+#include <linux/btrfs.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "btrfs_inode.h"
-#include "ioctl.h"
 #include "print-tree.h"
 #include "tree-log.h"
 #include "locking.h"
@@ -1426,8 +1426,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
                cond_resched();
 
-               balance_dirty_pages_ratelimited_nr(inode->i_mapping,
-                                                  dirty_pages);
+               balance_dirty_pages_ratelimited(inode->i_mapping);
                if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root);
 
@@ -1545,7 +1544,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
         * although we have opened a file as writable, we have
         * to stop this write operation to ensure FS consistency.
         */
-       if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+       if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
                mutex_unlock(&inode->i_mutex);
                err = -EROFS;
                goto out;
@@ -1628,7 +1627,20 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
         */
        if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
                               &BTRFS_I(inode)->runtime_flags)) {
-               btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
+               struct btrfs_trans_handle *trans;
+               struct btrfs_root *root = BTRFS_I(inode)->root;
+
+               /*
+                * We need to block on a committing transaction to keep us from
+                * throwing an ordered operation onto the list and causing
+                * something like sync to deadlock trying to flush out this
+                * inode.
+                */
+               trans = btrfs_start_transaction(root, 0);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
+               btrfs_add_ordered_operation(trans, root, inode);
+               btrfs_end_transaction(trans, root);
                if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
                        filemap_flush(inode->i_mapping);
        }
@@ -1655,16 +1667,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret = 0;
        struct btrfs_trans_handle *trans;
+       bool full_sync = false;
 
        trace_btrfs_sync_file(file, datasync);
 
        /*
         * We write the dirty pages in the range and wait until they complete
         * out of the ->i_mutex. If so, we can flush the dirty pages by
-        * multi-task, and make the performance up.
+        * multiple tasks, which improves performance.  See
+        * btrfs_wait_ordered_range for an explanation of the ASYNC check.
         */
        atomic_inc(&BTRFS_I(inode)->sync_writers);
-       ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+       if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                            &BTRFS_I(inode)->runtime_flags))
+               ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
        atomic_dec(&BTRFS_I(inode)->sync_writers);
        if (ret)
                return ret;
@@ -1676,7 +1693,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         * range being left.
         */
        atomic_inc(&root->log_batch);
-       btrfs_wait_ordered_range(inode, start, end - start + 1);
+       full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                            &BTRFS_I(inode)->runtime_flags);
+       if (full_sync)
+               btrfs_wait_ordered_range(inode, start, end - start + 1);
        atomic_inc(&root->log_batch);
 
        /*
@@ -1743,13 +1763,25 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        if (ret != BTRFS_NO_LOG_SYNC) {
                if (ret > 0) {
+                       /*
+                        * If we didn't already wait for ordered extents, we need
+                        * to do that now.
+                        */
+                       if (!full_sync)
+                               btrfs_wait_ordered_range(inode, start,
+                                                        end - start + 1);
                        ret = btrfs_commit_transaction(trans, root);
                } else {
                        ret = btrfs_sync_log(trans, root);
-                       if (ret == 0)
+                       if (ret == 0) {
                                ret = btrfs_end_transaction(trans, root);
-                       else
+                       } else {
+                               if (!full_sync)
+                                       btrfs_wait_ordered_range(inode, start,
+                                                                end -
+                                                                start + 1);
                                ret = btrfs_commit_transaction(trans, root);
+                       }
                }
        } else {
                ret = btrfs_end_transaction(trans, root);
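For orientation, btrfs_sync_file() above is the ->fsync handler the VFS invokes for both fsync(2) and fdatasync(2), with the datasync argument distinguishing the two. A minimal user-space sketch of exercising that path follows; the file path is an illustrative assumption, not part of the patch.

/* Hedged sketch: user-space calls that end up in btrfs_sync_file(). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Path is an assumption; any file on a btrfs mount would do. */
        int fd = open("/mnt/btrfs/testfile", O_WRONLY | O_CREAT, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "data", 4) != 4)
                perror("write");
        if (fsync(fd))          /* datasync == 0: flush data and metadata */
                perror("fsync");
        if (fdatasync(fd))      /* datasync == 1: data-only variant */
                perror("fdatasync");
        close(fd);
        return 0;
}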
@@ -2240,7 +2272,7 @@ out:
        return ret;
 }
 
-static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
+static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_map *em;
@@ -2275,7 +2307,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
         * before the position we want in case there is outstanding delalloc
         * going on here.
         */
-       if (origin == SEEK_HOLE && start != 0) {
+       if (whence == SEEK_HOLE && start != 0) {
                if (start <= root->sectorsize)
                        em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
                                                     root->sectorsize, 0);
@@ -2309,13 +2341,13 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
                                }
                        }
 
-                       if (origin == SEEK_HOLE) {
+                       if (whence == SEEK_HOLE) {
                                *offset = start;
                                free_extent_map(em);
                                break;
                        }
                } else {
-                       if (origin == SEEK_DATA) {
+                       if (whence == SEEK_DATA) {
                                if (em->block_start == EXTENT_MAP_DELALLOC) {
                                        if (start >= inode->i_size) {
                                                free_extent_map(em);
@@ -2355,16 +2387,16 @@ out:
        return ret;
 }
 
-static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
+static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
 {
        struct inode *inode = file->f_mapping->host;
        int ret;
 
        mutex_lock(&inode->i_mutex);
-       switch (origin) {
+       switch (whence) {
        case SEEK_END:
        case SEEK_CUR:
-               offset = generic_file_llseek(file, offset, origin);
+               offset = generic_file_llseek(file, offset, whence);
                goto out;
        case SEEK_DATA:
        case SEEK_HOLE:
@@ -2373,7 +2405,7 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
                        return -ENXIO;
                }
 
-               ret = find_desired_extent(inode, &offset, origin);
+               ret = find_desired_extent(inode, &offset, whence);
                if (ret) {
                        mutex_unlock(&inode->i_mutex);
                        return ret;
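The origin to whence rename above matches the lseek(2) terminology; SEEK_DATA and SEEK_HOLE are the two whence values that fall through to find_desired_extent(). A hedged user-space sketch of probing a file's layout with them (the file name and offsets are illustrative assumptions):

/* Hedged sketch: SEEK_DATA / SEEK_HOLE probing (glibc needs _GNU_SOURCE). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "testfile"; /* assumed path */
        int fd = open(path, O_RDONLY);
        off_t data, hole;

        if (fd < 0) {
                perror("open");
                return 1;
        }
        data = lseek(fd, 0, SEEK_DATA);         /* first data byte at or after offset 0 */
        if (data < 0) {
                perror("SEEK_DATA");            /* ENXIO: offset past EOF or only a trailing hole */
        } else {
                hole = lseek(fd, data, SEEK_HOLE); /* next hole; EOF counts as a hole */
                printf("data at %lld, next hole at %lld\n",
                       (long long)data, (long long)hole);
        }
        close(fd);
        return 0;
}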