writeback: remove nr_pages_dirtied arg from balance_dirty_pages_ratelimited_nr()
author    Namjae Jeon <linkinjeon@gmail.com>
          Tue, 23 Oct 2012 02:50:09 +0000 (13:50 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 23 Oct 2012 03:11:46 +0000 (14:11 +1100)
There is no reason to pass the nr_pages_dirtied argument, because the
nr_pages_dirtied value from the caller is never used in
balance_dirty_pages_ratelimited_nr().
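
As a rough illustration of the interface change (not part of this commit),
a caller that used to pass an explicit page count now passes only the
mapping; the helper below is a hypothetical sketch, assuming the usual
kernel headers for struct address_space and the writeback API:

    #include <linux/fs.h>           /* struct address_space */
    #include <linux/writeback.h>    /* balance_dirty_pages_ratelimited() */

    /*
     * Hypothetical caller, for illustration only: after dirtying pages in a
     * buffered-write path, throttle against the dirty limits.  The per-task
     * page count is tracked internally via current->nr_dirtied, so no count
     * needs to be passed any more.
     */
    static void example_throttle_dirty(struct address_space *mapping)
    {
            /* before: balance_dirty_pages_ratelimited_nr(mapping, nr_dirtied); */
            balance_dirty_pages_ratelimited(mapping);
    }

The diff below applies exactly this transformation to each in-tree caller.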

Signed-off-by: Namjae Jeon <linkinjeon@gmail.com>
Signed-off-by: Vivek Trivedi <vtrivedi018@gmail.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/btrfs/disk-io.c
fs/btrfs/file.c
fs/btrfs/ioctl.c
fs/ocfs2/file.c
fs/splice.c
include/linux/writeback.h
mm/page-writeback.c

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7cda51995c1e589eaf36fe048518bbbe0bd21109..22a0439e5a86316196f07c26896e823f5cf756a1 100644 (file)
@@ -3416,8 +3416,8 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
        num_dirty = root->fs_info->dirty_metadata_bytes;
 
        if (num_dirty > thresh) {
-               balance_dirty_pages_ratelimited_nr(
-                                  root->fs_info->btree_inode->i_mapping, 1);
+               balance_dirty_pages_ratelimited(
+                                  root->fs_info->btree_inode->i_mapping);
        }
        return;
 }
@@ -3437,8 +3437,8 @@ void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
        num_dirty = root->fs_info->dirty_metadata_bytes;
 
        if (num_dirty > thresh) {
-               balance_dirty_pages_ratelimited_nr(
-                                  root->fs_info->btree_inode->i_mapping, 1);
+               balance_dirty_pages_ratelimited(
+                                  root->fs_info->btree_inode->i_mapping);
        }
        return;
 }
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9ab1bed88116bd8f412f095aaef231f5bb749baf..a8ee75cb96eeab848677daeea7130297bbd22e43 100644 (file)
@@ -1346,8 +1346,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
                cond_resched();
 
-               balance_dirty_pages_ratelimited_nr(inode->i_mapping,
-                                                  dirty_pages);
+               balance_dirty_pages_ratelimited(inode->i_mapping);
                if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root, 1);
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 61168805f175b3c50ffef99b87e97fdeb2643ebd..e27ea127ebedd856ca0e19639037047904a97802 100644 (file)
@@ -1223,7 +1223,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                }
 
                defrag_count += ret;
-               balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
+               balance_dirty_pages_ratelimited(inode->i_mapping);
                mutex_unlock(&inode->i_mutex);
 
                if (newer_than) {
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 5a4ee77cec518ab6108514008681ffd74d562bdc..dda08980494259bbd9107157b64f8de995ddcfbe 100644 (file)
@@ -2513,18 +2513,15 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
                ret = sd.num_spliced;
 
        if (ret > 0) {
-               unsigned long nr_pages;
                int err;
 
-               nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
                err = generic_write_sync(out, *ppos, ret);
                if (err)
                        ret = err;
                else
                        *ppos += ret;
 
-               balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
+               balance_dirty_pages_ratelimited(mapping);
        }
 
        return ret;
diff --git a/fs/splice.c b/fs/splice.c
index 13e5b4776e7aade28b0f2161b4eb9a27a8030f31..8890604e3fcdd638d6d150ffbfc981a783467dfe 100644 (file)
@@ -1024,17 +1024,14 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                ret = sd.num_spliced;
 
        if (ret > 0) {
-               unsigned long nr_pages;
                int err;
 
-               nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
                err = generic_write_sync(out, *ppos, ret);
                if (err)
                        ret = err;
                else
                        *ppos += ret;
-               balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
+               balance_dirty_pages_ratelimited(mapping);
        }
        sb_end_write(inode->i_sb);
 
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 50c3e8fa06a865b439047f80a0ac9be9a26cfeeb..b82a83aba31185870b132d4e8d7c99bb975fc0b6 100644 (file)
@@ -161,14 +161,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi,
                            unsigned long start_time);
 
 void page_writeback_init(void);
-void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
-                                       unsigned long nr_pages_dirtied);
-
-static inline void
-balance_dirty_pages_ratelimited(struct address_space *mapping)
-{
-       balance_dirty_pages_ratelimited_nr(mapping, 1);
-}
+void balance_dirty_pages_ratelimited(struct address_space *mapping);
 
 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
                                void *data);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 830893b2b3c776b07070d7257e771406b388dec8..6f4271224493a01699f7ffdc29afa54b7ffd94ac 100644 (file)
@@ -1069,7 +1069,7 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 }
 
 /*
- * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr()
+ * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
  * will look to see if it needs to start dirty throttling.
  *
  * If dirty_poll_interval is too low, big NUMA machines will call the expensive
@@ -1436,9 +1436,8 @@ static DEFINE_PER_CPU(int, bdp_ratelimits);
 DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
 
 /**
- * balance_dirty_pages_ratelimited_nr - balance dirty memory state
+ * balance_dirty_pages_ratelimited - balance dirty memory state
  * @mapping: address_space which was dirtied
- * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's
@@ -1449,8 +1448,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
-                                       unsigned long nr_pages_dirtied)
+void balance_dirty_pages_ratelimited(struct address_space *mapping)
 {
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ratelimit;
@@ -1484,6 +1482,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
         */
        p = &__get_cpu_var(dirty_throttle_leaks);
        if (*p > 0 && current->nr_dirtied < ratelimit) {
+               unsigned long nr_pages_dirtied;
                nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
                *p -= nr_pages_dirtied;
                current->nr_dirtied += nr_pages_dirtied;
@@ -1493,7 +1492,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
        if (unlikely(current->nr_dirtied >= ratelimit))
                balance_dirty_pages(mapping, current->nr_dirtied);
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
 
 void throttle_vm_writeout(gfp_t gfp_mask)
 {