mm/page-writeback.c: make determine_dirtyable_memory static again
author Johannes Weiner <hannes@cmpxchg.org>
Mon, 24 Oct 2011 14:54:01 +0000 (01:54 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 7 Nov 2011 02:40:26 +0000 (13:40 +1100)
The tracing ring-buffer used this function briefly, but not anymore.
Make it local to the writeback code again.

Also, move the function so that no forward declaration needs to be
reintroduced.
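
For readers unfamiliar with the ordering issue: a minimal, self-contained C sketch (illustrative names only, not part of this patch) of why defining a static function above its first caller makes a forward declaration unnecessary:

	/* helper() defined below its caller: the compiler needs a
	 * forward declaration to know about it at the call site. */
	static unsigned long helper(void);

	unsigned long caller(void)
	{
		return helper();
	}

	static unsigned long helper(void)
	{
		return 0;
	}

	/* Moving the definition above the caller, as this patch does
	 * with determine_dirtyable_memory(), lets the forward
	 * declaration be dropped entirely. */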

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/writeback.h
mm/page-writeback.c

diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a378c295851f8cf9fe4d804080b30f72cf5101cd..34a005515fef1c4a015ab7cb0053f42665fa343c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -138,8 +138,6 @@ extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
 
-extern unsigned long determine_dirtyable_memory(void);
-
 extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 94e01fb05e5e0155c67e5e8c265933c4a44b7847..6ff5bac617cdc191e0dbf1d23d8b81d8e62b778f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -130,6 +130,66 @@ unsigned long global_dirty_limit;
 static struct prop_descriptor vm_completions;
 static struct prop_descriptor vm_dirties;
 
+/*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around, to avoid stressing page reclaim with lots of unreclaimable
+ * pages.  It is better to clamp down on writers than to start swapping and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+       int node;
+       unsigned long x = 0;
+
+       for_each_node_state(node, N_HIGH_MEMORY) {
+               struct zone *z =
+                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+               x += zone_page_state(z, NR_FREE_PAGES) +
+                    zone_reclaimable_pages(z);
+       }
+       /*
+        * Make sure that the number of highmem pages is never larger
+	 * than the total amount of dirtyable memory.  This can only
+        * occur in very strange VM situations but we want to make sure
+        * that this does not occur.
+        */
+       return min(x, total);
+#else
+       return 0;
+#endif
+}
+
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+static unsigned long determine_dirtyable_memory(void)
+{
+       unsigned long x;
+
+       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+
+       if (!vm_highmem_is_dirtyable)
+               x -= highmem_dirtyable_memory(x);
+
+       return x + 1;   /* Ensure that we never return 0 */
+}
+
 /*
  * couple the period to the dirty_ratio:
  *
@@ -198,7 +258,6 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
        return ret;
 }
 
-
 int dirty_bytes_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
@@ -298,67 +357,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 }
 EXPORT_SYMBOL(bdi_set_max_ratio);
 
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around, to avoid stressing page reclaim with lots of unreclaimable
- * pages.  It is better to clamp down on writers than to start swapping and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
-       int node;
-       unsigned long x = 0;
-
-       for_each_node_state(node, N_HIGH_MEMORY) {
-               struct zone *z =
-                       &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
-               x += zone_page_state(z, NR_FREE_PAGES) +
-                    zone_reclaimable_pages(z);
-       }
-       /*
-        * Make sure that the number of highmem pages is never larger
-	 * than the total amount of dirtyable memory.  This can only
-        * occur in very strange VM situations but we want to make sure
-        * that this does not occur.
-        */
-       return min(x, total);
-#else
-       return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the number of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
-       unsigned long x;
-
-       x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
-       if (!vm_highmem_is_dirtyable)
-               x -= highmem_dirtyable_memory(x);
-
-       return x + 1;   /* Ensure that we never return 0 */
-}
-
 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
                                           unsigned long bg_thresh)
 {
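
For context on where the returned value flows, a hedged sketch simplified from the 2011-era global_dirty_limits() (not verbatim kernel code; vm_dirty_ratio and dirty_background_ratio are the existing sysctls):

	unsigned long available = determine_dirtyable_memory();
	unsigned long dirty, background;

	/* The percentage sysctls are applied to the dirtyable total
	 * computed above (the *_bytes sysctls, when set, bypass this
	 * ratio calculation). */
	dirty = (vm_dirty_ratio * available) / 100;
	background = (dirty_background_ratio * available) / 100;

	/* Background writeout must kick in below the hard clamp. */
	if (background >= dirty)
		background = dirty / 2;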