mm: remove __GFP_NO_KSWAPD
author     Rik van Riel <riel@redhat.com>
           Thu, 13 Sep 2012 00:58:26 +0000 (10:58 +1000)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Tue, 18 Sep 2012 06:04:12 +0000 (16:04 +1000)
When transparent huge pages were introduced, memory compaction and swap
storms were an issue, and the kernel had to be careful not to let THP
allocations cause pageout or compaction.

Now that we have working compaction deferral, kswapd is smart enough to
invoke compaction, and the quadratic behaviour around isolate_free_pages
has been fixed, it should be safe to remove __GFP_NO_KSWAPD.

[minchan@kernel.org: Comment fix]
[mgorman@suse.de: Avoid direct reclaim for deferred compaction]
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
drivers/mtd/mtdcore.c
include/linux/gfp.h
include/trace/events/gfpflags.h
mm/page_alloc.c

diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index ec794a72975dd886205f2460e2eaba08089a8daf..374c46dff7dd65d3aea7eef69b22a332d727ad0f 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1077,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  * until the request succeeds or until the allocation size falls below
  * the system page size. This attempts to make sure it does not adversely
  * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
  *
  * Note, this function also makes sure that the allocated buffer is aligned to
  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1092,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-       gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
-                      __GFP_NORETRY | __GFP_NO_KSWAPD;
+       gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
        size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
        void *kbuf;
 
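For context, the loop that follows these declarations in mtd_kmalloc_up_to() (not part of the hunk above) is a downsize-and-retry pattern; the sketch below is a paraphrase of that loop, not the verbatim kernel source:

	while (*size > min_alloc) {
		/* Opportunistic attempt: the flags above tell the allocator
		 * not to retry and not to warn on failure. */
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		/* Halve the request, but keep it aligned to the MTD write size. */
		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/* Last resort: the minimum size, letting kmalloc() try as hard as usual. */
	return kmalloc(*size, GFP_KERNEL);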
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4883f393f50a8049cc83298068b5751621a7a8dc..f9bc873ce7d6c353ab9b6fb4a7a9fce75854c8c8 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -35,7 +35,6 @@ struct vm_area_struct;
 #else
 #define ___GFP_NOTRACK         0
 #endif
-#define ___GFP_NO_KSWAPD       0x400000u
 #define ___GFP_OTHER_NODE      0x800000u
 #define ___GFP_WRITE           0x1000000u
 
@@ -90,7 +89,6 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK  ((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
-#define __GFP_NO_KSWAPD        ((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE    ((__force gfp_t)___GFP_WRITE)   /* Allocator intends to dirty page */
 
@@ -120,8 +118,7 @@ struct vm_area_struct;
                                 __GFP_MOVABLE)
 #define GFP_IOFS       (__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE  (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-                        __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
-                        __GFP_NO_KSWAPD)
+                        __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE   (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
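With __GFP_NO_KSWAPD gone from GFP_TRANSHUGE, a transparent-huge-page allocation is free to wake kswapd in the slow path like any other high-order request. A minimal sketch of such an allocation site (the function name is made up for illustration; the real callers live in mm/huge_memory.c):

	/* thp_alloc_sketch() is hypothetical, shown only to illustrate how
	 * GFP_TRANSHUGE is consumed by an allocation site. */
	static struct page *thp_alloc_sketch(void)
	{
		/* After this patch the slow path may wake kswapd for this
		 * request; compaction deferral keeps the extra work bounded. */
		return alloc_pages(GFP_TRANSHUGE, HPAGE_PMD_ORDER);
	}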
diff --git a/include/trace/events/gfpflags.h b/include/trace/events/gfpflags.h
index d6fd8e5b14b76c41bfd532c3fa86255e4e92b0f3..9391706e92541d2b5d865610a88525bf8215a599 100644
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -36,7 +36,6 @@
        {(unsigned long)__GFP_RECLAIMABLE,      "GFP_RECLAIMABLE"},     \
        {(unsigned long)__GFP_MOVABLE,          "GFP_MOVABLE"},         \
        {(unsigned long)__GFP_NOTRACK,          "GFP_NOTRACK"},         \
-       {(unsigned long)__GFP_NO_KSWAPD,        "GFP_NO_KSWAPD"},       \
        {(unsigned long)__GFP_OTHER_NODE,       "GFP_OTHER_NODE"}       \
        ) : "GFP_NOWAIT"
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1bfadce2798962cc63e3b4669d7fcd73ff686f07..27c155b441918eaf3da8755cfdfaaee9f40e4592 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2361,9 +2361,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                goto nopage;
 
 restart:
-       if (!(gfp_mask & __GFP_NO_KSWAPD))
-               wake_all_kswapd(order, zonelist, high_zoneidx,
-                                               zone_idx(preferred_zone));
+       wake_all_kswapd(order, zonelist, high_zoneidx,
+                                       zone_idx(preferred_zone));
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
@@ -2440,7 +2439,7 @@ rebalance:
         * system then fail the allocation instead of entering direct reclaim.
         */
        if ((deferred_compaction || contended_compaction) &&
-                                               (gfp_mask & __GFP_NO_KSWAPD))
+           (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
                goto nopage;
 
        /* Try direct reclaim and then allocating */
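The new test works because GFP_TRANSHUGE contains __GFP_MOVABLE (via GFP_HIGHUSER_MOVABLE) but not __GFP_REPEAT, so THP-style requests still bail out to nopage when compaction was deferred or contended, while movable allocations that explicitly ask for retries go on to direct reclaim. A small sketch of the predicate (the helper name is made up; the patch open-codes the test):

	/* thp_like() is a hypothetical helper, not part of the patch. */
	static bool thp_like(gfp_t gfp_mask)
	{
		return (gfp_mask & (__GFP_MOVABLE | __GFP_REPEAT)) == __GFP_MOVABLE;
	}

	/*
	 * thp_like(GFP_TRANSHUGE)                       -> true  (movable, no __GFP_REPEAT)
	 * thp_like(GFP_KERNEL)                          -> false (not movable)
	 * thp_like(GFP_HIGHUSER_MOVABLE | __GFP_REPEAT) -> false (caller asked for retries)
	 */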