mm: use IS_ENABLED(CONFIG_NUMA) instead of NUMA_BUILD
author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
	Sat, 3 Nov 2012 00:42:12 +0000 (11:42 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Thu, 8 Nov 2012 04:08:31 +0000 (15:08 +1100)
We don't need the custom NUMA_BUILD macro anymore, since we have the
handy IS_ENABLED() helper.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
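
For reference, IS_ENABLED() is defined in include/linux/kconfig.h and expands
to the compile-time constant 1 or 0, so the compiler can still discard the
disabled branch exactly as it did with NUMA_BUILD.  Below is a minimal
standalone sketch of the preprocessor trick behind it; it is simplified (the
real kernel macro also accepts the CONFIG_FOO_MODULE form for tristate
options), and CONFIG_NUMA is defined by hand here purely for illustration:

#include <stdio.h>

#define CONFIG_NUMA 1	/* stand-in for what Kconfig would generate */

/*
 * If the option is defined as 1, token pasting yields __ARG_PLACEHOLDER_1,
 * which expands to "0," and shifts the literal 1 into the second argument
 * slot.  If the option is undefined, the pasted token is junk that stays
 * in the first slot, so the fallback 0 remains the second argument.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option)		__is_defined(option)

int main(void)
{
	if (IS_ENABLED(CONFIG_NUMA))	/* a constant, so no runtime cost */
		printf("NUMA is enabled\n");
	return 0;
}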
include/linux/gfp.h
include/linux/kernel.h
mm/page_alloc.c
mm/vmalloc.c

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 02c1c9710be0e5fea029fd4462fe57793154216e..6418418e22fe8ffc61cb0c8cea8d576d4d095085 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -263,7 +263,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 
 static inline int gfp_zonelist(gfp_t flags)
 {
-       if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
+       if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE))
                return 1;
 
        return 0;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a123b13b70fd80cb12def4c100ee198606b7c6b6..6bc5fa8d3dff6fa8f991570de9e747d37e77666a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -687,13 +687,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
 /* Trap pasters of __FUNCTION__ at compile-time */
 #define __FUNCTION__ (__func__)
 
-/* This helps us to avoid #ifdef CONFIG_NUMA */
-#ifdef CONFIG_NUMA
-#define NUMA_BUILD 1
-#else
-#define NUMA_BUILD 0
-#endif
-
 /* This helps us avoid #ifdef CONFIG_COMPACTION */
 #ifdef CONFIG_COMPACTION
 #define COMPACTION_BUILD 1
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 29c07b403c61bac113f6a02a95e03f29f1b2a8d7..a77097d54b2e9bdfe3a91357abc06da7979f9c8c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1870,7 +1870,7 @@ zonelist_scan:
         */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                high_zoneidx, nodemask) {
-               if (NUMA_BUILD && zlc_active &&
+               if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
                                continue;
                if ((alloc_flags & ALLOC_CPUSET) &&
@@ -1916,7 +1916,8 @@ zonelist_scan:
                                    classzone_idx, alloc_flags))
                                goto try_this_zone;
 
-                       if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+                       if (IS_ENABLED(CONFIG_NUMA) &&
+                                       !did_zlc_setup && nr_online_nodes > 1) {
                                /*
                                 * we do zlc_setup if there are multiple nodes
                                 * and before considering the first zone allowed
@@ -1935,7 +1936,7 @@ zonelist_scan:
                         * As we may have just activated ZLC, check if the first
                         * eligible zone has failed zone_reclaim recently.
                         */
-                       if (NUMA_BUILD && zlc_active &&
+                       if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
                                !zlc_zone_worth_trying(zonelist, z, allowednodes))
                                continue;
 
@@ -1961,11 +1962,11 @@ try_this_zone:
                if (page)
                        break;
 this_zone_full:
-               if (NUMA_BUILD)
+               if (IS_ENABLED(CONFIG_NUMA))
                        zlc_mark_zone_full(zonelist, z);
        }
 
-       if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+       if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
                /* Disable zlc cache for second zonelist scan */
                zlc_active = 0;
                goto zonelist_scan;
@@ -2265,7 +2266,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
                return NULL;
 
        /* After successful reclaim, reconsider all zones for allocation */
-       if (NUMA_BUILD)
+       if (IS_ENABLED(CONFIG_NUMA))
                zlc_clear_zones_full(zonelist);
 
 retry:
@@ -2411,7 +2412,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * allowed per node queues are empty and that nodes are
         * over allocated.
         */
-       if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+       if (IS_ENABLED(CONFIG_NUMA) &&
+                       (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                goto nopage;
 
 restart:
@@ -2817,7 +2819,7 @@ unsigned int nr_free_pagecache_pages(void)
 
 static inline void show_node(struct zone *zone)
 {
-       if (NUMA_BUILD)
+       if (IS_ENABLED(CONFIG_NUMA))
                printk("Node %d ", zone_to_nid(zone));
 }
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 78e08300db21c6707dc1a69d6e8212f332ae68a0..5123a169ab7b0cbcb45776aef162b13c5d089455 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2550,7 +2550,7 @@ static void s_stop(struct seq_file *m, void *p)
 
 static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 {
-       if (NUMA_BUILD) {
+       if (IS_ENABLED(CONFIG_NUMA)) {
                unsigned int nr, *counters = m->private;
 
                if (!counters)
@@ -2615,7 +2615,7 @@ static int vmalloc_open(struct inode *inode, struct file *file)
        unsigned int *ptr = NULL;
        int ret;
 
-       if (NUMA_BUILD) {
+       if (IS_ENABLED(CONFIG_NUMA)) {
                ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
                if (ptr == NULL)
                        return -ENOMEM;