From a3187e438bc6565d6e54a550a19073d1b453f041 Mon Sep 17 00:00:00 2001
From: Yang Shi <yang.shi@linaro.org>
Date: Thu, 19 May 2016 17:10:41 -0700
Subject: [PATCH] mm: slab: remove ZONE_DMA_FLAG

Now that the IS_ENABLED() helper can check whether a Kconfig option is
enabled, ZONE_DMA_FLAG is no longer useful.  Moreover, according to
Johannes Weiner's comment [1], the uses of ZONE_DMA_FLAG in slab are
pointless, because ORing the passed-in flags with the cache's gfp flags
is already done in kmem_getpages().  Remove them.

[1] https://lkml.org/lkml/2014/9/25/553

Link: http://lkml.kernel.org/r/1462381297-11009-1-git-send-email-yang.shi@linaro.org
Signed-off-by: Yang Shi <yang.shi@linaro.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 mm/Kconfig |  5 -----
 mm/slab.c  | 23 +----------------------
 2 files changed, 1 insertion(+), 27 deletions(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index 989f8f3d77e0..d6e9042b99e0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -268,11 +268,6 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION
 config PHYS_ADDR_T_64BIT
 	def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
 
-config ZONE_DMA_FLAG
-	int
-	default "0" if !ZONE_DMA
-	default "1"
-
 config BOUNCE
 	bool "Enable bounce buffers"
 	default y
diff --git a/mm/slab.c b/mm/slab.c
index 1ee26a0d358f..d81565a92864 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2236,7 +2236,7 @@ done:
 	cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
 	cachep->flags = flags;
 	cachep->allocflags = __GFP_COMP;
-	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
+	if (flags & SLAB_CACHE_DMA)
 		cachep->allocflags |= GFP_DMA;
 	cachep->size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
@@ -2664,16 +2664,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 	}
 }
 
-static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
-{
-	if (CONFIG_ZONE_DMA_FLAG) {
-		if (flags & GFP_DMA)
-			BUG_ON(!(cachep->allocflags & GFP_DMA));
-		else
-			BUG_ON(cachep->allocflags & GFP_DMA);
-	}
-}
-
 static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
 {
 	void *objp;
@@ -2752,14 +2742,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 	if (gfpflags_allow_blocking(local_flags))
 		local_irq_enable();
 
-	/*
-	 * The test for missing atomic flag is performed here, rather than
-	 * the more obvious place, simply to reduce the critical path length
-	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
-	 * will eventually be caught here (where it matters).
-	 */
-	kmem_flagcheck(cachep, flags);
-
 	/*
 	 * Get mem for the objs. Attempt to allocate a physical page from
 	 * 'nodeid'.
@@ -3145,9 +3127,6 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 						gfp_t flags)
 {
 	might_sleep_if(gfpflags_allow_blocking(flags));
-#if DEBUG
-	kmem_flagcheck(cachep, flags);
-#endif
 }
 
 #if DEBUG
-- 
2.39.2
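
For reference, IS_ENABLED() (include/linux/kconfig.h) expands to a
compile-time constant 1 when the named option is set to 'y' or 'm', and
to 0 otherwise, so a guard that previously relied on the mirror symbol
CONFIG_ZONE_DMA_FLAG could instead test CONFIG_ZONE_DMA directly.  A
minimal sketch of that idiom follows; this is an illustration only, as
the patch above simply drops the check as redundant rather than
converting it:

	/* Sketch, not part of the patch: the removed ZONE_DMA_FLAG test
	 * could equally have been written with IS_ENABLED(), which the
	 * compiler folds to a constant, so the dead branch is eliminated
	 * just as it was with the old int Kconfig symbol.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & SLAB_CACHE_DMA))
		cachep->allocflags |= GFP_DMA;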