slub: create new ___slab_alloc function that can be called with irqs disabled
author		Christoph Lameter <cl@linux.com>
		Wed, 21 Oct 2015 22:02:49 +0000 (09:02 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
		Wed, 21 Oct 2015 22:02:49 +0000 (09:02 +1100)
Bulk alloc needs such a function because it enables interrupts before
calling __slab_alloc(), which promptly disables them again using the
expensive local_irq_save().
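
As an illustration of the call pattern this enables, here is a minimal
sketch (hypothetical, not part of this patch; example_bulk_alloc() is an
invented name, while ___slab_alloc(), this_cpu_ptr(), get_freepointer()
and next_tid() are existing slub internals): interrupts are disabled once
for the whole loop, and ___slab_alloc() is called directly whenever the
per cpu freelist runs dry, instead of paying a local_irq_save()/restore()
pair inside __slab_alloc() for every slowpath object.

static int example_bulk_alloc(struct kmem_cache *s, gfp_t flags,
			      size_t size, void **p)
{
	struct kmem_cache_cpu *c;
	size_t i;

	local_irq_disable();
	/* irqs stay off, so we cannot migrate: c stays valid throughout */
	c = this_cpu_ptr(s->cpu_slab);

	for (i = 0; i < size; i++) {
		void *object = c->freelist;

		if (unlikely(!object)) {
			/* slowpath with irqs already disabled */
			object = ___slab_alloc(s, flags, NUMA_NO_NODE,
					       _RET_IP_, c);
			if (unlikely(!object))
				break;
		} else {
			c->freelist = get_freepointer(s, object);
			c->tid = next_tid(c->tid);
		}
		p[i] = object;
	}
	local_irq_enable();
	return i;	/* number of objects actually allocated */
}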

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/slub.c

index f614b5dc396bc17b43cebacd97383243bbb03b99..02cfb3a5983e849f6296034eb60b1eb27ab9851f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2298,23 +2298,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * And if we were unable to get a new slab from the partial slab lists then
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
+ *
+ * Version of __slab_alloc to use when we know that interrupts are
+ * already disabled (which is the case for bulk allocation).
  */
-static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                          unsigned long addr, struct kmem_cache_cpu *c)
 {
        void *freelist;
        struct page *page;
-       unsigned long flags;
-
-       local_irq_save(flags);
-#ifdef CONFIG_PREEMPT
-       /*
-        * We may have been preempted and rescheduled on a different
-        * cpu before disabling interrupts. Need to reload cpu area
-        * pointer.
-        */
-       c = this_cpu_ptr(s->cpu_slab);
-#endif
 
        page = c->page;
        if (!page)
@@ -2372,7 +2364,6 @@ load_freelist:
        VM_BUG_ON(!c->page->frozen);
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
-       local_irq_restore(flags);
        return freelist;
 
 new_slab:
@@ -2389,7 +2380,6 @@ new_slab:
 
        if (unlikely(!freelist)) {
                slab_out_of_memory(s, gfpflags, node);
-               local_irq_restore(flags);
                return NULL;
        }
 
@@ -2405,10 +2395,34 @@ new_slab:
        deactivate_slab(s, page, get_freepointer(s, freelist));
        c->page = NULL;
        c->freelist = NULL;
-       local_irq_restore(flags);
        return freelist;
 }
 
+/*
+ * Wrapper that disables interrupts and compensates for a possible
+ * cpu change by refetching the per cpu area pointer.
+ */
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+                         unsigned long addr, struct kmem_cache_cpu *c)
+{
+       void *p;
+       unsigned long flags;
+
+       local_irq_save(flags);
+#ifdef CONFIG_PREEMPT
+       /*
+        * We may have been preempted and rescheduled on a different
+        * cpu before disabling interrupts. Need to reload cpu area
+        * pointer.
+        */
+       c = this_cpu_ptr(s->cpu_slab);
+#endif
+
+       p = ___slab_alloc(s, gfpflags, node, addr, c);
+       local_irq_restore(flags);
+       return p;
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
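
For context on the CONFIG_PREEMPT reload in __slab_alloc() above: the
fastpath fetches the per cpu area pointer without disabling preemption
or interrupts, so the task may migrate to another cpu before
__slab_alloc() gets irqs turned off. A simplified, hypothetical
paraphrase of that caller (the real slab_alloc_node() also does a
tid-based cmpxchg fastpath, omitted here):

static void *slab_alloc_sketch(struct kmem_cache *s, gfp_t gfpflags,
			       int node, unsigned long addr)
{
	struct kmem_cache_cpu *c;
	void *object;

	/* fetched with preemption and irqs still enabled ... */
	c = raw_cpu_ptr(s->cpu_slab);
	object = c->freelist;

	if (unlikely(!object || !c->page))
		/*
		 * ... so c may point at another cpu's area by now, which
		 * is why __slab_alloc() refetches it after local_irq_save()
		 * when CONFIG_PREEMPT is set.
		 */
		object = __slab_alloc(s, gfpflags, node, addr, c);

	return object;
}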