mm: kmemleak: avoid false negatives on vmalloc'ed objects
author    Catalin Marinas <catalin.marinas@arm.com>
          Tue, 5 Nov 2013 05:55:40 +0000 (16:55 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
          Tue, 5 Nov 2013 05:55:40 +0000 (16:55 +1100)
Commit 248ac0e1 ("mm/vmalloc: remove guard page from between vmap blocks")
had the side effect of making the vmap_area.va_end member point to the next
vmap_area's va_start.  This created an artificial reference to vmalloc'ed
objects, so kmemleak only rarely reported vmalloc() leaks.
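
For illustration only (not part of the patch), an abbreviated view of the
structure involved; unrelated vmap_area members are omitted:

  struct vmap_area {
          unsigned long va_start;         /* start address of the area */
          unsigned long va_end;           /* end address of the area */
          /* ... */
          struct rb_node rb_node;         /* pointers kmemleak should scan */
          struct list_head list;          /* pointers kmemleak should scan */
          /* ... */
  };

  /*
   * With the guard page gone, adjacent areas can satisfy
   * prev->va_end == next->va_start, so scanning the whole vmap_area
   * finds a value equal to the next block's start address and kmemleak
   * counts it as a reference to that block.
   */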

This patch explicitly marks the pointer-containing part of the vmap_area for
scanning and reduces the minimum ref_count to 2, since vm_struct still holds
a reference to the vmalloc'ed object.  The kmemleak add_scan_area() function
has been extended to accept a SIZE_MAX argument covering the rest of the
object, which keeps the calling sites simple.
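
As a rough, hypothetical usage sketch of the extended interface: struct foo,
its fields and foo_create() are made up for illustration; only
kmemleak_scan_area() and the new SIZE_MAX meaning come from the patch.

  #include <linux/kernel.h>
  #include <linux/kmemleak.h>
  #include <linux/list.h>
  #include <linux/slab.h>

  /* Hypothetical object: only the tail holds pointers to other objects. */
  struct foo {
          unsigned long stats[4];         /* plain counters, nothing to scan */
          struct list_head node;          /* pointers kmemleak must see */
          void *private;
  };

  static struct foo *foo_create(void)
  {
          struct foo *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

          if (!obj)
                  return NULL;
          /*
           * Restrict scanning to the pointer-bearing tail of the object;
           * SIZE_MAX now means "from this address to the end of the
           * object", so the caller no longer computes the remaining size.
           */
          kmemleak_scan_area(&obj->node, SIZE_MAX, GFP_KERNEL);
          return obj;
  }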

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/kmemleak.c
mm/vmalloc.c

diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index e126b0ef9ad20023d6a8d3ff505ae71ad96fdaa0..31f01c5011e59414e95b888b2b3d515940ab72ab 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -753,7 +753,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
        }
 
        spin_lock_irqsave(&object->lock, flags);
-       if (ptr + size > object->pointer + object->size) {
+       if (size == SIZE_MAX) {
+               size = object->pointer + object->size - ptr;
+       } else if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 745fa9567475cddbca9b905df35984ad972e33bc..0fdf96803c5b59623792a24e57015fb0e25098bb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -359,6 +359,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
        if (unlikely(!va))
                return ERR_PTR(-ENOMEM);
 
+       /*
+        * Only scan the relevant parts containing pointers to other objects
+        * to avoid false negatives.
+        */
+       kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+
 retry:
        spin_lock(&vmap_area_lock);
        /*
@@ -1645,11 +1651,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        clear_vm_uninitialized_flag(area);
 
        /*
-        * A ref_count = 3 is needed because the vm_struct and vmap_area
-        * structures allocated in the __get_vm_area_node() function contain
-        * references to the virtual address of the vmalloc'ed block.
+        * A ref_count = 2 is needed because vm_struct allocated in
+        * __get_vm_area_node() contains a reference to the virtual address of
+        * the vmalloc'ed block.
         */
-       kmemleak_alloc(addr, real_size, 3, gfp_mask);
+       kmemleak_alloc(addr, real_size, 2, gfp_mask);
 
        return addr;
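
For context, a hedged sketch of how the min_count argument passed to
kmemleak_alloc() is usually chosen; my_alloc(), my_free() and the backend
functions below are hypothetical, only the kmemleak calls are real.

  #include <linux/gfp.h>
  #include <linux/kmemleak.h>
  #include <linux/types.h>

  /* Hypothetical backend allocator, declared only to keep the sketch whole. */
  void *my_backend_alloc(size_t size);
  void my_backend_free(void *ptr);

  /*
   * min_count is the number of references kmemleak must find while scanning
   * before it stops treating the block as a possible leak.  A plain wrapper
   * expects only the caller's saved pointer, so it passes 1;
   * __vmalloc_node_range() can pass 2 because vm_struct->addr always adds a
   * second reference.
   */
  static void *my_alloc(size_t size)
  {
          void *p = my_backend_alloc(size);

          if (p)
                  kmemleak_alloc(p, size, 1, GFP_KERNEL);
          return p;
  }

  static void my_free(void *p)
  {
          kmemleak_free(p);       /* tell kmemleak before the block goes away */
          my_backend_free(p);
  }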