slab: fix gfp flag in setup_cpu_cache()
index 2bd611f..cd76964 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/string.h>
 #include       <linux/uaccess.h>
 #include       <linux/nodemask.h>
+#include       <linux/kmemleak.h>
 #include       <linux/mempolicy.h>
 #include       <linux/mutex.h>
 #include       <linux/fault-inject.h>
                         SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-                        SLAB_DEBUG_OBJECTS)
+                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-                        SLAB_DEBUG_OBJECTS)
+                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #endif
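
Note: SLAB_NOLEAKTRACE is added to CREATE_MASK because it is the flag the new
*_recursive hooks further down key off. The guard in <linux/kmemleak.h> looks
roughly like this (a paraphrased sketch, not a verbatim copy of the header):

	static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
						    int min_count, unsigned long flags,
						    gfp_t gfp)
	{
		/* Skip caches created with SLAB_NOLEAKTRACE (e.g. kmemleak's
		 * own internal caches) so the tracker never recurses into
		 * tracking its own allocations. */
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_alloc(ptr, size, min_count, gfp);
	}
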
 
 /*
@@ -964,6 +965,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
        struct array_cache *nc = NULL;
 
        nc = kmalloc_node(memsize, gfp, node);
+       /*
+        * The array_cache structures contain pointers to free objects.
+        * However, when such objects are allocated or transferred to another
+        * cache the pointers are not cleared and they could be counted as
+        * valid references during a kmemleak scan. Therefore, kmemleak must
+        * not scan such objects.
+        */
+       kmemleak_no_scan(nc);
        if (nc) {
                nc->avail = 0;
                nc->limit = entries;
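
Note: kmemleak_no_scan() is deliberately called before the NULL check;
kmemleak itself ignores NULL (and ERR_PTR) arguments, so the ordering is
safe. A minimal usage sketch with a hypothetical structure (the names are
illustrative, not from slab.c):

	/* A stack that keeps stale copies of popped pointers in slots[]. */
	struct ptr_stack {
		unsigned int avail;
		void *slots[64];
	};

	static struct ptr_stack *ptr_stack_create(gfp_t gfp)
	{
		struct ptr_stack *s = kmalloc(sizeof(*s), gfp);

		/* Stale slots[] entries would look like live references
		 * during a scan and could hide real leaks, so exclude the
		 * whole object from scanning. NULL is tolerated, hence no
		 * allocation-failure check is needed first. */
		kmemleak_no_scan(s);
		if (s)
			s->avail = 0;
		return s;
	}
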
@@ -2093,7 +2102,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
                        for_each_online_node(node) {
                                cachep->nodelists[node] =
                                    kmalloc_node(sizeof(struct kmem_list3),
-                                               GFP_KERNEL, node);
+                                               gfp, node);
                                BUG_ON(!cachep->nodelists[node]);
                                kmem_list3_init(cachep->nodelists[node]);
                        }
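
Note: this hunk is the fix the commit title refers to. setup_cpu_cache()
already receives the gfp mask its caller chose (GFP_NOWAIT while the slab
allocator is still bootstrapping, GFP_KERNEL once it is fully up), so the
hard-coded GFP_KERNEL could sleep in a context where sleeping is not yet
allowed. The pattern, as a hypothetical stand-alone helper:

	/* Hypothetical helper: every allocation on the path inherits the
	 * caller's mask instead of assuming GFP_KERNEL. */
	static int setup_node_lists(struct kmem_cache *cachep, gfp_t gfp)
	{
		int node;

		for_each_online_node(node) {
			struct kmem_list3 *l3 =
				kmalloc_node(sizeof(*l3), gfp, node);

			if (!l3)
				return -ENOMEM;
			kmem_list3_init(l3);
			cachep->nodelists[node] = l3;
		}
		return 0;
	}
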
@@ -2625,6 +2634,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
                /* Slab management obj is off-slab. */
                slabp = kmem_cache_alloc_node(cachep->slabp_cache,
                                              local_flags, nodeid);
+               /*
+                * If the first object in the slab is leaked (it's allocated
+                * but no one has a reference to it), we want to make sure
+                * kmemleak does not treat the ->s_mem pointer as a reference
+                * to the object. Otherwise we will not report the leak.
+                */
+               kmemleak_scan_area(slabp, offsetof(struct slab, list),
+                                  sizeof(struct list_head), local_flags);
                if (!slabp)
                        return NULL;
        } else {
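
Note: unlike kmemleak_no_scan(), kmemleak_scan_area() (which in this era
takes an offset/length pair) narrows scanning to a window inside the object
rather than disabling it outright: only the embedded list_head keeps being
scanned, while ->s_mem is hidden so that a slab whose first object leaked is
still reported. It also tolerates NULL, hence the call before the !slabp
check. A sketch with a hypothetical descriptor:

	/* Only 'link' holds pointers that should keep other tracked objects
	 * alive; 'payload' must not count as a reference. */
	struct mgmt_desc {
		struct list_head link;
		void *payload;
	};

	static struct mgmt_desc *mgmt_desc_alloc(gfp_t gfp)
	{
		struct mgmt_desc *d = kmalloc(sizeof(*d), gfp);

		/* Scan only the list_head portion of the object. */
		kmemleak_scan_area(d, offsetof(struct mgmt_desc, link),
				   sizeof(struct list_head), gfp);
		return d;
	}
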
@@ -3145,6 +3162,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
                STATS_INC_ALLOCMISS(cachep);
                objp = cache_alloc_refill(cachep, flags);
        }
+       /*
+        * To avoid a false negative, if an object that is in one of the
+        * per-CPU caches is leaked, we need to make sure kmemleak doesn't
+        * treat the array pointers as a reference to the object.
+        */
+       kmemleak_erase(&ac->entry[ac->avail]);
        return objp;
 }
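
Note: on the hit path objp was taken from ac->entry[--ac->avail], so
&ac->entry[ac->avail] is exactly the slot it came from; kmemleak_erase()
simply wipes that stale slot. Paraphrased from <linux/kmemleak.h>:

	/* Clear a stale pointer slot so a scan cannot mistake it for a
	 * live reference to the object just handed out. */
	static inline void kmemleak_erase(void **ptr)
	{
		*ptr = NULL;
	}

One caveat addressed in later kernels: cache_alloc_refill() may replace the
per-CPU array, so ac has to be re-read with cpu_cache_get(cachep) before the
erase on the miss path.
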
 
@@ -3364,6 +3387,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
   out:
        local_irq_restore(save_flags);
        ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+       kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+                                flags);
 
        if (unlikely((flags & __GFP_ZERO) && ptr))
                memset(ptr, 0, obj_size(cachep));
@@ -3419,6 +3444,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        objp = __do_cache_alloc(cachep, flags);
        local_irq_restore(save_flags);
        objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+       kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+                                flags);
        prefetchw(objp);
 
        if (unlikely((flags & __GFP_ZERO) && objp))
@@ -3534,6 +3561,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
        struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
+       kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
        /*
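
Note: the free-side hook runs before cache_free_debugcheck(), while objp is
still exactly the pointer the caller freed. Its guard mirrors the allocation
side (again paraphrased from <linux/kmemleak.h>):

	static inline void kmemleak_free_recursive(const void *ptr,
						   unsigned long flags)
	{
		/* Same SLAB_NOLEAKTRACE guard as kmemleak_alloc_recursive(). */
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_free(ptr);
	}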