kmemleak: Add the slab memory allocation/freeing hooks
index 4fc1761..859067f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/cpu.h>
 #include       <linux/sysctl.h>
 #include       <linux/module.h>
-#include       <trace/kmemtrace.h>
+#include       <linux/kmemtrace.h>
 #include       <linux/rcupdate.h>
 #include       <linux/string.h>
 #include       <linux/uaccess.h>
 #include       <linux/nodemask.h>
+#include       <linux/kmemleak.h>
 #include       <linux/mempolicy.h>
 #include       <linux/mutex.h>
 #include       <linux/fault-inject.h>
                         SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-                        SLAB_DEBUG_OBJECTS)
+                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
                         SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-                        SLAB_DEBUG_OBJECTS)
+                        SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
 #endif
 
 /*
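
SLAB_NOLEAKTRACE is added to CREATE_MASK so that kmem_cache_create() accepts
it as a valid flag. Its intended user is kmemleak itself: the caches holding
kmemleak's own metadata are created with this flag, and the hooks added below
skip such caches, so tracking never recurses into the tracker. A sketch of
creating such an opted-out cache (cache name and object size are illustrative,
not part of this patch):

	#include <linux/slab.h>
	#include <linux/list.h>
	#include <linux/errno.h>

	/* a cache whose allocations must stay invisible to kmemleak */
	static struct kmem_cache *meta_cache;

	static int __init meta_cache_init(void)
	{
		/* SLAB_NOLEAKTRACE: the slab.c hooks below skip this cache */
		meta_cache = kmem_cache_create("meta_cache",
					       sizeof(struct list_head), 0,
					       SLAB_NOLEAKTRACE, NULL);
		return meta_cache ? 0 : -ENOMEM;
	}
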
@@ -964,6 +965,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
        struct array_cache *nc = NULL;
 
        nc = kmalloc_node(memsize, GFP_KERNEL, node);
+       /*
+        * The array_cache structures contain pointers to free objects.
+        * However, when such objects are allocated or transferred to
+        * another cache, the pointers are not cleared and they could be
+        * counted as valid references during a kmemleak scan. Therefore,
+        * kmemleak must not scan such objects.
+        */
+       kmemleak_no_scan(nc);
        if (nc) {
                nc->avail = 0;
                nc->limit = entries;
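
kmemleak_no_scan() leaves the array_cache tracked as an allocation (it can
still be reported as leaked itself) but tells the scanner never to look
inside it for references. Note that it is called before the if (nc) check:
the kmemleak hooks ignore NULL and error pointers, so the call is safe on a
failed allocation. A minimal usage sketch (the helper name is hypothetical):

	#include <linux/kmemleak.h>
	#include <linux/slab.h>

	/*
	 * A stash of recycled object pointers: the stash is a normal,
	 * tracked allocation, but its contents are stale pointers that
	 * must not keep the pointed-to objects looking referenced.
	 */
	static void **alloc_ptr_stash(int entries)
	{
		void **stash = kmalloc(entries * sizeof(void *), GFP_KERNEL);

		kmemleak_no_scan(stash);	/* NULL-safe, as above */
		return stash;
	}
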
@@ -2621,6 +2630,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
                /* Slab management obj is off-slab. */
                slabp = kmem_cache_alloc_node(cachep->slabp_cache,
                                              local_flags, nodeid);
+               /*
+                * If the first object in the slab is leaked (it's allocated
+                * but no one has a reference to it), we want to make sure
+                * kmemleak does not treat the ->s_mem pointer as a reference
+                * to the object. Otherwise we will not report the leak.
+                */
+               kmemleak_scan_area(slabp, offsetof(struct slab, list),
+                                  sizeof(struct list_head), local_flags);
                if (!slabp)
                        return NULL;
        } else {
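
kmemleak_scan_area() (a four-argument call at this point in the API) narrows
scanning of the off-slab management structure to the named sub-area: the list
member links slabs together and must stay visible to the scanner, while s_mem
points at the first object and must not count as a reference to it. Like
kmemleak_no_scan(), it tolerates a NULL pointer, hence the call before the
!slabp check. The idea, on a hypothetical descriptor standing in for
struct slab:

	#include <linux/kmemleak.h>
	#include <linux/slab.h>
	#include <linux/list.h>
	#include <linux/stddef.h>	/* offsetof */

	/* hypothetical stand-in for struct slab */
	struct mgmt_desc {
		struct list_head list;	/* real references: scan this */
		unsigned long colouroff;
		void *s_mem;		/* must NOT count as a reference */
	};

	static struct mgmt_desc *alloc_mgmt(gfp_t gfp)
	{
		struct mgmt_desc *d = kmalloc(sizeof(*d), gfp);

		/* restrict scanning to d->list only */
		kmemleak_scan_area(d, offsetof(struct mgmt_desc, list),
				   sizeof(struct list_head), gfp);
		return d;
	}
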
@@ -3141,6 +3158,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
                STATS_INC_ALLOCMISS(cachep);
                objp = cache_alloc_refill(cachep, flags);
        }
+       /*
+        * To avoid a false negative, if an object in one of the per-CPU
+        * caches is leaked, we need to make sure kmemleak doesn't treat
+        * the array pointers as a reference to the object.
+        */
+       kmemleak_erase(&ac->entry[ac->avail]);
        return objp;
 }
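
kmemleak_erase() simply NULLs the array slot. The fast path (not shown in
this hunk) pops the object with objp = ac->entry[--ac->avail], so after the
pop ac->entry[ac->avail] is exactly the slot that still holds the pointer
just handed out; clearing it stops the per-CPU array from keeping the object
reachable forever, which is the false negative the comment describes. The
helper is roughly this (as defined in <linux/kmemleak.h>):

	static inline void kmemleak_erase(void **ptr)
	{
		*ptr = NULL;
	}
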
 
@@ -3360,6 +3383,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
   out:
        local_irq_restore(save_flags);
        ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+       kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+                                flags);
 
        if (unlikely((flags & __GFP_ZERO) && ptr))
                memset(ptr, 0, obj_size(cachep));
@@ -3415,6 +3440,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        objp = __do_cache_alloc(cachep, flags);
        local_irq_restore(save_flags);
        objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+       kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+                                flags);
        prefetchw(objp);
 
        if (unlikely((flags & __GFP_ZERO) && objp))
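
Both allocation paths call the same hook after cache_alloc_debugcheck_after(),
so kmemleak records the pointer the caller actually receives. The _recursive
suffix is about SLAB_NOLEAKTRACE (whitelisted in CREATE_MASK above): caches
carrying the flag are never reported to kmemleak, which is what lets kmemleak
allocate its own metadata from the slab allocator without tracking itself.
The min_count of 1 means one live reference is expected before the object
counts as leaked. The wrapper is roughly this (from <linux/kmemleak.h> of
this era):

	static inline void kmemleak_alloc_recursive(const void *ptr,
						    size_t size, int min_count,
						    unsigned long flags,
						    gfp_t gfp)
	{
		/* SLAB_NOLEAKTRACE caches are invisible to kmemleak */
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_alloc(ptr, size, min_count, gfp);
	}
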
@@ -3530,6 +3557,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
        struct array_cache *ac = cpu_cache_get(cachep);
 
        check_irq_off();
+       kmemleak_free_recursive(objp, cachep->flags);
        objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
        /*
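
The free side mirrors the allocation side: the hook runs before
cache_free_debugcheck(), i.e. while objp is still the caller-visible pointer
that kmemleak_alloc_recursive() registered, and it forwards to kmemleak_free()
only for caches without SLAB_NOLEAKTRACE. Roughly (again from
<linux/kmemleak.h>):

	static inline void kmemleak_free_recursive(const void *ptr,
						   unsigned long flags)
	{
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_free(ptr);
	}
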
@@ -3565,8 +3593,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
        void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
-       kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                            obj_size(cachep), cachep->buffer_size, flags);
+       trace_kmem_cache_alloc(_RET_IP_, ret,
+                              obj_size(cachep), cachep->buffer_size, flags);
 
        return ret;
 }
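
The remaining hunks convert the kmemtrace_mark_* calls to static tracepoints;
the KMEMTRACE_TYPE_* discriminator disappears because each event now gets its
own tracepoint (kmem_cache_alloc, kmalloc, kmem_cache_free, kfree, plus _node
variants carrying the NUMA node). From the call sites, the alloc events record
the call site, the returned pointer, the requested size, the allocated size
and the GFP flags. Note also that trace_kfree() in the final hunk is emitted
before the ZERO_OR_NULL_PTR() early return, so even kfree(NULL) shows up in
the trace. A sketch of the declaration these calls imply (the exact mechanism,
DECLARE_TRACE here versus the later TRACE_EVENT form, changed during this
period):

	DECLARE_TRACE(kmem_cache_alloc,
		      TP_PROTO(unsigned long call_site,
			       const void *ptr,
			       size_t bytes_req,	/* obj_size(cachep) */
			       size_t bytes_alloc,	/* buffer_size */
			       gfp_t gfp_flags),
		      TP_ARGS(call_site, ptr, bytes_req, bytes_alloc,
			      gfp_flags));
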
@@ -3627,9 +3655,9 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
        void *ret = __cache_alloc_node(cachep, flags, nodeid,
                                       __builtin_return_address(0));
 
-       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                                 obj_size(cachep), cachep->buffer_size,
-                                 flags, nodeid);
+       trace_kmem_cache_alloc_node(_RET_IP_, ret,
+                                   obj_size(cachep), cachep->buffer_size,
+                                   flags, nodeid);
 
        return ret;
 }
@@ -3657,9 +3685,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
                return cachep;
        ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                 (unsigned long) caller, ret,
-                                 size, cachep->buffer_size, flags, node);
+       trace_kmalloc_node((unsigned long) caller, ret,
+                          size, cachep->buffer_size, flags, node);
 
        return ret;
 }
@@ -3709,9 +3736,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                return cachep;
        ret = __cache_alloc(cachep, flags, caller);
 
-       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-                            (unsigned long) caller, ret,
-                            size, cachep->buffer_size, flags);
+       trace_kmalloc((unsigned long) caller, ret,
+                     size, cachep->buffer_size, flags);
 
        return ret;
 }
@@ -3757,7 +3783,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
        __cache_free(cachep, objp);
        local_irq_restore(flags);
 
-       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
+       trace_kmem_cache_free(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3775,6 +3801,8 @@ void kfree(const void *objp)
        struct kmem_cache *c;
        unsigned long flags;
 
+       trace_kfree(_RET_IP_, objp);
+
        if (unlikely(ZERO_OR_NULL_PTR(objp)))
                return;
        local_irq_save(flags);
@@ -3784,8 +3812,6 @@ void kfree(const void *objp)
        debug_check_no_obj_freed(objp, obj_size(c));
        __cache_free(c, (void *)objp);
        local_irq_restore(flags);
-
-       kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);