index 03927cb..e76eee4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/fault-inject.h>
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
+#include       <linux/debugobjects.h>
 
 #include       <asm/cacheflush.h>
 #include       <asm/tlbflush.h>
 #define        BYTES_PER_WORD          sizeof(void *)
 #define        REDZONE_ALIGN           max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
-#ifndef cache_line_size
-#define cache_line_size()      L1_CACHE_BYTES
-#endif
-
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
  * Enforce a minimum alignment for the kmalloc caches.
                         SLAB_CACHE_DMA | \
                         SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+                        SLAB_DEBUG_OBJECTS)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+                        SLAB_DEBUG_OBJECTS)
 #endif
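
SLAB_DEBUG_OBJECTS has to appear in both the DEBUG and non-DEBUG branches of CREATE_MASK because kmem_cache_create() rejects any flag bit outside the mask. A minimal sketch of that validation, simplified from the real function:

	/* Simplified: kmem_cache_create() treats unknown flag bits as a
	 * caller bug, so every legitimate flag must be in CREATE_MASK. */
	BUG_ON(flags & ~CREATE_MASK);
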
 
 /*
@@ -407,7 +406,7 @@ struct kmem_cache {
        unsigned int dflags;            /* dynamic flags */
 
        /* constructor func */
-       void (*ctor)(struct kmem_cache *, void *);
+       void (*ctor)(void *obj);
 
 /* 5) cache creation/removal */
        const char *name;
@@ -862,7 +861,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
        *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
-#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
+#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
                        char *msg)
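
__FUNCTION__ is the old gcc-specific spelling; __func__ is the C99 predefined identifier and is portable across compilers. A one-line sketch of the same pattern (message text is hypothetical):

	printk(KERN_ERR "%s: cache corruption detected\n", __func__);
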
@@ -1902,15 +1901,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
 #endif
 
 #if DEBUG
-/**
- * slab_destroy_objs - destroy a slab and its objects
- * @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
- *
- * Call the registered destructor for each object in a slab that is being
- * destroyed.
- */
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
        int i;
        for (i = 0; i < cachep->num; i++) {
@@ -1939,7 +1930,7 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
        }
 }
 #else
-static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
 {
 }
 #endif
@@ -1957,7 +1948,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 {
        void *addr = slabp->s_mem - slabp->colouroff;
 
-       slab_destroy_objs(cachep, slabp);
+       slab_destroy_debugcheck(cachep, slabp);
        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
                struct slab_rcu *slab_rcu;
 
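
slab_destroy() hands the pages back through RCU only when the cache was created with SLAB_DESTROY_BY_RCU: objects may be recycled immediately, but the backing memory stays type-stable until a grace period passes, so lockless readers can safely dereference a stale pointer and then revalidate it. A minimal sketch of creating such a cache (struct and names are hypothetical):

	struct connection {		/* hypothetical type-stable object */
		spinlock_t lock;
		unsigned int key;
	};

	static struct kmem_cache *conn_cachep;

	static int __init conn_cache_init(void)
	{
		/* Readers under rcu_read_lock() must recheck ->key after
		 * dereferencing, since the object may have been reused. */
		conn_cachep = kmem_cache_create("connection",
						sizeof(struct connection), 0,
						SLAB_DESTROY_BY_RCU, NULL);
		return conn_cachep ? 0 : -ENOMEM;
	}
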
@@ -2146,8 +2137,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  */
 struct kmem_cache *
 kmem_cache_create (const char *name, size_t size, size_t align,
-       unsigned long flags,
-       void (*ctor)(struct kmem_cache *, void *))
+       unsigned long flags, void (*ctor)(void *))
 {
        size_t left_over, slab_size, ralign;
        struct kmem_cache *cachep = NULL, *pc;
@@ -2157,7 +2147,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         */
        if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
            size > KMALLOC_MAX_SIZE) {
-               printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
+               printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
                                name);
                BUG();
        }
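
With this change a constructor sees only the object pointer; the struct kmem_cache argument is gone from the calling convention. A minimal sketch of a cache using the new prototype (all names hypothetical):

	struct widget {			/* hypothetical object */
		spinlock_t lock;
		struct list_head link;
	};

	static void widget_ctor(void *obj)	/* new one-argument prototype */
	{
		struct widget *w = obj;

		spin_lock_init(&w->lock);
		INIT_LIST_HEAD(&w->link);
	}

	static struct kmem_cache *widget_cachep;

	static int __init widget_cache_init(void)
	{
		widget_cachep = kmem_cache_create("widget",
						  sizeof(struct widget), 0,
						  SLAB_HWCACHE_ALIGN,
						  widget_ctor);
		return widget_cachep ? 0 : -ENOMEM;
	}

Note that a constructor runs once when a slab page is populated, not on every allocation, so it should only establish invariants that freed objects are expected to preserve.
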
@@ -2455,7 +2445,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
        struct kmem_list3 *l3;
        int node;
 
-       on_each_cpu(do_drain, cachep, 1, 1);
+       on_each_cpu(do_drain, cachep, 1);
        check_irq_on();
        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
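
on_each_cpu() dropped its retry argument in the same series; the remaining third argument selects whether the caller blocks until the function has completed on every CPU. A minimal sketch (callback name is hypothetical):

	static void drain_local_cache(void *info)	/* hypothetical */
	{
		/* Runs on each CPU in turn, typically from IPI context. */
	}

	static void drain_everywhere(void)
	{
		on_each_cpu(drain_local_cache, NULL, 1);  /* 1 == wait */
	}
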
@@ -2662,7 +2652,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
                 * They must also be threaded.
                 */
                if (cachep->ctor && !(cachep->flags & SLAB_POISON))
-                       cachep->ctor(cachep, objp + obj_offset(cachep));
+                       cachep->ctor(objp + obj_offset(cachep));
 
                if (cachep->flags & SLAB_RED_ZONE) {
                        if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2678,7 +2668,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
                                         cachep->buffer_size / PAGE_SIZE, 0);
 #else
                if (cachep->ctor)
-                       cachep->ctor(cachep, objp);
+                       cachep->ctor(objp);
 #endif
                slab_bufctl(slabp)[i] = i + 1;
        }
@@ -3102,7 +3092,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #endif
        objp += obj_offset(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON)
-               cachep->ctor(cachep, objp);
+               cachep->ctor(objp);
 #if ARCH_SLAB_MINALIGN
        if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
                printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
@@ -3242,15 +3232,16 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
        struct zonelist *zonelist;
        gfp_t local_flags;
-       struct zone **z;
+       struct zoneref *z;
+       struct zone *zone;
+       enum zone_type high_zoneidx = gfp_zone(flags);
        void *obj = NULL;
        int nid;
 
        if (flags & __GFP_THISNODE)
                return NULL;
 
-       zonelist = &NODE_DATA(slab_node(current->mempolicy))
-                       ->node_zonelists[gfp_zone(flags)];
+       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
@@ -3258,14 +3249,17 @@ retry:
         * Look through allowed nodes for objects available
         * from existing per node queues.
         */
-       for (z = zonelist->zones; *z && !obj; z++) {
-               nid = zone_to_nid(*z);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+               nid = zone_to_nid(zone);
 
-               if (cpuset_zone_allowed_hardwall(*z, flags) &&
+               if (cpuset_zone_allowed_hardwall(zone, flags) &&
                        cache->nodelists[nid] &&
-                       cache->nodelists[nid]->free_objects)
+                       cache->nodelists[nid]->free_objects) {
                                obj = ____cache_alloc_node(cache,
                                        flags | GFP_THISNODE, nid);
+                               if (obj)
+                                       break;
+               }
        }
 
        if (!obj) {
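
for_each_zone_zonelist() replaces the old open-coded, NULL-terminated walk of zonelist->zones: the struct zoneref cursor, the zone being visited, the zonelist, and the highest zone index permitted by the GFP flags are all explicit, and the iterator itself skips zones above that index. A minimal sketch of the pattern (function name is hypothetical):

	static int find_first_node(struct zonelist *zonelist, gfp_t flags)
	{
		struct zoneref *z;
		struct zone *zone;
		enum zone_type high_zoneidx = gfp_zone(flags);

		/* Walk zones in the zonelist's priority order; zones above
		 * high_zoneidx are filtered out by the iterator. */
		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
			return zone_to_nid(zone);   /* first usable zone */

		return -1;
	}
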
@@ -3763,6 +3757,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 
        local_irq_save(flags);
        debug_check_no_locks_freed(objp, obj_size(cachep));
+       if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+               debug_check_no_obj_freed(objp, obj_size(cachep));
        __cache_free(cachep, objp);
        local_irq_restore(flags);
 }
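
debug_check_no_obj_freed() (active under CONFIG_DEBUG_OBJECTS_FREE) scans the range being freed for still-active tracked objects such as pending timers and warns before the memory can be recycled. The SLAB_DEBUG_OBJECTS test skips the scan for caches carrying that flag, which the debugobjects core sets on its own pool cache to avoid recursing into itself; kfree() below runs the check unconditionally. A minimal sketch of the bug class this catches (struct is hypothetical):

	struct foo {			/* hypothetical */
		struct timer_list timer;
	};

	static void foo_release(struct foo *f)
	{
		/*
		 * Without this, a still-pending timer inside *f is flagged
		 * by debug_check_no_obj_freed() at kfree() time, instead of
		 * corrupting freed memory when it later fires.
		 */
		del_timer_sync(&f->timer);
		kfree(f);
	}
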
@@ -3788,6 +3784,7 @@ void kfree(const void *objp)
        kfree_debugcheck(objp);
        c = virt_to_cache(objp);
        debug_check_no_locks_freed(objp, obj_size(c));
+       debug_check_no_obj_freed(objp, obj_size(c));
        __cache_free(c, (void *)objp);
        local_irq_restore(flags);
 }
@@ -3933,7 +3930,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
        }
        new->cachep = cachep;
 
-       on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
+       on_each_cpu(do_ccupdate_local, (void *)new, 1);
 
        check_irq_on();
        cachep->batchcount = batchcount;
@@ -4475,4 +4472,3 @@ size_t ksize(const void *objp)
 
        return obj_size(virt_to_cache(objp));
 }
-EXPORT_SYMBOL(ksize);