[PATCH] initialise total_memory() earlier
diff --git a/mm/slab.c b/mm/slab.c
index 6b691ec..664c3a1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -94,6 +94,7 @@
 #include       <linux/interrupt.h>
 #include       <linux/init.h>
 #include       <linux/compiler.h>
+#include       <linux/cpuset.h>
 #include       <linux/seq_file.h>
 #include       <linux/notifier.h>
 #include       <linux/kallsyms.h>
                         SLAB_CACHE_DMA | \
                         SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
 
 /*
 typedef unsigned int kmem_bufctl_t;
 #define BUFCTL_END     (((kmem_bufctl_t)(~0U))-0)
 #define BUFCTL_FREE    (((kmem_bufctl_t)(~0U))-1)
-#define        SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-2)
-
-/* Max number of objs-per-slab for caches which use off-slab slabs.
- * Needed to avoid a possible looping condition in cache_grow().
- */
-static unsigned long offslab_limit;
+#define        BUFCTL_ACTIVE   (((kmem_bufctl_t)(~0U))-2)
+#define        SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-3)
 
 /*
  * struct slab
@@ -334,6 +331,8 @@ static __always_inline int index_of(const size_t size)
        return 0;
 }
 
+static int slab_early_init = 1;
+
 #define INDEX_AC index_of(sizeof(struct arraycache_init))
 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
 
@@ -418,6 +417,7 @@ struct kmem_cache {
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
+       unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
@@ -463,6 +463,7 @@ struct kmem_cache {
 #define        STATS_INC_ERR(x)        ((x)->errors++)
 #define        STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
 #define        STATS_INC_NODEFREES(x)  ((x)->node_frees++)
+#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
 #define        STATS_SET_FREEABLE(x, i)                                        \
        do {                                                            \
                if ((x)->max_freeable < i)                              \
@@ -482,6 +483,7 @@ struct kmem_cache {
 #define        STATS_INC_ERR(x)        do { } while (0)
 #define        STATS_INC_NODEALLOCS(x) do { } while (0)
 #define        STATS_INC_NODEFREES(x)  do { } while (0)
+#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
 #define        STATS_SET_FREEABLE(x, i) do { } while (0)
 #define STATS_INC_ALLOCHIT(x)  do { } while (0)
 #define STATS_INC_ALLOCMISS(x) do { } while (0)
@@ -592,6 +594,7 @@ static inline struct kmem_cache *page_get_cache(struct page *page)
 {
        if (unlikely(PageCompound(page)))
                page = (struct page *)page_private(page);
+       BUG_ON(!PageSlab(page));
        return (struct kmem_cache *)page->lru.next;
 }
 
@@ -604,6 +607,7 @@ static inline struct slab *page_get_slab(struct page *page)
 {
        if (unlikely(PageCompound(page)))
                page = (struct page *)page_private(page);
+       BUG_ON(!PageSlab(page));
        return (struct slab *)page->lru.prev;
 }
 
@@ -695,6 +699,14 @@ static enum {
        FULL
 } g_cpucache_up;
 
+/*
+ * used by boot code to determine if it can use the slab based allocator
+ */
+int slab_is_available(void)
+{
+       return g_cpucache_up == FULL;
+}
+
 static DEFINE_PER_CPU(struct work_struct, reap_work);
 
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
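
For context, slab_is_available() gives early setup code a single predicate for choosing an allocator before and after kmem_cache_init() has finished. A minimal sketch of such a caller follows; the helper name early_alloc() and the bootmem fallback are illustrative assumptions, not part of this patch:

	/* Hypothetical early-boot helper: use the slab allocator once it is
	 * up (g_cpucache_up == FULL), otherwise fall back to bootmem. */
	static void * __init early_alloc(unsigned long size)
	{
		if (slab_is_available())
			return kmalloc(size, GFP_KERNEL);
		return alloc_bootmem(size);	/* assumes <linux/bootmem.h> */
	}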
@@ -830,7 +842,7 @@ static void init_reap_node(int cpu)
 
        node = next_node(cpu_to_node(cpu), node_online_map);
        if (node == MAX_NUMNODES)
-               node = 0;
+               node = first_node(node_online_map);
 
        __get_cpu_var(reap_node) = node;
 }
@@ -896,8 +908,33 @@ static struct array_cache *alloc_arraycache(int node, int entries,
        return nc;
 }
 
+/*
+ * Transfer objects from one arraycache to another.
+ * Locking must be handled by the caller.
+ *
+ * Return the number of entries transferred.
+ */
+static int transfer_objects(struct array_cache *to,
+               struct array_cache *from, unsigned int max)
+{
+       /* Figure out how many entries to transfer */
+       int nr = min(min(from->avail, max), to->limit - to->avail);
+
+       if (!nr)
+               return 0;
+
+       memcpy(to->entry + to->avail, from->entry + from->avail - nr,
+                       sizeof(void *) * nr);
+
+       from->avail -= nr;
+       to->avail += nr;
+       to->touched = 1;
+       return nr;
+}
+
 #ifdef CONFIG_NUMA
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
+static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
 static struct array_cache **alloc_alien_cache(int node, int limit)
 {
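
The arithmetic in transfer_objects() is easy to misread, so here is a stand-alone user-space sketch of the same logic with concrete numbers; it is an illustration only, not kernel code, and the structure name and values are made up:

	#include <stdio.h>
	#include <string.h>

	struct ac { unsigned int avail, limit; void *entry[128]; };

	/* nr = min(min(from->avail, max), to->limit - to->avail); the last
	 * nr pointers of 'from' are appended to 'to'. */
	static unsigned int transfer(struct ac *to, struct ac *from, unsigned int max)
	{
		unsigned int room = to->limit - to->avail;
		unsigned int nr = from->avail < max ? from->avail : max;

		if (nr > room)
			nr = room;
		if (!nr)
			return 0;
		memcpy(to->entry + to->avail, from->entry + from->avail - nr,
		       sizeof(void *) * nr);
		from->avail -= nr;
		to->avail += nr;
		return nr;
	}

	int main(void)
	{
		struct ac shared = { .avail = 30, .limit = 128 };
		struct ac local  = { .avail = 112, .limit = 120 };

		/* min(min(30, 16), 120 - 112) == 8 objects move across */
		printf("moved %u\n", transfer(&local, &shared, 16));
		return 0;
	}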
@@ -944,6 +981,14 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 
        if (ac->avail) {
                spin_lock(&rl3->list_lock);
+               /*
+                * Stuff objects into the remote node's shared array first.
+                * That way we could avoid the overhead of putting the objects
+                * into the free lists and getting them back later.
+                */
+               if (rl3->shared)
+                       transfer_objects(rl3->shared, ac, ac->limit);
+
                free_block(cachep, ac->entry, ac->avail, node);
                ac->avail = 0;
                spin_unlock(&rl3->list_lock);
@@ -959,8 +1004,8 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 
        if (l3->alien) {
                struct array_cache *ac = l3->alien[node];
-               if (ac && ac->avail) {
-                       spin_lock_irq(&ac->lock);
+
+               if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
                        __drain_alien_cache(cachep, ac, node);
                        spin_unlock_irq(&ac->lock);
                }
@@ -983,6 +1028,40 @@ static void drain_alien_cache(struct kmem_cache *cachep,
                }
        }
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+       struct slab *slabp = virt_to_slab(objp);
+       int nodeid = slabp->nodeid;
+       struct kmem_list3 *l3;
+       struct array_cache *alien = NULL;
+
+       /*
+        * Make sure we are not freeing an object from another node to the array
+        * cache on this cpu.
+        */
+       if (likely(slabp->nodeid == numa_node_id()))
+               return 0;
+
+       l3 = cachep->nodelists[numa_node_id()];
+       STATS_INC_NODEFREES(cachep);
+       if (l3->alien && l3->alien[nodeid]) {
+               alien = l3->alien[nodeid];
+               spin_lock(&alien->lock);
+               if (unlikely(alien->avail == alien->limit)) {
+                       STATS_INC_ACOVERFLOW(cachep);
+                       __drain_alien_cache(cachep, alien, nodeid);
+               }
+               alien->entry[alien->avail++] = objp;
+               spin_unlock(&alien->lock);
+       } else {
+               spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+               free_block(cachep, &objp, 1, nodeid);
+               spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+       }
+       return 1;
+}
+
 #else
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
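
Taken together with the __cache_free() change further down, the free path now works out as follows; this is only an outline of the code above, for readability, not new kernel code:

	/*
	 *   kmem_cache_free() -> __cache_free() -> cache_free_alien()
	 *
	 *   object's slab node == numa_node_id()
	 *       -> return 0; __cache_free() keeps it in the local array cache
	 *   remote object, l3->alien[nodeid] exists
	 *       -> queue it there (drained in batches later), calling
	 *          __drain_alien_cache() first if the alien cache is full
	 *   remote object, no alien cache
	 *       -> free_block() directly onto the remote node's lists
	 */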
@@ -997,9 +1076,14 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+       return 0;
+}
+
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
                                    unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
@@ -1263,8 +1347,7 @@ void __init kmem_cache_init(void)
                if (cache_cache.num)
                        break;
        }
-       if (!cache_cache.num)
-               BUG();
+       BUG_ON(!cache_cache.num);
        cache_cache.gfporder = order;
        cache_cache.colour = left_over / cache_cache.colour_off;
        cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
@@ -1295,6 +1378,8 @@ void __init kmem_cache_init(void)
                                NULL, NULL);
        }
 
+       slab_early_init = 0;
+
        while (sizes->cs_size != ULONG_MAX) {
                /*
                 * For performance, all the general caches are L1 aligned.
@@ -1311,12 +1396,6 @@ void __init kmem_cache_init(void)
                                        NULL, NULL);
                }
 
-               /* Inc off-slab bufctl limit until the ceiling is hit. */
-               if (!(OFF_SLAB(sizes->cs_cachep))) {
-                       offslab_limit = sizes->cs_size - sizeof(struct slab);
-                       offslab_limit /= sizeof(kmem_bufctl_t);
-               }
-
                sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
                                        sizes->cs_size,
                                        ARCH_KMALLOC_MINALIGN,
@@ -1416,24 +1495,29 @@ __initcall(cpucache_init);
 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
        struct page *page;
-       void *addr;
+       int nr_pages;
        int i;
 
+#ifndef CONFIG_MMU
+       /*
+        * Nommu uses slabs for process anonymous memory allocations, and thus
+        * requires __GFP_COMP to properly refcount higher order allocations
+        */
+       flags |= __GFP_COMP;
+#endif
        flags |= cachep->gfpflags;
+
        page = alloc_pages_node(nodeid, flags, cachep->gfporder);
        if (!page)
                return NULL;
-       addr = page_address(page);
 
-       i = (1 << cachep->gfporder);
+       nr_pages = (1 << cachep->gfporder);
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-               atomic_add(i, &slab_reclaim_pages);
-       add_page_state(nr_slab, i);
-       while (i--) {
-               __SetPageSlab(page);
-               page++;
-       }
-       return addr;
+               atomic_add(nr_pages, &slab_reclaim_pages);
+       add_page_state(nr_slab, nr_pages);
+       for (i = 0; i < nr_pages; i++)
+               __SetPageSlab(page + i);
+       return page_address(page);
 }
 
 /*
@@ -1728,6 +1812,7 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
 static size_t calculate_slab_order(struct kmem_cache *cachep,
                        size_t size, size_t align, unsigned long flags)
 {
+       unsigned long offslab_limit;
        size_t left_over = 0;
        int gfporder;
 
@@ -1739,9 +1824,18 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
                if (!num)
                        continue;
 
-               /* More than offslab_limit objects will cause problems */
-               if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
-                       break;
+               if (flags & CFLGS_OFF_SLAB) {
+                       /*
+                        * Max number of objs-per-slab for caches which
+                        * use off-slab slabs. Needed to avoid a possible
+                        * looping condition in cache_grow().
+                        */
+                       offslab_limit = size - sizeof(struct slab);
+                       offslab_limit /= sizeof(kmem_bufctl_t);
+
+                       if (num > offslab_limit)
+                               break;
+               }
 
                /* Found something acceptable - save it away */
                cachep->num = num;
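
A worked instance of that ceiling may help; the struct sizes below are assumptions for a typical 32-bit build, not values taken from this patch:

	/*
	 * Assume sizeof(struct slab) == 28 and sizeof(kmem_bufctl_t) == 4.
	 * For a cache of 256-byte objects whose slab management is off-slab:
	 *
	 *	offslab_limit = (256 - 28) / 4 = 57
	 *
	 * so any gfporder that would pack more than 57 objects into one slab
	 * is rejected, which avoids the cache_grow() looping condition the
	 * comment refers to.
	 */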
@@ -1862,8 +1956,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        void (*dtor)(void*, struct kmem_cache *, unsigned long))
 {
        size_t left_over, slab_size, ralign;
-       struct kmem_cache *cachep = NULL;
-       struct list_head *p;
+       struct kmem_cache *cachep = NULL, *pc;
 
        /*
         * Sanity checks... these are all serious usage bugs.
@@ -1883,8 +1976,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
        mutex_lock(&cache_chain_mutex);
 
-       list_for_each(p, &cache_chain) {
-               struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
+       list_for_each_entry(pc, &cache_chain, next) {
                mm_segment_t old_fs = get_fs();
                char tmp;
                int res;
@@ -1940,8 +2032,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         * Always checks flags, a caller might be expecting debug support which
         * isn't available.
         */
-       if (flags & ~CREATE_MASK)
-               BUG();
+       BUG_ON(flags & ~CREATE_MASK);
 
        /*
         * Check that size is in terms of words.  This is needed to avoid
@@ -1987,10 +2078,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        align = ralign;
 
        /* Get cache's description obj. */
-       cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+       cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
        if (!cachep)
                goto oops;
-       memset(cachep, 0, sizeof(struct kmem_cache));
 
 #if DEBUG
        cachep->obj_size = size;
@@ -2020,8 +2110,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 #endif
 #endif
 
-       /* Determine if the slab management is 'on' or 'off' slab. */
-       if (size >= (PAGE_SIZE >> 3))
+       /*
+        * Determine if the slab management is 'on' or 'off' slab.
+        * (bootstrapping cannot cope with offslab caches so don't do
+        * it too early on.)
+        */
+       if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
                /*
                 * Size is large, assume best to place the slab management obj
                 * off-slab (should allow better packing of objs).
@@ -2150,11 +2244,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
        check_irq_on();
        for_each_online_node(node) {
                l3 = cachep->nodelists[node];
-               if (l3) {
+               if (l3 && l3->alien)
+                       drain_alien_cache(cachep, l3->alien);
+       }
+
+       for_each_online_node(node) {
+               l3 = cachep->nodelists[node];
+               if (l3)
                        drain_array(cachep, l3, l3->shared, 1, node);
-                       if (l3->alien)
-                               drain_alien_cache(cachep, l3->alien);
-               }
        }
 }
 
@@ -2173,8 +2270,7 @@ static int __node_shrink(struct kmem_cache *cachep, int node)
 
                slabp = list_entry(l3->slabs_free.prev, struct slab, list);
 #if DEBUG
-               if (slabp->inuse)
-                       BUG();
+               BUG_ON(slabp->inuse);
 #endif
                list_del(&slabp->list);
 
@@ -2215,8 +2311,7 @@ static int __cache_shrink(struct kmem_cache *cachep)
  */
 int kmem_cache_shrink(struct kmem_cache *cachep)
 {
-       if (!cachep || in_interrupt())
-               BUG();
+       BUG_ON(!cachep || in_interrupt());
 
        return __cache_shrink(cachep);
 }
@@ -2244,8 +2339,7 @@ int kmem_cache_destroy(struct kmem_cache *cachep)
        int i;
        struct kmem_list3 *l3;
 
-       if (!cachep || in_interrupt())
-               BUG();
+       BUG_ON(!cachep || in_interrupt());
 
        /* Don't let CPUs to come and go */
        lock_cpu_hotplug();
@@ -2290,13 +2384,15 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 
 /* Get the memory for a slab management obj. */
 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
-                                  int colour_off, gfp_t local_flags)
+                                  int colour_off, gfp_t local_flags,
+                                  int nodeid)
 {
        struct slab *slabp;
 
        if (OFF_SLAB(cachep)) {
                /* Slab management obj is off-slab. */
-               slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
+               slabp = kmem_cache_alloc_node(cachep->slabp_cache,
+                                             local_flags, nodeid);
                if (!slabp)
                        return NULL;
        } else {
@@ -2306,6 +2402,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
        slabp->inuse = 0;
        slabp->colouroff = colour_off;
        slabp->s_mem = objp + colour_off;
+       slabp->nodeid = nodeid;
        return slabp;
 }
 
@@ -2397,7 +2494,7 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
        /* Verify that the slab belongs to the intended node */
        WARN_ON(slabp->nodeid != nodeid);
 
-       if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+       if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
                printk(KERN_ERR "slab: double free detected in cache "
                                "'%s', objp %p\n", cachep->name, objp);
                BUG();
@@ -2408,23 +2505,28 @@ static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
        slabp->inuse--;
 }
 
-static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
-                       void *objp)
+/*
+ * Map pages beginning at addr to the given cache and slab. This is required
+ * for the slab allocator to be able to look up the cache and slab of a
+ * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
+ */
+static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
+                          void *addr)
 {
-       int i;
+       int nr_pages;
        struct page *page;
 
-       /* Nasty!!!!!! I hope this is OK. */
-       page = virt_to_page(objp);
+       page = virt_to_page(addr);
 
-       i = 1;
+       nr_pages = 1;
        if (likely(!PageCompound(page)))
-               i <<= cachep->gfporder;
+               nr_pages <<= cache->gfporder;
+
        do {
-               page_set_cache(page, cachep);
-               page_set_slab(page, slabp);
+               page_set_cache(page, cache);
+               page_set_slab(page, slab);
                page++;
-       } while (--i);
+       } while (--nr_pages);
 }
 
 /*
@@ -2444,8 +2546,7 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
         * Be lazy and only check for valid flags here,  keeping it out of the
         * critical path in kmem_cache_alloc().
         */
-       if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
-               BUG();
+       BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
        if (flags & SLAB_NO_GROW)
                return 0;
 
@@ -2492,12 +2593,12 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
                goto failed;
 
        /* Get slab management. */
-       slabp = alloc_slabmgmt(cachep, objp, offset, local_flags);
+       slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
        if (!slabp)
                goto opps1;
 
        slabp->nodeid = nodeid;
-       set_slab_attr(cachep, slabp, objp);
+       slab_map_pages(cachep, slabp, objp);
 
        cache_init_objs(cachep, slabp, ctor_flags);
 
@@ -2545,6 +2646,28 @@ static void kfree_debugcheck(const void *objp)
        }
 }
 
+static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
+{
+       unsigned long redzone1, redzone2;
+
+       redzone1 = *dbg_redzone1(cache, obj);
+       redzone2 = *dbg_redzone2(cache, obj);
+
+       /*
+        * Redzone is ok.
+        */
+       if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
+               return;
+
+       if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
+               slab_error(cache, "double free detected");
+       else
+               slab_error(cache, "memory outside object was overwritten");
+
+       printk(KERN_ERR "%p: redzone 1:0x%lx, redzone 2:0x%lx.\n",
+                       obj, redzone1, redzone2);
+}
+
 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
                                   void *caller)
 {
@@ -2556,27 +2679,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
        kfree_debugcheck(objp);
        page = virt_to_page(objp);
 
-       if (page_get_cache(page) != cachep) {
-               printk(KERN_ERR "mismatch in kmem_cache_free: expected "
-                               "cache %p, got %p\n",
-                      page_get_cache(page), cachep);
-               printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-               printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
-                      page_get_cache(page)->name);
-               WARN_ON(1);
-       }
        slabp = page_get_slab(page);
 
        if (cachep->flags & SLAB_RED_ZONE) {
-               if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
-                               *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
-                       slab_error(cachep, "double free, or memory outside"
-                                               " object was overwritten");
-                       printk(KERN_ERR "%p: redzone 1:0x%lx, "
-                                       "redzone 2:0x%lx.\n",
-                              objp, *dbg_redzone1(cachep, objp),
-                              *dbg_redzone2(cachep, objp));
-               }
+               verify_redzone_free(cachep, objp);
                *dbg_redzone1(cachep, objp) = RED_INACTIVE;
                *dbg_redzone2(cachep, objp) = RED_INACTIVE;
        }
@@ -2603,6 +2709,9 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
                 */
                cachep->dtor(objp + obj_offset(cachep), cachep, 0);
        }
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+       slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
+#endif
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2675,20 +2784,10 @@ retry:
        BUG_ON(ac->avail > 0 || !l3);
        spin_lock(&l3->list_lock);
 
-       if (l3->shared) {
-               struct array_cache *shared_array = l3->shared;
-               if (shared_array->avail) {
-                       if (batchcount > shared_array->avail)
-                               batchcount = shared_array->avail;
-                       shared_array->avail -= batchcount;
-                       ac->avail = batchcount;
-                       memcpy(ac->entry,
-                              &(shared_array->entry[shared_array->avail]),
-                              sizeof(void *) * batchcount);
-                       shared_array->touched = 1;
-                       goto alloc_done;
-               }
-       }
+       /* See if we can refill from the shared array */
+       if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+               goto alloc_done;
+
        while (batchcount > 0) {
                struct list_head *entry;
                struct slab *slabp;
@@ -2786,6 +2885,16 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
        }
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+       {
+               struct slab *slabp;
+               unsigned objnr;
+
+               slabp = page_get_slab(virt_to_page(objp));
+               objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+               slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
+       }
+#endif
        objp += obj_offset(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON) {
                unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -2807,11 +2916,10 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
        struct array_cache *ac;
 
 #ifdef CONFIG_NUMA
-       if (unlikely(current->mempolicy && !in_interrupt())) {
-               int nid = slab_node(current->mempolicy);
-
-               if (nid != numa_node_id())
-                       return __cache_alloc_node(cachep, flags, nid);
+       if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+               objp = alternate_node_alloc(cachep, flags);
+               if (objp != NULL)
+                       return objp;
        }
 #endif
 
@@ -2847,6 +2955,28 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
 
 #ifdef CONFIG_NUMA
 /*
+ * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
+ *
+ * If we are in_interrupt, then process context, including cpusets and
+ * mempolicy, may not apply and should not be used for allocation policy.
+ */
+static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+       int nid_alloc, nid_here;
+
+       if (in_interrupt())
+               return NULL;
+       nid_alloc = nid_here = numa_node_id();
+       if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
+               nid_alloc = cpuset_mem_spread_node();
+       else if (current->mempolicy)
+               nid_alloc = slab_node(current->mempolicy);
+       if (nid_alloc != nid_here)
+               return __cache_alloc_node(cachep, flags, nid_alloc);
+       return NULL;
+}
+
+/*
  * A interface to enable slab creation on nodeid
  */
 static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
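
The decision ladder in alternate_node_alloc() reads more easily laid out flat; again an outline of the code above, not new code:

	/*
	 *   in_interrupt()                                 -> NULL (stay local)
	 *   cpuset_do_slab_mem_spread() && SLAB_MEM_SPREAD -> cpuset_mem_spread_node()
	 *   current->mempolicy                             -> slab_node(mempolicy)
	 *   chosen node == numa_node_id()                  -> NULL (use the
	 *                                                      per-cpu array cache)
	 *   otherwise                                      -> __cache_alloc_node(nid_alloc)
	 */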
@@ -3012,39 +3142,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
        check_irq_off();
        objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-       /* Make sure we are not freeing a object from another
-        * node to the array cache on this cpu.
-        */
-#ifdef CONFIG_NUMA
-       {
-               struct slab *slabp;
-               slabp = virt_to_slab(objp);
-               if (unlikely(slabp->nodeid != numa_node_id())) {
-                       struct array_cache *alien = NULL;
-                       int nodeid = slabp->nodeid;
-                       struct kmem_list3 *l3;
-
-                       l3 = cachep->nodelists[numa_node_id()];
-                       STATS_INC_NODEFREES(cachep);
-                       if (l3->alien && l3->alien[nodeid]) {
-                               alien = l3->alien[nodeid];
-                               spin_lock(&alien->lock);
-                               if (unlikely(alien->avail == alien->limit))
-                                       __drain_alien_cache(cachep,
-                                                           alien, nodeid);
-                               alien->entry[alien->avail++] = objp;
-                               spin_unlock(&alien->lock);
-                       } else {
-                               spin_lock(&(cachep->nodelists[nodeid])->
-                                         list_lock);
-                               free_block(cachep, &objp, 1, nodeid);
-                               spin_unlock(&(cachep->nodelists[nodeid])->
-                                           list_lock);
-                       }
-                       return;
-               }
-       }
-#endif
+       if (cache_free_alien(cachep, objp))
+               return;
+
        if (likely(ac->avail < ac->limit)) {
                STATS_INC_FREEHIT(cachep);
                ac->entry[ac->avail++] = objp;
@@ -3071,6 +3171,23 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
+ * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
+ * @cache: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache and set the allocated memory to zero.
+ * The flags are only relevant if the cache has no available objects.
+ */
+void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
+{
+       void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
+       if (ret)
+               memset(ret, 0, obj_size(cache));
+       return ret;
+}
+EXPORT_SYMBOL(kmem_cache_zalloc);
+
+/**
  * kmem_ptr_validate - check if an untrusted pointer might
  *     be a slab entry.
  * @cachep: the cache we're checking against
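
For callers, kmem_cache_zalloc() simply folds the usual allocate-then-clear pattern into one call. A short usage sketch, where struct foo and foo_cache are made-up names for illustration:

	struct foo *p;

	/* Open-coded pattern the helper replaces: */
	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
	if (p)
		memset(p, 0, sizeof(*p));

	/* Equivalent with the new helper; the memset of obj_size() bytes
	 * happens inside the allocator: */
	p = kmem_cache_zalloc(foo_cache, GFP_KERNEL);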
@@ -3197,22 +3314,23 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
        return __cache_alloc(cachep, flags, caller);
 }
 
-#ifndef CONFIG_DEBUG_SLAB
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
+#ifndef CONFIG_DEBUG_SLAB
        return __do_kmalloc(size, flags, NULL);
+#else
+       return __do_kmalloc(size, flags, __builtin_return_address(0));
+#endif
 }
 EXPORT_SYMBOL(__kmalloc);
 
-#else
-
+#ifdef CONFIG_DEBUG_SLAB
 void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 {
        return __do_kmalloc(size, flags, caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
-
 #endif
 
 #ifdef CONFIG_SMP
@@ -3236,7 +3354,7 @@ void *__alloc_percpu(size_t size)
         * and we have no way of figuring out how to fix the array
         * that we have allocated then....
         */
-       for_each_cpu(i) {
+       for_each_possible_cpu(i) {
                int node = cpu_to_node(i);
 
                if (node_online(node))
@@ -3276,6 +3394,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
        unsigned long flags;
 
+       BUG_ON(virt_to_cache(objp) != cachep);
+
        local_irq_save(flags);
        __cache_free(cachep, objp);
        local_irq_restore(flags);
@@ -3323,7 +3443,7 @@ void free_percpu(const void *objp)
        /*
         * We allocate for all cpus so we cannot use for online cpu here.
         */
-       for_each_cpu(i)
+       for_each_possible_cpu(i)
            kfree(p->ptrs[i]);
        kfree(p);
 }
@@ -3343,63 +3463,86 @@ const char *kmem_cache_name(struct kmem_cache *cachep)
 EXPORT_SYMBOL_GPL(kmem_cache_name);
 
 /*
- * This initializes kmem_list3 for all nodes.
+ * This initializes kmem_list3 or resizes various caches for all nodes.
  */
 static int alloc_kmemlist(struct kmem_cache *cachep)
 {
        int node;
        struct kmem_list3 *l3;
-       int err = 0;
+       struct array_cache *new_shared;
+       struct array_cache **new_alien;
 
        for_each_online_node(node) {
-               struct array_cache *nc = NULL, *new;
-               struct array_cache **new_alien = NULL;
-#ifdef CONFIG_NUMA
+
                new_alien = alloc_alien_cache(node, cachep->limit);
                if (!new_alien)
                        goto fail;
-#endif
-               new = alloc_arraycache(node, cachep->shared*cachep->batchcount,
+
+               new_shared = alloc_arraycache(node,
+                               cachep->shared*cachep->batchcount,
                                        0xbaadf00d);
-               if (!new)
+               if (!new_shared) {
+                       free_alien_cache(new_alien);
                        goto fail;
+               }
+
                l3 = cachep->nodelists[node];
                if (l3) {
+                       struct array_cache *shared = l3->shared;
+
                        spin_lock_irq(&l3->list_lock);
 
-                       nc = cachep->nodelists[node]->shared;
-                       if (nc)
-                               free_block(cachep, nc->entry, nc->avail, node);
+                       if (shared)
+                               free_block(cachep, shared->entry,
+                                               shared->avail, node);
 
-                       l3->shared = new;
-                       if (!cachep->nodelists[node]->alien) {
+                       l3->shared = new_shared;
+                       if (!l3->alien) {
                                l3->alien = new_alien;
                                new_alien = NULL;
                        }
                        l3->free_limit = (1 + nr_cpus_node(node)) *
                                        cachep->batchcount + cachep->num;
                        spin_unlock_irq(&l3->list_lock);
-                       kfree(nc);
+                       kfree(shared);
                        free_alien_cache(new_alien);
                        continue;
                }
                l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
-               if (!l3)
+               if (!l3) {
+                       free_alien_cache(new_alien);
+                       kfree(new_shared);
                        goto fail;
+               }
 
                kmem_list3_init(l3);
                l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
                                ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-               l3->shared = new;
+               l3->shared = new_shared;
                l3->alien = new_alien;
                l3->free_limit = (1 + nr_cpus_node(node)) *
                                        cachep->batchcount + cachep->num;
                cachep->nodelists[node] = l3;
        }
-       return err;
+       return 0;
+
 fail:
-       err = -ENOMEM;
-       return err;
+       if (!cachep->next.next) {
+               /* Cache is not active yet. Roll back what we did */
+               node--;
+               while (node >= 0) {
+                       if (cachep->nodelists[node]) {
+                               l3 = cachep->nodelists[node];
+
+                               kfree(l3->shared);
+                               free_alien_cache(l3->alien);
+                               kfree(l3);
+                               cachep->nodelists[node] = NULL;
+                       }
+                       node--;
+               }
+       }
+       return -ENOMEM;
 }
 
 struct ccupdate_struct {
@@ -3562,7 +3705,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  */
 static void cache_reap(void *unused)
 {
-       struct list_head *walk;
+       struct kmem_cache *searchp;
        struct kmem_list3 *l3;
        int node = numa_node_id();
 
@@ -3573,13 +3716,11 @@ static void cache_reap(void *unused)
                return;
        }
 
-       list_for_each(walk, &cache_chain) {
-               struct kmem_cache *searchp;
+       list_for_each_entry(searchp, &cache_chain, next) {
                struct list_head *p;
                int tofree;
                struct slab *slabp;
 
-               searchp = list_entry(walk, struct kmem_cache, next);
                check_irq_on();
 
                /*
@@ -3668,7 +3809,7 @@ static void print_slabinfo_header(struct seq_file *m)
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #if STATS
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-                "<error> <maxfreeable> <nodeallocs> <remotefrees>");
+                "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
        seq_putc(m, '\n');
@@ -3707,7 +3848,6 @@ static void s_stop(struct seq_file *m, void *p)
 static int s_show(struct seq_file *m, void *p)
 {
        struct kmem_cache *cachep = p;
-       struct list_head *q;
        struct slab *slabp;
        unsigned long active_objs;
        unsigned long num_objs;
@@ -3728,15 +3868,13 @@ static int s_show(struct seq_file *m, void *p)
                check_irq_on();
                spin_lock_irq(&l3->list_lock);
 
-               list_for_each(q, &l3->slabs_full) {
-                       slabp = list_entry(q, struct slab, list);
+               list_for_each_entry(slabp, &l3->slabs_full, list) {
                        if (slabp->inuse != cachep->num && !error)
                                error = "slabs_full accounting error";
                        active_objs += cachep->num;
                        active_slabs++;
                }
-               list_for_each(q, &l3->slabs_partial) {
-                       slabp = list_entry(q, struct slab, list);
+               list_for_each_entry(slabp, &l3->slabs_partial, list) {
                        if (slabp->inuse == cachep->num && !error)
                                error = "slabs_partial inuse accounting error";
                        if (!slabp->inuse && !error)
@@ -3744,8 +3882,7 @@ static int s_show(struct seq_file *m, void *p)
                        active_objs += slabp->inuse;
                        active_slabs++;
                }
-               list_for_each(q, &l3->slabs_free) {
-                       slabp = list_entry(q, struct slab, list);
+               list_for_each_entry(slabp, &l3->slabs_free, list) {
                        if (slabp->inuse && !error)
                                error = "slabs_free/inuse accounting error";
                        num_slabs++;
@@ -3782,11 +3919,12 @@ static int s_show(struct seq_file *m, void *p)
                unsigned long max_freeable = cachep->max_freeable;
                unsigned long node_allocs = cachep->node_allocs;
                unsigned long node_frees = cachep->node_frees;
+               unsigned long overflows = cachep->node_overflow;
 
                seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-                               %4lu %4lu %4lu %4lu", allocs, high, grown,
+                               %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
                                reaped, errors, max_freeable, node_allocs,
-                               node_frees);
+                               node_frees, overflows);
        }
        /* cpu stats */
        {
@@ -3837,7 +3975,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 {
        char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
        int limit, batchcount, shared, res;
-       struct list_head *p;
+       struct kmem_cache *cachep;
 
        if (count > MAX_SLABINFO_WRITE)
                return -EINVAL;
@@ -3856,10 +3994,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
        /* Find the cache in the chain of caches. */
        mutex_lock(&cache_chain_mutex);
        res = -EINVAL;
-       list_for_each(p, &cache_chain) {
-               struct kmem_cache *cachep;
-
-               cachep = list_entry(p, struct kmem_cache, next);
+       list_for_each_entry(cachep, &cache_chain, next) {
                if (!strcmp(cachep->name, kbuf)) {
                        if (limit < 1 || batchcount < 1 ||
                                        batchcount > limit || shared < 0) {
@@ -3876,6 +4011,154 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
                res = count;
        return res;
 }
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void *leaks_start(struct seq_file *m, loff_t *pos)
+{
+       loff_t n = *pos;
+       struct list_head *p;
+
+       mutex_lock(&cache_chain_mutex);
+       p = cache_chain.next;
+       while (n--) {
+               p = p->next;
+               if (p == &cache_chain)
+                       return NULL;
+       }
+       return list_entry(p, struct kmem_cache, next);
+}
+
+static inline int add_caller(unsigned long *n, unsigned long v)
+{
+       unsigned long *p;
+       int l;
+       if (!v)
+               return 1;
+       l = n[1];
+       p = n + 2;
+       while (l) {
+               int i = l/2;
+               unsigned long *q = p + 2 * i;
+               if (*q == v) {
+                       q[1]++;
+                       return 1;
+               }
+               if (*q > v) {
+                       l = i;
+               } else {
+                       p = q + 2;
+                       l -= i + 1;
+               }
+       }
+       if (++n[1] == n[0])
+               return 0;
+       memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
+       p[0] = v;
+       p[1] = 1;
+       return 1;
+}
+
+static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
+{
+       void *p;
+       int i;
+       if (n[0] == n[1])
+               return;
+       for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+               if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
+                       continue;
+               if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+                       return;
+       }
+}
+
+static void show_symbol(struct seq_file *m, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+       char *modname;
+       const char *name;
+       unsigned long offset, size;
+       char namebuf[KSYM_NAME_LEN+1];
+
+       name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
+
+       if (name) {
+               seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
+               if (modname)
+                       seq_printf(m, " [%s]", modname);
+               return;
+       }
+#endif
+       seq_printf(m, "%p", (void *)address);
+}
+
+static int leaks_show(struct seq_file *m, void *p)
+{
+       struct kmem_cache *cachep = p;
+       struct slab *slabp;
+       struct kmem_list3 *l3;
+       const char *name;
+       unsigned long *n = m->private;
+       int node;
+       int i;
+
+       if (!(cachep->flags & SLAB_STORE_USER))
+               return 0;
+       if (!(cachep->flags & SLAB_RED_ZONE))
+               return 0;
+
+       /* OK, we can do it */
+
+       n[1] = 0;
+
+       for_each_online_node(node) {
+               l3 = cachep->nodelists[node];
+               if (!l3)
+                       continue;
+
+               check_irq_on();
+               spin_lock_irq(&l3->list_lock);
+
+               list_for_each_entry(slabp, &l3->slabs_full, list)
+                       handle_slab(n, cachep, slabp);
+               list_for_each_entry(slabp, &l3->slabs_partial, list)
+                       handle_slab(n, cachep, slabp);
+               spin_unlock_irq(&l3->list_lock);
+       }
+       name = cachep->name;
+       if (n[0] == n[1]) {
+               /* Increase the buffer size */
+               mutex_unlock(&cache_chain_mutex);
+               m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+               if (!m->private) {
+                       /* Too bad, we are really out */
+                       m->private = n;
+                       mutex_lock(&cache_chain_mutex);
+                       return -ENOMEM;
+               }
+               *(unsigned long *)m->private = n[0] * 2;
+               kfree(n);
+               mutex_lock(&cache_chain_mutex);
+               /* Now make sure this entry will be retried */
+               m->count = m->size;
+               return 0;
+       }
+       for (i = 0; i < n[1]; i++) {
+               seq_printf(m, "%s: %lu ", name, n[2*i+3]);
+               show_symbol(m, n[2*i+2]);
+               seq_putc(m, '\n');
+       }
+       return 0;
+}
+
+struct seq_operations slabstats_op = {
+       .start = leaks_start,
+       .next = s_next,
+       .stop = s_stop,
+       .show = leaks_show,
+};
+#endif
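+
The n[] buffer walked by leaks_show() has a simple layout: n[0] is the capacity, n[1] the number of entries recorded so far, followed by sorted (caller address, hit count) pairs that add_caller() maintains with a binary search. A user-space sketch of that layout, for illustration only (the addresses and counts are made up, and in the kernel the buffer is handed in via m->private):

	#include <stdio.h>

	int main(void)
	{
		/* n[0] = capacity, n[1] = entries, then (address, count) pairs */
		unsigned long n[2 + 2 * 4] = {
			4, 2,			/* room for 4 callers, 2 recorded */
			0xc0123456, 17,		/* caller address, allocation count */
			0xc0987654, 3,
		};
		unsigned long i;

		for (i = 0; i < n[1]; i++)
			printf("caller %#lx seen %lu times\n",
			       n[2 + 2 * i], n[3 + 2 * i]);
		return 0;
	}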
 #endif
 
 /**