index d73f771..8d71aaf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -21,7 +21,6 @@
 #include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
-#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
@@ -655,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
        print_section("Padding", end - remainder, remainder);
 
-       restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+       restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
 }
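
The hunk above narrows the repair: fault/end bracket the corrupted padding and remainder is its length, so re-poisoning should start at end - remainder. Restoring from start would also overwrite the live objects in front of the padding, since start points at the base of the slab page. A minimal sketch of what the corrected call covers, given that restore_bytes() essentially memsets the range and logs it:

	/* sketch: the padding lives in the tail [end - remainder, end);
	 * restoring from 'start' (the page base) would also clobber
	 * the objects in front of it */
	u8 *pad = end - remainder;

	memset(pad, POISON_INUSE, remainder);	/* what restore_bytes() amounts to */
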
 
@@ -1072,6 +1071,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 }
 #define slub_debug 0
 
+#define disable_higher_order_debug 0
+
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
                                                        { return 0; }
 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
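
For the !CONFIG_SLUB_DEBUG build, the new zero stub mirrors the slub_debug stub just above it: callers can test disable_higher_order_debug unconditionally and the compiler discards the dead branch. A hedged sketch of the pattern; kmalloc_large_ok() is a made-up name:

	/* sketch: constant-0 stubs keep debug-only knobs out of #ifdefs */
	static int kmalloc_large_ok(int order)
	{
		if (disable_higher_order_debug && order > 1)	/* folds to 'if (0)' here */
			return 0;
		return 1;
	}
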
@@ -1127,8 +1128,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        }
 
        if (kmemcheck_enabled
-               && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
-       {
+               && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
                int pages = 1 << oo_order(oo);
 
                kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -1735,7 +1735,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
        }
        local_irq_restore(flags);
 
-       if (unlikely((gfpflags & __GFP_ZERO) && object))
+       if (unlikely(gfpflags & __GFP_ZERO) && object)
                memset(object, 0, objsize);
 
        kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
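
The __GFP_ZERO hunk moves the object test outside the hint: unlikely() now marks only the genuinely rare condition (a zeroing request), while object != NULL, which is almost always true, no longer carries an incorrect "expect false" annotation. Roughly, with the standard expansion:

	/* unlikely(x) expands to __builtin_expect(!!(x), 0) */

	if (unlikely((gfpflags & __GFP_ZERO) && object))	/* old: whole test hinted rare */
		memset(object, 0, objsize);

	if (unlikely(gfpflags & __GFP_ZERO) && object)		/* new: only the flag is hinted */
		memset(object, 0, objsize);
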
@@ -1754,7 +1754,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
        return slab_alloc(s, gfpflags, -1, _RET_IP_);
@@ -1775,7 +1775,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
                                    gfp_t gfpflags,
                                    int node)
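
Both guards change because kmemtrace was absorbed into the generic tracing framework, so CONFIG_KMEMTRACE no longer exists as a separate symbol; CONFIG_TRACING now guards the _notrace variants. Those variants exist so the inline kmalloc() wrappers can emit a tracepoint carrying the caller's requested size rather than the rounded-up cache size; a hedged sketch of such a caller (names approximate):

	#ifdef CONFIG_TRACING
		ret = kmem_cache_alloc_notrace(s, flags);	/* no tracepoint inside */
		trace_kmalloc(_THIS_IP_, ret, size, s->size, flags); /* report true request size */
	#else
		ret = kmem_cache_alloc(s, flags);
	#endif
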
@@ -2023,7 +2023,7 @@ static inline int calculate_order(int size)
                                return order;
                        fraction /= 2;
                }
-               min_objects --;
+               min_objects--;
        }
 
        /*
@@ -2113,8 +2113,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
  */
 #define NR_KMEM_CACHE_CPU 100
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu,
-                               kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
+                     kmem_cache_cpu);
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
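
The per-cpu hunk is a syntax correction: DEFINE_PER_CPU(type, name) wants the array bound inside the type argument, because the macro attaches section attributes to name, and appending [NR_KMEM_CACHE_CPU] after the closing parenthesis relies on the expansion ending exactly at the variable name. A small sketch with a hypothetical per-cpu array:

	DEFINE_PER_CPU(int [4], demo_hist);	/* four ints per CPU (hypothetical) */

	static void record(int bucket)
	{
		__get_cpu_var(demo_hist)[bucket]++;	/* this-CPU element access */
	}
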
@@ -2629,8 +2629,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-       if (s->flags & SLAB_DESTROY_BY_RCU)
-               rcu_barrier();
        down_write(&slub_lock);
        s->refcount--;
        if (!s->refcount) {
@@ -2641,6 +2639,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
                                "still has objects.\n", s->name, __func__);
                        dump_stack();
                }
+               if (s->flags & SLAB_DESTROY_BY_RCU)
+                       rcu_barrier();
                sysfs_slab_remove(s);
        } else
                up_write(&slub_lock);
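
Moving rcu_barrier() below the refcount check does two things: callers that only drop a shared reference (refcount still positive) no longer pay for a full RCU drain, and a SLAB_DESTROY_BY_RCU cache still waits for every pending grace-period callback before its slabs and sysfs entry are torn down. For context, the reader-side contract such caches support, sketched with hypothetical names:

	/* sketch: with SLAB_DESTROY_BY_RCU the *page* stays valid across a
	 * grace period, though the object may be reused; readers revalidate */
	rcu_read_lock();
	obj = rcu_dereference(hash[slot]);	/* hypothetical lookup */
	if (obj && obj->key == key)		/* recheck: slot may be recycled */
		use(obj);
	rcu_read_unlock();
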
@@ -2874,13 +2874,15 @@ EXPORT_SYMBOL(__kmalloc);
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
        struct page *page;
+       void *ptr = NULL;
 
        flags |= __GFP_COMP | __GFP_NOTRACK;
        page = alloc_pages_node(node, flags, get_order(size));
        if (page)
-               return page_address(page);
-       else
-               return NULL;
+               ptr = page_address(page);
+
+       kmemleak_alloc(ptr, size, 1, flags);
+       return ptr;
 }
 
 #ifdef CONFIG_NUMA
@@ -2965,6 +2967,7 @@ void kfree(const void *x)
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                BUG_ON(!PageCompound(page));
+               kmemleak_free(x);
                put_page(page);
                return;
        }
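
The two kmemleak hunks are a pair. Requests above the slab limit are handed straight to the page allocator, so the leak detector never sees them through its slab hooks: kmalloc_large_node() must register the object by hand, and the non-PageSlab branch of kfree() must unregister it, otherwise every large allocation shows up as a leak, or a freed one keeps being scanned. Condensed:

	ptr = page_address(page);
	kmemleak_alloc(ptr, size, 1, flags);	/* min_count=1: unreferenced => leak */
	/* ... lifetime of the large object ... */
	kmemleak_free(ptr);			/* must precede put_page() */
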
@@ -3342,6 +3345,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
        struct kmem_cache *s;
 
+       if (WARN_ON(!name))
+               return NULL;
+
        down_write(&slub_lock);
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
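
The NULL-name guard converts a crash deferred to sysfs registration, where the name is first dereferenced, into an immediate, backtraced warning at the broken call site. A hedged usage sketch:

	/* sketch: a buggy caller now gets WARN_ON + NULL instead of a later oops */
	struct kmem_cache *c = kmem_cache_create(NULL, 64, 0, 0, NULL);

	if (!c)
		return -ENOMEM;	/* hypothetical error path */
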
@@ -4365,12 +4371,28 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
        return len + sprintf(buf + len, "\n");
 }
 
+static void clear_stat(struct kmem_cache *s, enum stat_item si)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               get_cpu_slab(s, cpu)->stat[si] = 0;
+}
+
 #define STAT_ATTR(si, text)                                    \
 static ssize_t text##_show(struct kmem_cache *s, char *buf)    \
 {                                                              \
        return show_stat(s, buf, si);                           \
 }                                                              \
-SLAB_ATTR_RO(text);                                            \
+static ssize_t text##_store(struct kmem_cache *s,              \
+                               const char *buf, size_t length) \
+{                                                              \
+       if (buf[0] != '0')                                      \
+               return -EINVAL;                                 \
+       clear_stat(s, si);                                      \
+       return length;                                          \
+}                                                              \
+SLAB_ATTR(text);                                               \
 
 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
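
The statistics attributes flip from SLAB_ATTR_RO() to SLAB_ATTR(), gaining a store handler: writing a string that starts with '0' zeroes the counter on every online CPU via clear_stat(), and anything else returns -EINVAL. For one counter the macro expands roughly to the following; writing 0 to /sys/kernel/slab/<cache>/alloc_fastpath then resets it:

	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
	{
		return show_stat(s, buf, ALLOC_FASTPATH);
	}
	static ssize_t alloc_fastpath_store(struct kmem_cache *s,
						const char *buf, size_t length)
	{
		if (buf[0] != '0')		/* only a reset is supported */
			return -EINVAL;
		clear_stat(s, ALLOC_FASTPATH);
		return length;
	}
	SLAB_ATTR(alloc_fastpath);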