Revert "Ignore madvise(MADV_WILLNEED) for hugetlbfs-backed regions"
index 7f72bb3..9a90b00 100644
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/cpu.h>
 #include       <linux/sysctl.h>
 #include       <linux/module.h>
-#include       <linux/kmemtrace.h>
+#include       <trace/kmemtrace.h>
 #include       <linux/rcupdate.h>
 #include       <linux/string.h>
 #include       <linux/uaccess.h>
@@ -1169,7 +1169,7 @@ static void __cpuinit cpuup_canceled(long cpu)
        struct kmem_cache *cachep;
        struct kmem_list3 *l3 = NULL;
        int node = cpu_to_node(cpu);
-       node_to_cpumask_ptr(mask, node);
+       const struct cpumask *mask = cpumask_of_node(node);
 
        list_for_each_entry(cachep, &cache_chain, next) {
                struct array_cache *nc;
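
The removed node_to_cpumask_ptr() helper is replaced by cpumask_of_node(), which hands back a const pointer into the node-to-cpumask map instead of setting up a local mask, in line with the wider effort to avoid cpumask copies on the stack. A minimal sketch of how such a pointer is typically consumed (log_node_cpus() is an illustrative name, not part of this patch):

static void log_node_cpus(int node)
{
	const struct cpumask *mask = cpumask_of_node(node);
	unsigned int cpu;

	for_each_cpu(cpu, mask)		/* walk every CPU attached to this node */
		pr_debug("cpu %u belongs to node %d\n", cpu, node);
}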
@@ -2132,6 +2132,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
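
The added wording stresses that @name is referenced by the cache, not copied. A minimal usage sketch under that constraint; the module, struct and cache names here are illustrative, not from this patch:

#include <linux/module.h>
#include <linux/slab.h>

struct my_object {
	int id;
	char payload[32];
};

static struct kmem_cache *my_cachep;

static int __init my_module_init(void)
{
	/* the string literal has static storage, so it stays valid for as
	 * long as this module (and therefore the cache) is loaded */
	my_cachep = kmem_cache_create("my_object", sizeof(struct my_object),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	return my_cachep ? 0 : -ENOMEM;
}

static void __exit my_module_exit(void)
{
	/* destroy the cache before the module text and its strings go away */
	kmem_cache_destroy(my_cachep);
}

module_init(my_module_init);
module_exit(my_module_exit);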
@@ -2164,7 +2166,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
        /*
         * We use cache_chain_mutex to ensure a consistent view of
-        * cpu_online_map as well.  Please see cpuup_callback
+        * cpu_online_mask as well.  Please see cpuup_callback
         */
        get_online_cpus();
        mutex_lock(&cache_chain_mutex);
@@ -2618,7 +2620,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
        if (OFF_SLAB(cachep)) {
                /* Slab management obj is off-slab. */
                slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-                                             local_flags & ~GFP_THISNODE, nodeid);
+                                             local_flags, nodeid);
                if (!slabp)
                        return NULL;
        } else {
@@ -3006,7 +3008,7 @@ retry:
                 * there must be at least one object available for
                 * allocation.
                 */
-               BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+               BUG_ON(slabp->inuse >= cachep->num);
 
                while (slabp->inuse < cachep->num && batchcount--) {
                        STATS_INC_ALLOCED(cachep);
@@ -3115,79 +3117,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-#ifdef CONFIG_FAILSLAB
-
-static struct failslab_attr {
-
-       struct fault_attr attr;
-
-       u32 ignore_gfp_wait;
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-       struct dentry *ignore_gfp_wait_file;
-#endif
-
-} failslab = {
-       .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = 1,
-};
-
-static int __init setup_failslab(char *str)
-{
-       return setup_fault_attr(&failslab.attr, str);
-}
-__setup("failslab=", setup_failslab);
-
-static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
        if (cachep == &cache_cache)
-               return 0;
-       if (flags & __GFP_NOFAIL)
-               return 0;
-       if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
-               return 0;
+               return false;
 
-       return should_fail(&failslab.attr, obj_size(cachep));
+       return should_failslab(obj_size(cachep), flags);
 }
 
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
-static int __init failslab_debugfs(void)
-{
-       mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
-       struct dentry *dir;
-       int err;
-
-       err = init_fault_attr_dentries(&failslab.attr, "failslab");
-       if (err)
-               return err;
-       dir = failslab.attr.dentries.dir;
-
-       failslab.ignore_gfp_wait_file =
-               debugfs_create_bool("ignore-gfp-wait", mode, dir,
-                                     &failslab.ignore_gfp_wait);
-
-       if (!failslab.ignore_gfp_wait_file) {
-               err = -ENOMEM;
-               debugfs_remove(failslab.ignore_gfp_wait_file);
-               cleanup_fault_attr_dentries(&failslab.attr);
-       }
-
-       return err;
-}
-
-late_initcall(failslab_debugfs);
-
-#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-
-#else /* CONFIG_FAILSLAB */
-
-static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-       return 0;
-}
-
-#endif /* CONFIG_FAILSLAB */
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
        void *objp;
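
The open-coded fault-injection machinery (the failslab= boot parameter, the debugfs knob and the __GFP_NOFAIL/__GFP_WAIT filtering) is dropped from slab.c; the new slab_should_failslab() keeps only the allocator-specific exception for cache_cache and defers the rest to a generic should_failslab(). Judging from the removed code and the new call, the shared helper is expected to look roughly like the sketch below (an assumption about code outside this file, not part of the diff):

bool should_failslab(size_t size, gfp_t gfpflags)
{
	if (gfpflags & __GFP_NOFAIL)
		return false;

	/* failslab is the fault_attr state that used to live in slab.c */
	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
		return false;

	return should_fail(&failslab.attr, size);
}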
@@ -3390,7 +3327,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
        unsigned long save_flags;
        void *ptr;
 
-       if (should_failslab(cachep, flags))
+       lockdep_trace_alloc(flags);
+
+       if (slab_should_failslab(cachep, flags))
                return NULL;
 
        cache_alloc_debugcheck_before(cachep, flags);
@@ -3466,7 +3405,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
        unsigned long save_flags;
        void *objp;
 
-       if (should_failslab(cachep, flags))
+       lockdep_trace_alloc(flags);
+
+       if (slab_should_failslab(cachep, flags))
                return NULL;
 
        cache_alloc_debugcheck_before(cachep, flags);
@@ -3624,8 +3565,8 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
        void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
-       kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                            obj_size(cachep), cachep->buffer_size, flags);
+       trace_kmem_cache_alloc(_RET_IP_, ret,
+                              obj_size(cachep), cachep->buffer_size, flags);
 
        return ret;
 }
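
The kmemtrace_mark_alloc()/kmemtrace_mark_free() calls carried an explicit KMEMTRACE_TYPE_* argument; with tracepoints the event type is encoded in the tracepoint name instead, and the same five values are passed through. Assuming the declarations follow the usual DECLARE_TRACE() pattern, the hook used above would be declared roughly as below (the prototype is an assumption, not quoted from this patch):

#include <linux/tracepoint.h>

DECLARE_TRACE(kmem_cache_alloc,
	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));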
@@ -3686,9 +3627,9 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
        void *ret = __cache_alloc_node(cachep, flags, nodeid,
                                       __builtin_return_address(0));
 
-       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
-                                 obj_size(cachep), cachep->buffer_size,
-                                 flags, nodeid);
+       trace_kmem_cache_alloc_node(_RET_IP_, ret,
+                                   obj_size(cachep), cachep->buffer_size,
+                                   flags, nodeid);
 
        return ret;
 }
@@ -3716,9 +3657,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
                return cachep;
        ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-       kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-                                 (unsigned long) caller, ret,
-                                 size, cachep->buffer_size, flags, node);
+       trace_kmalloc_node((unsigned long) caller, ret,
+                          size, cachep->buffer_size, flags, node);
 
        return ret;
 }
@@ -3768,9 +3708,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                return cachep;
        ret = __cache_alloc(cachep, flags, caller);
 
-       kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-                            (unsigned long) caller, ret,
-                            size, cachep->buffer_size, flags);
+       trace_kmalloc((unsigned long) caller, ret,
+                     size, cachep->buffer_size, flags);
 
        return ret;
 }
@@ -3816,7 +3755,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
        __cache_free(cachep, objp);
        local_irq_restore(flags);
 
-       kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
+       trace_kmem_cache_free(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3834,6 +3773,8 @@ void kfree(const void *objp)
        struct kmem_cache *c;
        unsigned long flags;
 
+       trace_kfree(_RET_IP_, objp);
+
        if (unlikely(ZERO_OR_NULL_PTR(objp)))
                return;
        local_irq_save(flags);
@@ -3843,8 +3784,6 @@ void kfree(const void *objp)
        debug_check_no_obj_freed(objp, obj_size(c));
        __cache_free(c, (void *)objp);
        local_irq_restore(flags);
-
-       kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -4108,8 +4047,7 @@ static void cache_reap(struct work_struct *w)
        struct kmem_cache *searchp;
        struct kmem_list3 *l3;
        int node = numa_node_id();
-       struct delayed_work *work =
-               container_of(w, struct delayed_work, work);
+       struct delayed_work *work = to_delayed_work(w);
 
        if (!mutex_trylock(&cache_chain_mutex))
                /* Give up. Setup the next iteration. */
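
to_delayed_work() is simply a named wrapper around the container_of() expression it replaces, so the behaviour of cache_reap() is unchanged; the helper amounts to:

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}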
@@ -4577,3 +4515,4 @@ size_t ksize(const void *objp)
 
        return obj_size(virt_to_cache(objp));
 }
+EXPORT_SYMBOL(ksize);
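
Exporting ksize() lets modules query the usable size of an allocation, which can be larger than what was requested because the slab rounds sizes up. An illustrative fragment (show_usable_size() is a made-up name):

static void show_usable_size(void)
{
	void *buf = kmalloc(30, GFP_KERNEL);

	if (buf) {
		/* the slab typically rounds 30 up to the next object size */
		pr_info("usable size: %zu bytes\n", ksize(buf));
		kfree(buf);
	}
}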