#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
- unsigned long min_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
* Slab cache management.
*/
struct kmem_cache {
+ struct kmem_cache_cpu *cpu_slab;
/* Used for retrieving partial slabs etc */
unsigned long flags;
int size; /* The size of an object including meta data */
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
+ unsigned long min_partial;
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
int remote_node_defrag_ratio;
struct kmem_cache_node *node[MAX_NUMNODES];
#endif
-#ifdef CONFIG_SMP
- struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
- struct kmem_cache_cpu cpu_slab;
-#endif
};
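
Two structural changes land here: min_partial becomes a per-cache tunable instead of a per-node field, and the per-CPU slab state is reached through a single percpu pointer rather than an NR_CPUS-sized array (or an embedded struct on !SMP). A minimal sketch of how a fast path might dereference the new field, assuming cpu_slab is set up with alloc_percpu(); get_cpu_slab() is a name made up for this illustration, not a function from the header:

/*
 * Sketch only: with cpu_slab as a percpu pointer, the per-CPU state is
 * reached via this_cpu_ptr() instead of indexing cpu_slab[NR_CPUS].
 * Callers must keep the CPU pinned (e.g. interrupts disabled) while the
 * returned pointer is in use.
 */
static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s)
{
	return this_cpu_ptr(s->cpu_slab);
}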
/*
* This should be dropped to PAGE_SIZE / 2 once the page allocator
* "fastpath" becomes competitive with the slab allocator fastpaths.
*/
-#define SLUB_MAX_SIZE (PAGE_SIZE)
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
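
The two constants move together: SLUB_MAX_SIZE is the largest request served from a kmalloc cache, and SLUB_PAGE_SHIFT bounds the kmalloc cache array, so raising the limit needs one more slot. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):

/*
 * before: SLUB_MAX_SIZE = 4096, SLUB_PAGE_SHIFT = 13
 * after:  SLUB_MAX_SIZE = 8192, SLUB_PAGE_SHIFT = 14
 *
 * i.e. 8 KiB requests now stay in the kmalloc caches; anything larger is
 * still handed to kmalloc_large() and the page allocator.
 */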
/*
* We keep the general caches in an array of slab caches that are used for
if (size <= KMALLOC_MIN_SIZE)
return KMALLOC_SHIFT_LOW;
-#if KMALLOC_MIN_SIZE <= 64
- if (size > 64 && size <= 96)
+ if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
return 1;
- if (size > 128 && size <= 192)
+ if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
return 2;
-#endif
if (size <= 8) return 3;
if (size <= 16) return 4;
if (size <= 32) return 5;
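
The compile-time #if guard becomes an ordinary condition on KMALLOC_MIN_SIZE, which the compiler can still fold away when size is constant. A standalone sketch of the size-to-index mapping, runnable in userspace; kmalloc_index_sketch() is not a kernel function, and KMALLOC_MIN_SIZE is pinned to 8 for the example:

#include <stdio.h>

#define KMALLOC_MIN_SIZE	8
#define KMALLOC_SHIFT_LOW	3

static int kmalloc_index_sketch(size_t size)
{
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;
	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;	/* the odd 96-byte cache */
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;	/* the odd 192-byte cache */
	if (size <= 8)   return 3;
	if (size <= 16)  return 4;
	if (size <= 32)  return 5;
	if (size <= 64)  return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	return -1;		/* larger sizes elided in this sketch */
}

int main(void)
{
	/* prints "5 1 8" */
	printf("%d %d %d\n", kmalloc_index_sketch(24),
	       kmalloc_index_sketch(80), kmalloc_index_sketch(200));
	return 0;
}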
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
unsigned int order = get_order(size);
void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
- kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
- size, PAGE_SIZE << order, flags);
+ kmemleak_alloc(ret, size, 1, flags);
+ trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
return ret;
}
ret = kmem_cache_alloc_notrace(s, flags);
- kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
- _THIS_IP_, ret,
- size, s->size, flags);
+ trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
return ret;
}
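
For context, a condensed sketch of how the inline kmalloc() fast path ties these pieces together. Guards such as the constant-size check are omitted, and kmalloc_slab() is assumed here to be the size-to-cache lookup from this header; this is an illustration, not the verbatim code:

/*
 * Condensed sketch: oversized requests go to kmalloc_large() and the page
 * allocator; everything else is allocated untraced from a kmalloc cache,
 * then the trace event is emitted with the requested size next to the
 * cache's object size.
 */
static __always_inline void *kmalloc_sketch(size_t size, gfp_t flags)
{
	void *ret;
	struct kmem_cache *s;

	if (size > SLUB_MAX_SIZE)
		return kmalloc_large(size, flags);

	s = kmalloc_slab(size);		/* assumed size-to-cache lookup */
	if (!s)
		return ZERO_SIZE_PTR;	/* zero-byte request */

	ret = kmem_cache_alloc_notrace(s, flags);
	trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
	return ret;
}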
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
gfp_t gfpflags,
int node);
ret = kmem_cache_alloc_node_notrace(s, flags, node);
- kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
- _THIS_IP_, ret,
- size, s->size, flags, node);
+ trace_kmalloc_node(_THIS_IP_, ret,
+ size, s->size, flags, node);
return ret;
}
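
A usage sketch of the NUMA path above; struct foo and alloc_foo_on_local_node() are placeholders for this illustration. With CONFIG_TRACING the allocation is reported through trace_kmalloc_node() together with the requested node:

static int alloc_foo_on_local_node(struct foo **out)
{
	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, numa_node_id());

	if (!f)
		return -ENOMEM;
	*out = f;
	return 0;
}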