diff --git a/mm/slob.c b/mm/slob.c
index 0bfa680..837ebd6 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -46,7 +46,7 @@
  * NUMA support in SLOB is fairly simplistic, pushing most of the real
  * logic down to the page allocator, and simply doing the node accounting
  * on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_node() with the specified node id is used
+ * provided, alloc_pages_exact_node() with the specified node id is used
  * instead. The common case (or when the node id isn't explicitly provided)
  * will default to the current node, as per numa_node_id().
  *
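
(This hunk only touches the block comment; the matching code change, alloc_pages_node() to alloc_pages_exact_node() in slob_new_pages(), appears further down.)
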
@@ -60,11 +60,14 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>
 #include <asm/atomic.h>

 /*
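
The three new headers correspond to the functional changes below: linux/swap.h for struct reclaim_state, linux/kmemtrace.h for the trace_kmalloc_node()/trace_kfree()/trace_kmem_cache_*() hooks, and linux/kmemleak.h for the kmemleak_*() object-tracking calls.
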
@@ -130,17 +133,17 @@ static LIST_HEAD(free_slob_large);
  */
 static inline int is_slob_page(struct slob_page *sp)
 {
-	return PageSlobPage((struct page *)sp);
+	return PageSlab((struct page *)sp);
 }

 static inline void set_slob_page(struct slob_page *sp)
 {
-	__SetPageSlobPage((struct page *)sp);
+	__SetPageSlab((struct page *)sp);
 }

 static inline void clear_slob_page(struct slob_page *sp)
 {
-	__ClearPageSlobPage((struct page *)sp);
+	__ClearPageSlab((struct page *)sp);
 }

 static inline struct slob_page *slob_page(const void *addr)
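
SLOB previously marked its pages with a private PG_slob_page flag; these accessors now reuse the generic PG_slab bit via PageSlab()/__SetPageSlab()/__ClearPageSlab(). Since SLOB never uses the slab and non-slab meanings on the same page, this frees up a page flag without changing behaviour.
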
@@ -241,7 +244,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)

 #ifdef CONFIG_NUMA
 	if (node != -1)
-		page = alloc_pages_node(node, gfp, order);
+		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
 		page = alloc_pages(gfp, order);
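
alloc_pages_node() treats a negative node id as "use the current node", a check that is dead weight here because the branch above already guarantees node != -1; alloc_pages_exact_node() skips it. Roughly, from include/linux/gfp.h of this era (a simplified sketch, not verbatim):

	static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						    unsigned int order)
	{
		if (nid < 0)		/* unknown node: fall back to current */
			nid = numa_node_id();
		return __alloc_pages(gfp_mask, order,
				     node_zonelist(nid, gfp_mask));
	}

	static inline struct page *alloc_pages_exact_node(int nid,
							  gfp_t gfp_mask,
							  unsigned int order)
	{
		VM_BUG_ON(nid < 0);	/* caller must pass a valid node */
		return __alloc_pages(gfp_mask, order,
				     node_zonelist(nid, gfp_mask));
	}
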
@@ -254,6 +257,8 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)

 static void slob_free_pages(void *b, int order)
 {
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += 1 << order;
 	free_pages((unsigned long)b, order);
 }

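
current->reclaim_state is set by the direct-reclaim path, which credits freed slab pages toward its reclaim target; SLAB and SLUB already did this accounting, SLOB did not. The consumer looks roughly like this (mm/vmscan.c, simplified):

	if (reclaim_state) {
		sc->nr_reclaimed += reclaim_state->reclaimed_slab;
		reclaim_state->reclaimed_slab = 0;
	}
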
@@ -406,7 +411,7 @@ static void slob_free(void *block, int size)
 		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
-		free_page((unsigned long)b);
+		slob_free_pages(b, 0);
 		return;
 	}

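
Freeing through slob_free_pages() instead of bare free_page() means the page released when a SLOB page becomes empty is also counted in the reclaim_state accounting introduced above.
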
@@ -474,18 +479,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
+
+	lockdep_trace_alloc(gfp);

 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;

 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);

 		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
@@ -493,8 +505,13 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, PAGE_SIZE << order, gfp, node);
 	}
+
+	kmemleak_alloc(ret, size, 1, gfp);
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);

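
__kmalloc_node() now funnels both branches into a single ret so that one kmemleak_alloc() call at the end registers every successful allocation. Each branch reports its true footprint to the tracer: size + align bytes for the SLOB free-list case, PAGE_SIZE << order for the page-allocator case. lockdep_trace_alloc(gfp) gives lockdep a chance to validate the gfp flags, e.g. catching allocations that could recurse into reclaim while reclaim-sensitive locks are held.
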
@@ -502,8 +519,11 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;

+	trace_kfree(_RET_IP_, block);
+
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
+	kmemleak_free(block);

 	sp = slob_page(block);
 	if (is_slob_page(sp)) {
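
trace_kfree() sits before the ZERO_OR_NULL_PTR() early return, presumably so that kfree(NULL) calls still show up in traces, while kmemleak_free() comes after it, since NULL and ZERO_SIZE_PTR were never registered with kmemleak in the first place.
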
@@ -567,12 +587,16 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	} else if (flags & SLAB_PANIC)
 		panic("Cannot create slab cache %s\n", name);

+	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
 	return c;
 }
 EXPORT_SYMBOL(kmem_cache_create);

 void kmem_cache_destroy(struct kmem_cache *c)
 {
+	kmemleak_free(c);
+	if (c->flags & SLAB_DESTROY_BY_RCU)
+		rcu_barrier();
 	slob_free(c, sizeof(struct kmem_cache));
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
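
struct kmem_cache itself is carved out of SLOB memory, so it is registered with kmemleak by hand on create and unregistered on destroy. The rcu_barrier() matters for SLAB_DESTROY_BY_RCU caches: kmem_cache_free() defers such frees through call_rcu() (see kmem_rcu_free() below), and callers expect every object to be gone once kmem_cache_destroy() returns. A minimal sketch of the hazard, with hypothetical names:

	struct kmem_cache *cache;

	cache = kmem_cache_create("demo", sizeof(struct demo), 0,
				  SLAB_DESTROY_BY_RCU, NULL);
	...
	kmem_cache_free(cache, obj);	/* queued via call_rcu() */
	kmem_cache_destroy(cache);	/* without rcu_barrier(), this could
					   return while the RCU callback that
					   actually frees obj is still pending */
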
@@ -581,14 +605,22 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;

-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    SLOB_UNITS(c->size) * SLOB_UNIT,
+					    flags, node);
+	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    PAGE_SIZE << get_order(c->size),
+					    flags, node);
+	}

 	if (c->ctor)
 		c->ctor(b);

+	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
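
As in __kmalloc_node(), each branch traces the size actually consumed: SLOB_UNITS(c->size) * SLOB_UNIT for free-list objects, PAGE_SIZE << get_order(c->size) for whole pages. kmemleak_alloc_recursive() is the variant that honours SLAB_NOLEAKTRACE in c->flags, so caches that kmemleak itself depends on are not tracked recursively.
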
@@ -611,6 +643,7 @@ static void kmem_rcu_free(struct rcu_head *head)

 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
+	kmemleak_free_recursive(b, c->flags);
 	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
@@ -620,6 +653,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	trace_kmem_cache_free(_RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
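
kmemleak_free_recursive() mirrors the _recursive allocation hook above, and trace_kmem_cache_free() is placed after the if/else so it fires on both the RCU-deferred and the immediate free path.
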
@@ -657,3 +692,8 @@ void __init kmem_cache_init(void)
 {
 	slob_ready = 1;
 }
+
+void __init kmem_cache_init_late(void)
+{
+	/* Nothing to do */
+}
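
kmem_cache_init_late() appears to exist so the generic init code can call one late hook regardless of which slab allocator is built in; SLAB uses it to finish per-CPU cache setup once scheduling is up, while SLOB has no second init stage and only needs the empty stub.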