percpu: fix unit_map[] verification in pcpu_setup_first_chunk()
[safe/jmp/linux-2.6] / mm/slub.c (blobdiff, repo git://ftp.safe.ca/safe/jmp/linux-2.6)
diff --git a/mm/slub.c b/mm/slub.c
index d73f771..4996fc7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -21,7 +21,6 @@
 #include <linux/kmemcheck.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
-#include <linux/kmemleak.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
@@ -655,7 +654,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
 	print_section("Padding", end - remainder, remainder);
 
-	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
+	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
 	return 0;
 }
 
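Note on the hunk above: only the trailing `remainder` bytes of the slab are padding, so restoring from `start` would also re-poison bytes belonging to live objects. A minimal userspace model of the arithmetic (slab_size and object_size are illustrative values, not taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long slab_size = 4096;		/* one page, for illustration */
		unsigned long object_size = 360;	/* hypothetical object size */
		unsigned long remainder = slab_size % object_size;

		/* Objects occupy [0, slab_size - remainder); only the tail
		 * [slab_size - remainder, slab_size) is padding that may be
		 * rewritten with POISON_INUSE. */
		printf("padding is [%lu, %lu)\n",
		       slab_size - remainder, slab_size);
		return 0;
	}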
@@ -1072,6 +1071,8 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 }
 #define slub_debug 0
 
+#define disable_higher_order_debug 0
+
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
 							{ return 0; }
 static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
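The stubbed define follows the usual kernel pattern for compiling debug knobs out of !CONFIG_SLUB_DEBUG builds: because the stub is the constant 0, every test of disable_higher_order_debug becomes dead code the compiler removes. A standalone sketch of the pattern, with CONFIG_FOO_DEBUG and foo_debug as illustrative names:

	#include <stdio.h>

	/* #define CONFIG_FOO_DEBUG */
	#ifdef CONFIG_FOO_DEBUG
	static int foo_debug = 1;	/* real, runtime-settable knob */
	#else
	#define foo_debug 0		/* stub: tests fold away at compile time */
	#endif

	int main(void)
	{
		if (foo_debug)		/* dead code when the stub is 0 */
			puts("debug checks enabled");
		return 0;
	}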
@@ -1127,8 +1128,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	}
 
 	if (kmemcheck_enabled
-		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS)))
-	{
+		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
 		int pages = 1 << oo_order(oo);
 
 		kmemcheck_alloc_shadow(page, oo_order(oo), flags, node);
@@ -2023,7 +2023,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects --;
+		min_objects--;
 	}
 
 	/*
@@ -2113,8 +2113,8 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
  */
 #define NR_KMEM_CACHE_CPU	100
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu,
-				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
+static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
+		      kmem_cache_cpu);
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
 static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
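The rewritten per-cpu definition moves the array bound into the type argument instead of letting [NR_KMEM_CACHE_CPU] dangle after the macro invocation. This works because DEFINE_PER_CPU declares the variable through __typeof__(type), which accepts array types. A userspace model of the macro shape (the real macro also applies per-cpu section attributes; the struct and the name 'counters' are illustrative):

	/* Simplified stand-in for the kernel macro. */
	#define DEFINE_PER_CPU(type, name) __typeof__(type) name

	struct kmem_cache_cpu_model { int freelist_len; };	/* illustrative */

	/* old style:  DEFINE_PER_CPU(struct kmem_cache_cpu_model, counters)[100]; */
	DEFINE_PER_CPU(struct kmem_cache_cpu_model[100], counters);

	int main(void)
	{
		return counters[0].freelist_len;	/* 'counters' is the whole array */
	}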
@@ -2629,8 +2629,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	if (s->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
 	down_write(&slub_lock);
 	s->refcount--;
 	if (!s->refcount) {
@@ -2641,6 +2639,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 				"still has objects.\n", s->name, __func__);
 			dump_stack();
 		}
+		if (s->flags & SLAB_DESTROY_BY_RCU)
+			rcu_barrier();
 		sysfs_slab_remove(s);
 	} else
 		up_write(&slub_lock);
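The two kmem_cache_destroy() hunks move rcu_barrier() from the top of the function into the !s->refcount branch: waiting for in-flight RCU callbacks (which is what rcu_barrier() does, as opposed to synchronize_rcu()'s grace-period wait) is only needed when the last reference is dropped and the cache is really torn down, not when a merged cache merely loses one user. Condensed shape of the function after the change (the '...' elides the error reporting visible in the hunk context):

	void kmem_cache_destroy(struct kmem_cache *s)
	{
		down_write(&slub_lock);
		s->refcount--;
		if (!s->refcount) {
			...	/* close the cache, complain if objects remain */
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();	/* wait for deferred slab frees */
			sysfs_slab_remove(s);
		} else
			up_write(&slub_lock);
	}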
@@ -2874,13 +2874,15 @@ EXPORT_SYMBOL(__kmalloc);
 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
+	void *ptr = NULL;
 
 	flags |= __GFP_COMP | __GFP_NOTRACK;
 	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
-		return page_address(page);
-	else
-		return NULL;
+		ptr = page_address(page);
+
+	kmemleak_alloc(ptr, size, 1, flags);
+	return ptr;
 }
 
 #ifdef CONFIG_NUMA
@@ -2965,6 +2967,7 @@ void kfree(const void *x)
 	page = virt_to_head_page(x);
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
+		kmemleak_free(x);
 		put_page(page);
 		return;
 	}
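These two hunks pair up: kmalloc_large_node() allocations bypass the slab allocator proper and come straight from the page allocator, so they must be registered with kmemleak by hand, and kfree() must unregister them on the compound-page path before put_page(). kmemleak_alloc()'s third argument is min_count (1 means the object is reported as a leak if no references to it are found). A condensed view of the pairing as it appears in the hunks:

	/* allocation side (kmalloc_large_node) */
	ptr = page_address(page);
	kmemleak_alloc(ptr, size, 1, flags);	/* min_count 1: scan and report */

	/* free side (kfree, !PageSlab compound-page path) */
	kmemleak_free(x);
	put_page(page);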
@@ -3342,6 +3345,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
 	struct kmem_cache *s;
 
+	if (WARN_ON(!name))
+		return NULL;
+
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
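WARN_ON() evaluates to its condition, so the added guard both emits a warning with a stack trace and lets the function bail out before the NULL name can reach find_mergeable() or sysfs registration. A hedged caller-side sketch of the effect, assuming the post-patch behaviour shown above:

	/* A buggy caller passing a NULL name now gets a warning plus a
	 * NULL return instead of a later oops: */
	struct kmem_cache *s = kmem_cache_create(NULL, 128, 0, 0, NULL);
	if (!s)
		pr_err("cache creation refused\n");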