Merge branch 'hibernate' [safe/jmp/linux-2.6] / mm/slub.c
diff --git a/mm/slub.c b/mm/slub.c
index 7ec2888..0280eee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -24,6 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
+#include <linux/fault-inject.h>
 
 /*
  * Lock order:
@@ -182,7 +183,7 @@ static LIST_HEAD(slab_caches);
  * Tracking user of a slab.
  */
 struct track {
-	void *addr;		/* Called from address */
+	unsigned long addr;	/* Called from address */
 	int cpu;		/* Was running on cpu */
 	int pid;		/* Pid context */
 	unsigned long when;	/* When did the operation occur */
@@ -371,7 +372,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 }
 
 static void set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, void *addr)
+			enum track_item alloc, unsigned long addr)
 {
 	struct track *p;
@@ -395,8 +396,8 @@ static void init_tracking(struct kmem_cache *s, void *object)
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	set_track(s, object, TRACK_FREE, NULL);
-	set_track(s, object, TRACK_ALLOC, NULL);
+	set_track(s, object, TRACK_FREE, 0UL);
+	set_track(s, object, TRACK_ALLOC, 0UL);
 }
 
 static void print_track(const char *s, struct track *t)
@@ -405,7 +406,7 @@ static void print_track(const char *s, struct track *t)
 		return;
 
 	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
-		s, t->addr, jiffies - t->when, t->cpu, t->pid);
+		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -696,7 +697,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 		if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
 			object_err(s, page, p, "Freepointer corrupt");
 			/*
-			 * No choice but to zap it and thus loose the remainder
+			 * No choice but to zap it and thus lose the remainder
 			 * of the free objects in this slab. May cause
 			 * another error because the object count is now wrong.
 			 */
@@ -870,7 +871,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 }
 
 static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto bad;
@@ -910,7 +911,7 @@ bad:
 }
 
 static int free_debug_processing(struct kmem_cache *s, struct page *page,
-					void *object, void *addr)
+					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
 		goto fail;
@@ -1033,10 +1034,10 @@ static inline void setup_object_debug(struct kmem_cache *s,
 			struct page *page, void *object) {}
 
 static inline int alloc_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, void *addr) { return 0; }
+	struct page *page, void *object, unsigned long addr) { return 0; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
@@ -1503,8 +1504,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
  * we need to allocate a new slab. This is the slowest path since it involves
  * a call to the page allocator and the setup of a new slab.
  */
-static void *__slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
+static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
 	void **object;
 	struct page *new;
@@ -1588,13 +1589,18 @@ debug:
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, int node, void *addr)
+		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 	unsigned int objsize;
 
+	might_sleep_if(gfpflags & __GFP_WAIT);
+
+	if (should_failslab(s->objsize, gfpflags))
+		return NULL;
+
 	local_irq_save(flags);
 	c = get_cpu_slab(s, smp_processor_id());
 	objsize = c->objsize;
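
Note: the two additions to slab_alloc() hook SLUB into the fault-injection
framework (hence the new <linux/fault-inject.h> include above).
might_sleep_if() catches callers that pass a sleeping allocation mask from
atomic context, and should_failslab() lets a debug kernel fail allocations
deliberately to exercise error paths. A minimal sketch of the declaration
this era's fault-inject.h is assumed to provide, gated on CONFIG_FAILSLAB:

	#ifdef CONFIG_FAILSLAB
	extern bool should_failslab(size_t size, gfp_t gfpflags);
	#else
	static inline bool should_failslab(size_t size, gfp_t gfpflags)
	{
		return false;	/* fault injection compiled out: never fail */
	}
	#endif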
@@ -1617,14 +1623,14 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+	return slab_alloc(s, gfpflags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
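
Note: _RET_IP_ wraps the same GCC builtin the callers were already passing,
so only the type changes, not the value. From include/linux/kernel.h:

	#define _RET_IP_	(unsigned long)__builtin_return_address(0)

Carrying the caller as an unsigned long is what lets struct track, the
location-tracking code, and printk's %pS all agree on one representation.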
@@ -1638,7 +1644,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, void *addr, unsigned int offset)
+			void *x, unsigned long addr, unsigned int offset)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1708,7 +1714,7 @@ debug:
  * with all sorts of special processing.
  */
 static __always_inline void slab_free(struct kmem_cache *s,
-			struct page *page, void *x, void *addr)
+			struct page *page, void *x, unsigned long addr)
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
@@ -1735,11 +1741,11 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 
 	page = virt_to_head_page(x);
 
-	slab_free(s, page, x, __builtin_return_address(0));
+	slab_free(s, page, x, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-/* Figure out on which slab object the object resides */
+/* Figure out on which slab page the object resides */
 static struct page *get_object_page(const void *x)
 {
 	struct page *page = virt_to_head_page(x);
@@ -1964,7 +1970,7 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu,
 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
 
 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 							int cpu, gfp_t flags)
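
Note: this and the later struct location change are part of the cpumask
conversion: a raw bitmap plus to_cpumask() replaces a full cpumask_t so
that NR_CPUS can grow without bloating these objects. In essence
(simplified from <linux/types.h> and <linux/cpumask.h>):

	#define DECLARE_BITMAP(name, bits) \
		unsigned long name[BITS_TO_LONGS(bits)]

	/* treat a plain bitmap of NR_CPUS bits as a struct cpumask */
	#define to_cpumask(bitmap) ((struct cpumask *)(bitmap))

The cpumask_* accessors (cpumask_set_cpu(), cpumask_test_cpu(),
cpumask_clear(), cpumask_empty()) then operate through the struct cpumask
pointer instead of the old cpu_set()/cpu_isset()/cpus_clear() macros.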
@@ -1990,7 +1996,7 @@ static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
 {
 	if (c < per_cpu(kmem_cache_cpu, cpu) ||
-			c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
+			c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
 		kfree(c);
 		return;
 	}
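
Note: an off-by-one fix in the bounds check. The statically allocated
per-cpu structures occupy [base, base + NR_KMEM_CACHE_CPU), so a pointer
equal to base + NR_KMEM_CACHE_CPU is one past the end of the array and
must have come from kmalloc(); with the old '>' test it would have been
treated as one of the static entries instead of being kfreed.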
@@ -2039,13 +2045,13 @@ static void init_alloc_cpu_cpu(int cpu)
 {
 	int i;
 
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
 		return;
 
 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
 
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
+	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
 }
 
 static void __init init_alloc_cpu(void)
@@ -2077,8 +2083,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
 * memory on a fresh node that has no slab structures yet.
 */
-static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
-							int node)
+static void early_kmem_cache_node_alloc(gfp_t gfpflags, int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
@@ -2116,7 +2121,6 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
 	local_irq_save(flags);
 	add_partial(n, page, 0);
 	local_irq_restore(flags);
-	return n;
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2148,8 +2152,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 			n = &s->local_node;
 		else {
 			if (slab_state == DOWN) {
-				n = early_kmem_cache_node_alloc(gfpflags,
-								node);
+				early_kmem_cache_node_alloc(gfpflags, node);
 				continue;
 			}
 			n = kmem_cache_alloc_node(kmalloc_caches,
@@ -2251,7 +2254,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 		 * Add some empty padding so that we can catch
 		 * overwrites from earlier objects rather than let
 		 * tracking information or the free pointer be
-		 * corrupted if an user writes before the start
+		 * corrupted if a user writes before the start
 		 * of the object.
 		 */
 		size += sizeof(void *);
@@ -2663,7 +2666,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, -1, __builtin_return_address(0));
+	return slab_alloc(s, flags, -1, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -2691,7 +2694,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	return slab_alloc(s, flags, node, __builtin_return_address(0));
+	return slab_alloc(s, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
@@ -2733,6 +2736,7 @@ size_t ksize(const void *object)
 	 */
 	return s->size;
 }
+EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
@@ -2748,7 +2752,7 @@ void kfree(const void *x)
 		put_page(page);
 		return;
 	}
-	slab_free(page->slab, page, object, __builtin_return_address(0));
+	slab_free(page->slab, page, object, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
@@ -2935,8 +2939,10 @@ static int slab_memory_callback(struct notifier_block *self,
 	case MEM_CANCEL_OFFLINE:
 		break;
 	}
-
-	ret = notifier_from_errno(ret);
+	if (ret)
+		ret = notifier_from_errno(ret);
+	else
+		ret = NOTIFY_OK;
 	return ret;
 }
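
Note: the explicit ret == 0 branch is needed because notifier_from_errno()
of this era did not special-case success. Roughly, assuming the
<linux/notifier.h> of the time:

	/* old helper: wrong for err == 0, since it still ORs in
	 * NOTIFY_STOP_MASK and so stops the notifier chain */
	static inline int notifier_from_errno(int err)
	{
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	}

Mapping 0 to NOTIFY_OK by hand keeps a successful callback from stopping
the memory-hotplug notifier chain.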
@@ -3125,8 +3131,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 
-		if (sysfs_slab_alias(s, name))
+		if (sysfs_slab_alias(s, name)) {
+			down_write(&slub_lock);
+			s->refcount--;
+			up_write(&slub_lock);
 			goto err;
+		}
 		return s;
 	}
@@ -3136,8 +3146,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
 			up_write(&slub_lock);
-			if (sysfs_slab_add(s))
+			if (sysfs_slab_add(s)) {
+				down_write(&slub_lock);
+				list_del(&s->list);
+				up_write(&slub_lock);
+				kfree(s);
 				goto err;
+			}
 			return s;
 		}
 		kfree(s);
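
Note: both kmem_cache_create() hunks plug sysfs error paths. Previously a
failed sysfs_slab_alias() left the merged cache's refcount elevated, and a
failed sysfs_slab_add() left the half-created cache on slab_caches and
leaked it; each path now re-takes slub_lock to undo exactly what was done
before jumping to err.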
@@ -3204,7 +3219,7 @@ static struct notifier_block __cpuinitdata slab_notifier = {
 
 #endif
 
-void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
@@ -3220,7 +3235,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
-					int node, void *caller)
+					int node, unsigned long caller)
 {
 	struct kmem_cache *s;
@@ -3431,13 +3446,13 @@ static void resiliency_test(void) {};
 
 struct location {
 	unsigned long count;
-	void *addr;
+	unsigned long addr;
 	long long sum_time;
 	long min_time;
 	long max_time;
 	long min_pid;
 	long max_pid;
-	cpumask_t cpus;
+	DECLARE_BITMAP(cpus, NR_CPUS);
 	nodemask_t nodes;
 };
@@ -3479,7 +3494,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 {
 	long start, end, pos;
 	struct location *l;
-	void *caddr;
+	unsigned long caddr;
 	unsigned long age = jiffies - track->when;
 
 	start = -1;
@@ -3512,7 +3527,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 				if (track->pid > l->max_pid)
 					l->max_pid = track->pid;
 
-				cpu_set(track->cpu, l->cpus);
+				cpumask_set_cpu(track->cpu,
+						to_cpumask(l->cpus));
 			}
 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
 			return 1;
@@ -3542,8 +3558,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 	l->max_time = age;
 	l->min_pid = track->pid;
 	l->max_pid = track->pid;
-	cpus_clear(l->cpus);
-	cpu_set(track->cpu, l->cpus);
+	cpumask_clear(to_cpumask(l->cpus));
+	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
 	nodes_clear(l->nodes);
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3599,7 +3615,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (len > PAGE_SIZE - 100)
+		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
 			break;
 		len += sprintf(buf + len, "%7ld ", l->count);
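
Note: each row printed here includes a symbolized caller address, which can
expand to as many as KSYM_SYMBOL_LEN characters, so reserving only 100 bytes
of headroom risked running sprintf() past the PAGE_SIZE buffer; the
tightened bound accounts for the worst-case symbol length.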
@@ -3624,11 +3640,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
-		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
+		if (num_online_cpus() > 1 &&
+				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->cpus);
+					to_cpumask(l->cpus));
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
@@ -4347,7 +4364,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
 
 /*
  * Need to buffer aliases during bootup until sysfs becomes
- * available lest we loose that information.
+ * available lest we lose that information.
 */
 struct saved_alias {
 	struct kmem_cache *s;