 * allocator is as little as 2 bytes; however, most architectures will
 * typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
*
- * The slob heap is a linked list of pages from alloc_pages(), and
- * within each page, there is a singly-linked list of free blocks (slob_t).
- * The heap is grown on demand and allocation from the heap is currently
- * first-fit.
+ * The slob heap is a set of linked lists of pages from alloc_pages(),
+ * and within each page, there is a singly-linked list of free blocks
+ * (slob_t). The heap is grown on demand. To reduce fragmentation,
+ * heap pages are segregated into three lists: one for objects smaller
+ * than 256 bytes, one for objects from 256 to 1023 bytes, and one for
+ * all larger objects.
+ *
+ * Allocation from the heap involves first searching for a page with
+ * sufficient free blocks (using a next-fit-like approach) followed by
+ * a first-fit scan of the page. Deallocation inserts objects back
+ * into the free list in address order, so this is effectively an
+ * address-ordered first fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header containing the kmalloc size.
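 *
 * As an illustration, on a configuration with a 4-byte minimum
 * alignment a kmalloc(100) asks the slob layer for 104 bytes, stores
 * the value 100 in the first four bytes, and hands the caller a
 * pointer 4 bytes past the start of the block; kfree() steps back
 * over that header to recover the size.
 *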
* NUMA support in SLOB is fairly simplistic, pushing most of the real
* logic down to the page allocator, and simply doing the node accounting
* on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_node() with the specified node id is used
+ * provided, alloc_pages_exact_node() with the specified node id is used
* instead. The common case (or when the node id isn't explicitly provided)
* will default to the current node, as per numa_node_id().
*
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
+#include <linux/kmemtrace.h>
+#include <linux/kmemleak.h>
#include <asm/atomic.h>
/*
}
/*
- * All (partially) free slob pages go on this list.
+ * All partially free slob pages go on these lists.
*/
-static LIST_HEAD(free_slob_pages);
+#define SLOB_BREAK1 256
+#define SLOB_BREAK2 1024
+static LIST_HEAD(free_slob_small);
+static LIST_HEAD(free_slob_medium);
+static LIST_HEAD(free_slob_large);
/*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
*/
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
{
- return test_bit(PG_active, &sp->flags);
+ return PageSlab((struct page *)sp);
}
static inline void set_slob_page(struct slob_page *sp)
{
- __set_bit(PG_active, &sp->flags);
+ __SetPageSlab((struct page *)sp);
}
static inline void clear_slob_page(struct slob_page *sp)
{
- __clear_bit(PG_active, &sp->flags);
+ __ClearPageSlab((struct page *)sp);
+}
+
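+/* Map a kernel virtual address to the struct slob_page that backs it. */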
+static inline struct slob_page *slob_page(const void *addr)
+{
+ return (struct slob_page *)virt_to_page(addr);
}
/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct slob_page *sp)
{
- return test_bit(PG_private, &sp->flags);
+ return PageSlobFree((struct page *)sp);
}
-static inline void set_slob_page_free(struct slob_page *sp)
+static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
- list_add(&sp->list, &free_slob_pages);
- __set_bit(PG_private, &sp->flags);
+ list_add(&sp->list, list);
+ __SetPageSlobFree((struct page *)sp);
}
static inline void clear_slob_page_free(struct slob_page *sp)
{
list_del(&sp->list);
- __clear_bit(PG_private, &sp->flags);
+ __ClearPageSlobFree((struct page *)sp);
}
#define SLOB_UNIT sizeof(slob_t)
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
{
void *page;
#ifdef CONFIG_NUMA
if (node != -1)
- page = alloc_pages_node(node, gfp, order);
+ page = alloc_pages_exact_node(node, gfp, order);
else
#endif
page = alloc_pages(gfp, order);
return page_address(page);
}
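+
+/*
+ * Hand pages back to the page allocator and, if we are inside page
+ * reclaim, credit them as reclaimed slab.
+ */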
+static void slob_free_pages(void *b, int order)
+{
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += 1 << order;
+ free_pages((unsigned long)b, order);
+}
+
/*
* Allocate a slob block within a given slob_page sp.
*/
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
- slob_t *prev, *cur, *aligned = 0;
+ slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
{
struct slob_page *sp;
struct list_head *prev;
+ struct list_head *slob_list;
slob_t *b = NULL;
unsigned long flags;
+ if (size < SLOB_BREAK1)
+ slob_list = &free_slob_small;
+ else if (size < SLOB_BREAK2)
+ slob_list = &free_slob_medium;
+ else
+ slob_list = &free_slob_large;
+
spin_lock_irqsave(&slob_lock, flags);
/* Iterate through each partially free page, try to find room */
- list_for_each_entry(sp, &free_slob_pages, list) {
+ list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
/*
* If there's a node specification, search for a partial
/* Improve fragment distribution and reduce our average
* search time by starting our next search here. (see
* Knuth vol 1, sec 2.5, pg 449) */
- if (prev != free_slob_pages.prev &&
- free_slob_pages.next != prev->next)
- list_move_tail(&free_slob_pages, prev->next);
+ if (prev != slob_list->prev &&
+ slob_list->next != prev->next)
+ list_move_tail(slob_list, prev->next);
break;
}
spin_unlock_irqrestore(&slob_lock, flags);
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+ b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
- return 0;
- sp = (struct slob_page *)virt_to_page(b);
+ return NULL;
+ sp = slob_page(b);
set_slob_page(sp);
spin_lock_irqsave(&slob_lock, flags);
sp->free = b;
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
- set_slob_page_free(sp);
+ set_slob_page_free(sp, slob_list);
b = slob_page_alloc(sp, size, align);
BUG_ON(!b);
spin_unlock_irqrestore(&slob_lock, flags);
return;
BUG_ON(!size);
- sp = (struct slob_page *)virt_to_page(block);
+ sp = slob_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
/* Go directly to page allocator. Do not pass slob allocator */
if (slob_page_free(sp))
clear_slob_page_free(sp);
+ spin_unlock_irqrestore(&slob_lock, flags);
clear_slob_page(sp);
free_slob_page(sp);
- free_page((unsigned long)b);
- goto out;
+ slob_free_pages(b, 0);
+ return;
}
if (!slob_page_free(sp)) {
set_slob(b, units,
(void *)((unsigned long)(b +
SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
- set_slob_page_free(sp);
+ set_slob_page_free(sp, &free_slob_small);
goto out;
}
sp->units += units;
if (b < sp->free) {
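+ /* b becomes the new list head; merge with the old head if adjacent. */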
+ if (b + units == sp->free) {
+ units += slob_units(sp->free);
+ sp->free = slob_next(sp->free);
+ }
set_slob(b, units, sp->free);
sp->free = b;
} else {
{
unsigned int *m;
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ void *ret;
+
+ lockdep_trace_alloc(gfp);
if (size < PAGE_SIZE - align) {
if (!size)
return ZERO_SIZE_PTR;
m = slob_alloc(size + align, gfp, align, node);
- if (m)
- *m = size;
- return (void *)m + align;
+
+ if (!m)
+ return NULL;
+ *m = size;
+ ret = (void *)m + align;
+
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, size + align, gfp, node);
} else {
- void *ret;
+ unsigned int order = get_order(size);
- ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+ ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
if (ret) {
struct page *page;
page = virt_to_page(ret);
page->private = size;
}
- return ret;
+
+ trace_kmalloc_node(_RET_IP_, ret,
+ size, PAGE_SIZE << order, gfp, node);
}
+
+ kmemleak_alloc(ret, size, 1, gfp);
+ return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
{
struct slob_page *sp;
+ trace_kfree(_RET_IP_, block);
+
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
+ kmemleak_free(block);
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp))
- return ((slob_t *)block - 1)->units + SLOB_UNIT;
- else
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ unsigned int *m = (unsigned int *)(block - align);
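+ /* Round the size stored in the kmalloc header up to whole SLOB units. */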
+ return SLOB_UNITS(*m) * SLOB_UNIT;
+ } else
return sp->page.private;
}
EXPORT_SYMBOL(ksize);
unsigned int size, align;
unsigned long flags;
const char *name;
- void (*ctor)(struct kmem_cache *, void *);
+ void (*ctor)(void *);
};
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- size_t align, unsigned long flags,
- void (*ctor)(struct kmem_cache *, void *))
+ size_t align, unsigned long flags, void (*ctor)(void *))
{
struct kmem_cache *c;
- c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
+ c = slob_alloc(sizeof(struct kmem_cache),
+ GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
if (c) {
c->name = name;
} else if (flags & SLAB_PANIC)
panic("Cannot create slab cache %s\n", name);
+ kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
return c;
}
EXPORT_SYMBOL(kmem_cache_create);
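
/*
 * Typical client usage, as a sketch only (the "foo" cache and object
 * type below are made-up names for illustration):
 *
 *	struct kmem_cache *foo_cache;
 *	struct foo *obj;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_PANIC, NULL);
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 */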
void kmem_cache_destroy(struct kmem_cache *c)
{
+ kmemleak_free(c);
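+ /* Wait for any outstanding RCU-deferred frees from this cache. */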
+ if (c->flags & SLAB_DESTROY_BY_RCU)
+ rcu_barrier();
slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);
{
void *b;
- if (c->size < PAGE_SIZE)
+ if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node);
- else
- b = slob_new_page(flags, get_order(c->size), node);
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ SLOB_UNITS(c->size) * SLOB_UNIT,
+ flags, node);
+ } else {
+ b = slob_new_pages(flags, get_order(c->size), node);
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+ PAGE_SIZE << get_order(c->size),
+ flags, node);
+ }
if (c->ctor)
- c->ctor(c, b);
+ c->ctor(b);
+ kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
if (size < PAGE_SIZE)
slob_free(b, size);
else
- free_pages((unsigned long)b, get_order(size));
+ slob_free_pages(b, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
void kmem_cache_free(struct kmem_cache *c, void *b)
{
+ kmemleak_free_recursive(b, c->flags);
if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
struct slob_rcu *slob_rcu;
slob_rcu = b + (c->size - sizeof(struct slob_rcu));
} else {
__kmem_cache_free(b, c->size);
}
+
+ trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);
{
slob_ready = 1;
}
+
+void __init kmem_cache_init_late(void)
+{
+ /* Nothing to do */
+}