* allocator is as little as 2 bytes; however, most architectures will
* typically require 4 bytes on 32-bit and 8 bytes on 64-bit.
*
- * The slob heap is a linked list of pages from alloc_pages(), and
- * within each page, there is a singly-linked list of free blocks (slob_t).
- * The heap is grown on demand and allocation from the heap is currently
- * first-fit.
+ * The slob heap is a set of linked lists of pages from alloc_pages(),
+ * and within each page, there is a singly-linked list of free blocks
+ * (slob_t). The heap is grown on demand. To reduce fragmentation,
+ * heap pages are segregated into three lists: one for objects smaller
+ * than 256 bytes, one for objects smaller than 1024 bytes, and one
+ * for all larger objects.
+ *
+ * Allocation from the heap involves first searching for a page with
+ * sufficient free blocks (using a next-fit-like approach) followed by
+ * a first-fit scan of the page. Deallocation inserts objects back
+ * into the free list in address order, so this is effectively an
+ * address-ordered first fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
* from kmalloc are prepended with a 4-byte header with the kmalloc size.
}
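To make the in-page structure described above concrete, here is a minimal
userspace sketch of a singly-linked free list scanned first-fit, with sizes
measured in header-sized units as in SLOB. It is illustrative only (struct
blk and first_fit() are made-up names); the kernel's slob_page_alloc() plays
this role and additionally handles alignment and page bookkeeping.

#include <stddef.h>

struct blk {				/* stands in for slob_t */
	size_t units;			/* size of this free block, in units */
	struct blk *next;		/* next free block, in address order */
};

/* First-fit over one page's free list: take the first block with
 * enough units, splitting off the unused tail as a new free block. */
static void *first_fit(struct blk **head, size_t units)
{
	struct blk **prev = head;

	for (struct blk *b = *head; b; prev = &b->next, b = b->next) {
		if (b->units < units)
			continue;
		if (b->units == units) {	/* exact fit: unlink */
			*prev = b->next;
		} else {			/* split: tail stays free */
			struct blk *rest = (struct blk *)
				((char *)b + units * sizeof(struct blk));
			rest->units = b->units - units;
			rest->next = b->next;
			*prev = rest;
		}
		return b;
	}
	return NULL;				/* no fit on this page */
}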
/*
- * All (partially) free slob pages go on this list.
+ * All partially free slob pages go on these lists.
*/
-static LIST_HEAD(free_slob_pages);
+#define SLOB_BREAK1 256
+#define SLOB_BREAK2 1024
+static LIST_HEAD(free_slob_small);
+static LIST_HEAD(free_slob_medium);
+static LIST_HEAD(free_slob_large);
/*
* slob_page: True for all slob pages (false for bigblock pages)
return test_bit(PG_private, &sp->flags);
}
-static inline void set_slob_page_free(struct slob_page *sp)
+static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
- list_add(&sp->list, &free_slob_pages);
+ list_add(&sp->list, list);
__set_bit(PG_private, &sp->flags);
}
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
struct slob_page *sp;
+ struct list_head *prev;
+ struct list_head *slob_list;
slob_t *b = NULL;
unsigned long flags;
+ if (size < SLOB_BREAK1)
+ slob_list = &free_slob_small;
+ else if (size < SLOB_BREAK2)
+ slob_list = &free_slob_medium;
+ else
+ slob_list = &free_slob_large;
+
spin_lock_irqsave(&slob_lock, flags);
/* Iterate through each partially free page, try to find room */
- list_for_each_entry(sp, &free_slob_pages, list) {
+ list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
/*
* If there's a node specification, search for a partial
if (node != -1 && page_to_nid(&sp->page) != node)
continue;
#endif
+ /* Enough room on this page? */
+ if (sp->units < SLOB_UNITS(size))
+ continue;
- if (sp->units >= SLOB_UNITS(size)) {
- b = slob_page_alloc(sp, size, align);
- if (b)
- break;
- }
+ /* Attempt to alloc */
+ prev = sp->list.prev;
+ b = slob_page_alloc(sp, size, align);
+ if (!b)
+ continue;
+
+ /* Improve fragment distribution and reduce our average
+ * search time by starting our next search here. (see
+ * Knuth vol 1, sec 2.5, pg 449) */
+ if (prev != slob_list->prev &&
+ slob_list->next != prev->next)
+ list_move_tail(slob_list, prev->next);
+ break;
}
spin_unlock_irqrestore(&slob_lock, flags);
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp, 0, node);
+ b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
if (!b)
return 0;
sp = (struct slob_page *)virt_to_page(b);
sp->free = b;
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
- set_slob_page_free(sp);
+ set_slob_page_free(sp, slob_list);
b = slob_page_alloc(sp, size, align);
BUG_ON(!b);
spin_unlock_irqrestore(&slob_lock, flags);
}
+ if (unlikely((gfp & __GFP_ZERO) && b))
+ memset(b, 0, size);
return b;
}
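The list_move_tail() above is the next-fit part: after a successful
allocation the list is rotated so the next search resumes at the page that
just worked, instead of re-scanning pages that were recently found full. A
hedged sketch of the same idea over a plain array of pages (next_fit() and
the types are illustrative, not kernel code):

#include <stddef.h>

#define NPAGES 8

struct page_s {
	size_t free_units;		/* stands in for sp->units */
};

static struct page_s pages[NPAGES];
static size_t start;			/* where the last search succeeded */

/* Scan from 'start' so successive searches skip pages that were
 * recently found full; on success, remember where we stopped. */
static int next_fit(size_t need)
{
	for (size_t i = 0; i < NPAGES; i++) {
		size_t j = (start + i) % NPAGES;

		if (pages[j].free_units >= need) {
			start = j;
			return (int)j;
		}
	}
	return -1;			/* no room: grow the heap */
}

Note also the design choice in the new-page path: __GFP_ZERO is masked off
for the page allocation, and zeroing is done once, object-sized, at the end,
so a small __GFP_ZERO allocation does not pay for clearing a whole page.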
slobidx_t units;
unsigned long flags;
- if (!block)
+ if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
BUG_ON(!size);
set_slob(b, units,
(void *)((unsigned long)(b +
SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
- set_slob_page_free(sp);
+ set_slob_page_free(sp, &free_slob_small);
goto out;
}
sp->units += units;
if (b < sp->free) {
+ if (b + units == sp->free) {
+ units += slob_units(sp->free);
+ sp->free = slob_next(sp->free);
+ }
set_slob(b, units, sp->free);
sp->free = b;
} else {
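The new branch above performs forward coalescing: when the freed block ends
exactly where the current list head begins, the two are merged before
insertion, keeping the address-ordered free list short. A sketch of the same
step, reusing the illustrative struct blk from the earlier sketch:

/* Forward merge on free: if the freed block abuts the current head,
 * absorb the head rather than linking two adjacent fragments. */
static void free_before_head(struct blk **head, struct blk *b, size_t units)
{
	if ((char *)b + units * sizeof(struct blk) == (char *)*head) {
		units += (*head)->units;	/* merge with the old head */
		*head = (*head)->next;
	}
	b->units = units;
	b->next = *head;			/* address-ordered insert */
	*head = b;
}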
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
+ unsigned int *m;
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
if (size < PAGE_SIZE - align) {
- unsigned int *m;
+ if (!size)
+ return ZERO_SIZE_PTR;
+
m = slob_alloc(size + align, gfp, align, node);
if (m)
*m = size;
}
EXPORT_SYMBOL(__kmalloc_node);
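For clarity, the header scheme __kmalloc_node implements, reduced to a
userspace sketch (malloc() stands in for slob_alloc(), and the names are
made up): the request is padded by the alignment, the size is recorded in
the first word, and the caller receives the aligned payload. This is how
ksize() can later recover the allocation size.

#include <stdlib.h>

#define MINALIGN sizeof(unsigned long)	/* stands in for ARCH_KMALLOC_MINALIGN */

static void *small_alloc(size_t size)
{
	unsigned int *m = malloc(size + MINALIGN);

	if (!m)
		return NULL;
	*m = (unsigned int)size;		/* 4-byte header: the kmalloc size */
	return (char *)m + MINALIGN;		/* aligned payload for the caller */
}

static size_t small_size(const void *p)	/* what ksize() reads back */
{
	return *(const unsigned int *)((const char *)p - MINALIGN);
}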
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- *
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes. If @p is %NULL, krealloc()
- * behaves exactly like kmalloc(). If @size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
- void *ret;
-
- if (unlikely(!p))
- return kmalloc_track_caller(new_size, flags);
-
- if (unlikely(!new_size)) {
- kfree(p);
- return NULL;
- }
-
- ret = kmalloc_track_caller(new_size, flags);
- if (ret) {
- memcpy(ret, p, min(new_size, ksize(p)));
- kfree(p);
- }
- return ret;
-}
-EXPORT_SYMBOL(krealloc);
-
void kfree(const void *block)
{
struct slob_page *sp;
- if (!block)
+ if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
sp = (struct slob_page *)virt_to_page(block);
{
struct slob_page *sp;
- if (!block)
+ BUG_ON(!block);
+ if (unlikely(block == ZERO_SIZE_PTR))
return 0;
sp = (struct slob_page *)virt_to_page(block);
else
return sp->page.private;
}
+EXPORT_SYMBOL(ksize);
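For reference, the definitions from include/linux/slab.h that these hunks
rely on: kmalloc(0) now returns a distinguished non-NULL cookie, and
ZERO_OR_NULL_PTR() catches both it and NULL so kfree() and ksize() can
return early without touching page metadata.

#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)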
struct kmem_cache {
unsigned int size, align;
unsigned long flags;
const char *name;
- void (*ctor)(void *, struct kmem_cache *, unsigned long);
+ void (*ctor)(struct kmem_cache *, void *);
};
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void*, struct kmem_cache *, unsigned long),
- void (*dtor)(void*, struct kmem_cache *, unsigned long))
+ void (*ctor)(struct kmem_cache *, void *))
{
struct kmem_cache *c;
- c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
+ c = slob_alloc(sizeof(struct kmem_cache),
+ flags, ARCH_KMALLOC_MINALIGN, -1);
if (c) {
c->name = name;
b = slob_new_page(flags, get_order(c->size), node);
if (c->ctor)
- c->ctor(b, c, 0);
+ c->ctor(c, b);
return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
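A short usage sketch of the new constructor signature, which takes the
cache first and the object second and drops the unused flags argument
(struct foo and foo_ctor are hypothetical; assumes <linux/slab.h>):

struct foo {
	int refcount;
};

static void foo_ctor(struct kmem_cache *c, void *obj)
{
	struct foo *f = obj;

	f->refcount = 0;
}

/* cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0, foo_ctor); */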
-void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
-{
- void *ret = kmem_cache_alloc(c, flags);
- if (ret)
- memset(ret, 0, c->size);
-
- return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
static void __kmem_cache_free(void *b, int size)
{
if (size < PAGE_SIZE)