diff --git a/mm/slob.c b/mm/slob.c
index b489907..52bc8a2 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -12,10 +12,17 @@
  * allocator is as little as 2 bytes, however typically most architectures
  * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
  *
- * The slob heap is a linked list of pages from alloc_pages(), and
- * within each page, there is a singly-linked list of free blocks (slob_t).
- * The heap is grown on demand and allocation from the heap is currently
- * first-fit.
+ * The slob heap is a set of linked lists of pages from alloc_pages(),
+ * and within each page, there is a singly-linked list of free blocks
+ * (slob_t). The heap is grown on demand. To reduce fragmentation,
+ * heap pages are segregated into three lists, for objects smaller than
+ * 256 bytes, objects smaller than 1024 bytes, and all other objects.
+ *
+ * Allocation from the heap involves first searching for a page with
+ * sufficient free blocks (using a next-fit-like approach) followed by
+ * a first-fit scan of the page. Deallocation inserts objects back
+ * into the free list in address order, so this is effectively an
+ * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
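Note: the new comment describes two separable ideas: allocations are routed to
one of three page lists by object size, and freed blocks are re-inserted in
address order, so adjacent free blocks sit next to each other in the list and
can be coalesced cheaply. A minimal userspace sketch of both ideas follows;
the break values mirror the patch, but none of these names are kernel API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct block {			/* stand-in for slob_t */
	size_t units;
	struct block *next;	/* singly linked, kept in address order */
};

static struct block *free_small, *free_medium, *free_large;

/* Route by size, as slob_alloc() does with SLOB_BREAK1/SLOB_BREAK2. */
static struct block **pick_list(size_t size)
{
	if (size < 256)
		return &free_small;
	else if (size < 1024)
		return &free_medium;
	else
		return &free_large;
}

/* Address-ordered insertion: walk until the next block lies above b. */
static void insert_free(struct block **head, struct block *b)
{
	struct block **p = head;

	while (*p && (uintptr_t)*p < (uintptr_t)b)
		p = &(*p)->next;
	b->next = *p;
	*p = b;
}

int main(void)
{
	static struct block blocks[3];
	struct block **list = pick_list(100);	/* routes to free_small */

	insert_free(list, &blocks[2]);
	insert_free(list, &blocks[0]);
	insert_free(list, &blocks[1]);

	for (struct block *b = *list; b; b = b->next)
		printf("block at %p\n", (void *)b);	/* ascending addresses */
	return 0;
}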
@@ -110,26 +117,30 @@ static inline void free_slob_page(struct slob_page *sp)
 }
 
 /*
- * All (partially) free slob pages go on this list.
+ * All partially free slob pages go on these lists.
  */
-static LIST_HEAD(free_slob_pages);
+#define SLOB_BREAK1 256
+#define SLOB_BREAK2 1024
+static LIST_HEAD(free_slob_small);
+static LIST_HEAD(free_slob_medium);
+static LIST_HEAD(free_slob_large);
 
 /*
  * slob_page: True for all slob pages (false for bigblock pages)
  */
 static inline int slob_page(struct slob_page *sp)
 {
-	return test_bit(PG_active, &sp->flags);
+	return PageSlobPage((struct page *)sp);
 }
 
 static inline void set_slob_page(struct slob_page *sp)
 {
-	__set_bit(PG_active, &sp->flags);
+	__SetPageSlobPage((struct page *)sp);
 }
 
 static inline void clear_slob_page(struct slob_page *sp)
 {
-	__clear_bit(PG_active, &sp->flags);
+	__ClearPageSlobPage((struct page *)sp);
 }
 
 /*
@@ -137,19 +148,19 @@ static inline void clear_slob_page(struct slob_page *sp)
  */
 static inline int slob_page_free(struct slob_page *sp)
 {
-	return test_bit(PG_private, &sp->flags);
+	return PageSlobFree((struct page *)sp);
 }
 
-static inline void set_slob_page_free(struct slob_page *sp)
+static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
 {
-	list_add(&sp->list, &free_slob_pages);
-	__set_bit(PG_private, &sp->flags);
+	list_add(&sp->list, list);
+	__SetPageSlobFree((struct page *)sp);
 }
 
 static inline void clear_slob_page_free(struct slob_page *sp)
 {
 	list_del(&sp->list);
-	__clear_bit(PG_private, &sp->flags);
+	__ClearPageSlobFree((struct page *)sp);
 }
 
 #define SLOB_UNIT sizeof(slob_t)
@@ -293,12 +304,21 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
 	struct slob_page *sp;
+	struct list_head *prev;
+	struct list_head *slob_list;
 	slob_t *b = NULL;
 	unsigned long flags;
 
+	if (size < SLOB_BREAK1)
+		slob_list = &free_slob_small;
+	else if (size < SLOB_BREAK2)
+		slob_list = &free_slob_medium;
+	else
+		slob_list = &free_slob_large;
+
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, &free_slob_pages, list) {
+	list_for_each_entry(sp, slob_list, list) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -307,18 +327,29 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		if (node != -1 && page_to_nid(&sp->page) != node)
 			continue;
 #endif
+		/* Enough room on this page? */
+		if (sp->units < SLOB_UNITS(size))
+			continue;
 
-		if (sp->units >= SLOB_UNITS(size)) {
-			b = slob_page_alloc(sp, size, align);
-			if (b)
-				break;
-		}
+		/* Attempt to alloc */
+		prev = sp->list.prev;
+		b = slob_page_alloc(sp, size, align);
+		if (!b)
+			continue;
+
+		/* Improve fragment distribution and reduce our average
+		 * search time by starting our next search here. (see
+		 * Knuth vol 1, sec 2.5, pg 449) */
+		if (prev != slob_list->prev &&
+				slob_list->next != prev->next)
+			list_move_tail(slob_list, prev->next);
+		break;
 	}
 	spin_unlock_irqrestore(&slob_lock, flags);
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = slob_new_page(gfp, 0, node);
+		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return 0;
 		sp = (struct slob_page *)virt_to_page(b);
@@ -329,11 +360,13 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		sp->free = b;
 		INIT_LIST_HEAD(&sp->list);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
-		set_slob_page_free(sp);
+		set_slob_page_free(sp, slob_list);
 		b = slob_page_alloc(sp, size, align);
 		BUG_ON(!b);
 		spin_unlock_irqrestore(&slob_lock, flags);
 	}
+	if (unlikely((gfp & __GFP_ZERO) && b))
+		memset(b, 0, size);
 	return b;
 }
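Note: the list_move_tail() above is easy to misread. It moves the list *head*
itself, not a page: the head is re-linked just in front of the page that
satisfied this allocation, so the next scan begins there, which is the
next-fit behaviour the Knuth reference points at. The two guard conditions
merely skip the move when the head is already in the right place. A
hand-rolled userspace sketch of the same rotation (this is not <linux/list.h>,
just an equivalent written out for illustration):

#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

static void list_del_node(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* The effect of list_move_tail(head, start): unlink the head and re-link
 * it just before 'start', so iteration from head now begins at 'start'. */
static void rotate_to(struct list_head *head, struct list_head *start)
{
	list_del_node(head);
	head->prev = start->prev;
	head->next = start;
	start->prev->next = head;
	start->prev = head;
}

int main(void)
{
	struct list_head head, a, b, c;
	struct list_head *p;

	/* Build the circular list: head -> a -> b -> c -> head. */
	head.next = &a; a.prev = &head;
	a.next = &b;    b.prev = &a;
	b.next = &c;    c.prev = &b;
	c.next = &head; head.prev = &c;

	rotate_to(&head, &b);	/* as if page b just satisfied an alloc */

	for (p = head.next; p != &head; p = p->next)
		printf("%c\n", p == &a ? 'a' : p == &b ? 'b' : 'c');
	/* prints b, c, a: the next search starts at b */
	return 0;
}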
@@ -347,7 +380,7 @@ static void slob_free(void *block, int size)
 	slobidx_t units;
 	unsigned long flags;
 
-	if (!block)
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 	BUG_ON(!size);
 
@@ -373,7 +406,7 @@ static void slob_free(void *block, int size)
 		set_slob(b, units,
 			 (void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
-		set_slob_page_free(sp);
+		set_slob_page_free(sp, &free_slob_small);
 		goto out;
 	}
 
@@ -384,6 +417,10 @@ static void slob_free(void *block, int size)
 	sp->units += units;
 
 	if (b < sp->free) {
+		if (b + units == sp->free) {
+			units += slob_units(sp->free);
+			sp->free = slob_next(sp->free);
+		}
 		set_slob(b, units, sp->free);
 		sp->free = b;
 	} else {
@@ -424,13 +461,17 @@ out:
 
 void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
+	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 
 	if (size < PAGE_SIZE - align) {
-		unsigned int *m;
+		if (!size)
+			return ZERO_SIZE_PTR;
+
 		m = slob_alloc(size + align, gfp, align, node);
-		if (m)
-			*m = size;
+
+		if (!m)
+			return NULL;
+		*m = size;
 		return (void *)m + align;
 	} else {
 		void *ret;
@@ -446,44 +487,11 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- *
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * The contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes. If @p is %NULL, krealloc()
- * behaves exactly like kmalloc(). If @size is 0 and @p is not a
- * %NULL pointer, the object pointed to is freed.
- */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
-{
-	void *ret;
-
-	if (unlikely(!p))
-		return kmalloc_track_caller(new_size, flags);
-
-	if (unlikely(!new_size)) {
-		kfree(p);
-		return NULL;
-	}
-
-	ret = kmalloc_track_caller(new_size, flags);
-	if (ret) {
-		memcpy(ret, p, min(new_size, ksize(p)));
-		kfree(p);
-	}
-	return ret;
-}
-EXPORT_SYMBOL(krealloc);
-
 void kfree(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
 	sp = (struct slob_page *)virt_to_page(block);
@@ -501,31 +509,34 @@ size_t ksize(const void *block)
 {
 	struct slob_page *sp;
 
-	if (!block)
+	BUG_ON(!block);
+	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
 	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp))
-		return ((slob_t *)block - 1)->units + SLOB_UNIT;
-	else
+	if (slob_page(sp)) {
+		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		unsigned int *m = (unsigned int *)(block - align);
+		return SLOB_UNITS(*m) * SLOB_UNIT;
+	} else
 		return sp->page.private;
 }
+EXPORT_SYMBOL(ksize);
 
 struct kmem_cache {
 	unsigned int size, align;
 	unsigned long flags;
 	const char *name;
-	void (*ctor)(void *, struct kmem_cache *, unsigned long);
+	void (*ctor)(void *);
 };
 
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
-	size_t align, unsigned long flags,
-	void (*ctor)(void*, struct kmem_cache *, unsigned long),
-	void (*dtor)(void*, struct kmem_cache *, unsigned long))
+	size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *c;
 
-	c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
+	c = slob_alloc(sizeof(struct kmem_cache),
+		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
 
 	if (c) {
 		c->name = name;
@@ -565,22 +576,12 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 		b = slob_new_page(flags, get_order(c->size), node);
 
 	if (c->ctor)
-		c->ctor(b, c, 0);
+		c->ctor(b);
 
 	return b;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
-{
-	void *ret = kmem_cache_alloc(c, flags);
-	if (ret)
-		memset(ret, 0, c->size);
-
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_zalloc);
-
 static void __kmem_cache_free(void *b, int size)
 {
 	if (size < PAGE_SIZE)
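Note: __kmalloc_node() and ksize() above share a simple header scheme: the
requested size is stored in the alignment padding immediately in front of the
returned block, and ksize() reads it back (the real code also rounds the
result up to whole SLOB units). kmalloc(0) now returns the distinguished
ZERO_SIZE_PTR rather than a real allocation, which is why kfree() and ksize()
test ZERO_OR_NULL_PTR()/ZERO_SIZE_PTR. A userspace toy of the header layout
follows; the toy_* names are invented for illustration, and MINALIGN stands
in for max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN):

#include <stdio.h>
#include <stdlib.h>

#define MINALIGN 8	/* assumed alignment, also the header size */

static void *toy_kmalloc(size_t size)
{
	unsigned int *m = malloc(size + MINALIGN);

	if (!m)
		return NULL;
	*m = (unsigned int)size;	/* header: the requested size */
	return (char *)m + MINALIGN;	/* caller sees only the payload */
}

static size_t toy_ksize(const void *block)
{
	const unsigned int *m = (const void *)((const char *)block - MINALIGN);

	return *m;
}

static void toy_kfree(void *block)
{
	free((char *)block - MINALIGN);
}

int main(void)
{
	char *p = toy_kmalloc(100);

	if (!p)
		return 1;
	printf("usable size: %zu\n", toy_ksize(p));	/* prints 100 */
	toy_kfree(p);
	return 0;
}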