X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Fslub_def.h;h=79d59c937fac4ca62d0ca98c0b3249d1842d78c1;hb=69626f23bce6521367ac1e6a2a6e8fba8f0a848a;hp=ea27065e80e636d6a9b41e7a49144186cf79020d;hpb=643b113849d8faa68c9f01c3c9d929bfbffd50bd;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ea27065..79d59c9 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,12 +11,45 @@
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
 
+enum stat_item {
+	ALLOC_FASTPATH,		/* Allocation from cpu slab */
+	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
+	FREE_FASTPATH,		/* Free to cpu slab */
+	FREE_SLOWPATH,		/* Freeing not to cpu slab */
+	FREE_FROZEN,		/* Freeing to frozen slab */
+	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
+	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
+	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
+	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
+	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
+	FREE_SLAB,		/* Slab freed to the page allocator */
+	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
+	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
+	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
+	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
+	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
+	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+	NR_SLUB_STAT_ITEMS };
+
+struct kmem_cache_cpu {
+	void **freelist;	/* Pointer to first free per cpu object */
+	struct page *page;	/* The slab from which we are allocating */
+	int node;		/* The node of the page (or -1 for debug) */
+	unsigned int offset;	/* Freepointer offset (in word units) */
+	unsigned int objsize;	/* Size of an object (from kmem_cache) */
+#ifdef CONFIG_SLUB_STATS
+	unsigned stat[NR_SLUB_STAT_ITEMS];
+#endif
+};
+
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	atomic_long_t nr_slabs;
 	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
 	struct list_head full;
+#endif
 };
 
 /*
@@ -28,7 +61,7 @@ struct kmem_cache {
 	int size;		/* The size of an object including meta data */
 	int objsize;		/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	unsigned int order;
+	int order;		/* Current preferred allocation order */
 
 	/*
 	 * Avoid an extra cache line for UP, SMP and for the node local to
@@ -38,54 +71,59 @@ struct kmem_cache {
 
 	/* Allocation and freeing of slabs */
 	int objects;		/* Number of objects in slab */
+	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
-	void (*ctor)(void *, struct kmem_cache *, unsigned long);
-	void (*dtor)(void *, struct kmem_cache *, unsigned long);
+	void (*ctor)(struct kmem_cache *, void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
+#ifdef CONFIG_SLUB_DEBUG
 	struct kobject kobj;	/* For sysfs */
+#endif
 
 #ifdef CONFIG_NUMA
-	int defrag_ratio;
+	/*
+	 * Defragmentation by allocating from a remote node.
+	 */
+	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-	struct page *cpu_slab[NR_CPUS];
+#ifdef CONFIG_SMP
+	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
+#else
+	struct kmem_cache_cpu cpu_slab;
+#endif
 };
 
 /*
  * Kmalloc subsystem.
  */
-#define KMALLOC_SHIFT_LOW 3
-
-#ifdef CONFIG_LARGE_ALLOCS
-#define KMALLOC_SHIFT_HIGH 25
+#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
 #else
-#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
-#define KMALLOC_SHIFT_HIGH 20
-#else
-#define KMALLOC_SHIFT_HIGH 18
-#endif
+#define KMALLOC_MIN_SIZE 8
 #endif
 
+#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(int size)
+static __always_inline int kmalloc_index(size_t size)
 {
-	/*
-	 * We should return 0 if size == 0 but we use the smallest object
-	 * here for SLAB legacy reasons.
-	 */
-	WARN_ON_ONCE(size == 0);
+	if (!size)
+		return 0;
+
+	if (size <= KMALLOC_MIN_SIZE)
+		return KMALLOC_SHIFT_LOW;
 
 	if (size > 64 && size <= 96)
 		return 1;
@@ -101,23 +139,19 @@ static inline int kmalloc_index(int size)
 	if (size <= 1024) return 10;
 	if (size <= 2 * 1024) return 11;
 	if (size <= 4 * 1024) return 12;
+/*
+ * The following is only needed to support architectures with a larger page
+ * size than 4k.
+ */
 	if (size <= 8 * 1024) return 13;
 	if (size <= 16 * 1024) return 14;
 	if (size <= 32 * 1024) return 15;
 	if (size <= 64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
-#if KMALLOC_SHIFT_HIGH > 18
-	if (size <= 512 * 1024) return 19;
+	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
-#endif
-#if KMALLOC_SHIFT_HIGH > 20
 	if (size <= 2 * 1024 * 1024) return 21;
-	if (size <= 4 * 1024 * 1024) return 22;
-	if (size <= 8 * 1024 * 1024) return 23;
-	if (size <= 16 * 1024 * 1024) return 24;
-	if (size <= 32 * 1024 * 1024) return 25;
-#endif
 	return -1;
 
 /*
@@ -135,21 +169,13 @@ static inline int kmalloc_index(int size)
  * This ought to end up with a global pointer to the right cache
  * in kmalloc_caches.
  */
-static inline struct kmem_cache *kmalloc_slab(size_t size)
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
 	int index = kmalloc_index(size);
 
 	if (index == 0)
 		return NULL;
 
-	if (index < 0) {
-		/*
-		 * Generate a link failure. Would be great if we could
-		 * do something to stop the compile here.
-		 */
-		extern void __kmalloc_size_too_large(void);
-		__kmalloc_size_too_large();
-	}
 	return &kmalloc_caches[index];
 }
 
@@ -157,49 +183,51 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
 #define SLUB_DMA __GFP_DMA
 #else
 /* Disable DMA functionality */
-#define SLUB_DMA 0
+#define SLUB_DMA (__force gfp_t)0
 #endif
 
-static inline void *kmalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
 
-		if (!s)
-			return NULL;
-
-		return kmem_cache_alloc(s, flags);
-	} else
-		return __kmalloc(size, flags);
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+	if (__builtin_constant_p(size)) {
+		if (size > PAGE_SIZE)
+			return kmalloc_large(size, flags);
 
-		if (!s)
-			return NULL;
+		if (!(flags & SLUB_DMA)) {
+			struct kmem_cache *s = kmalloc_slab(size);
 
-		return kmem_cache_zalloc(s, flags);
-	} else
-		return __kzalloc(size, flags);
+			if (!s)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc(s, flags);
+		}
+	}
+	return __kmalloc(size, flags);
 }
 
 #ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+	if (__builtin_constant_p(size) &&
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
-			return NULL;
+			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node(s, flags, node);
-	} else
-		return __kmalloc_node(size, flags, node);
+	}
+	return __kmalloc_node(size, flags, node);
 }
 #endif
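
For illustration, the constant-size fast path above works because kmalloc_index() collapses to a single array index at compile time: index i selects kmalloc_caches[i], the general cache for 2^i-byte objects, plus the special 96- and 192-byte caches at indexes 1 and 2. The following is a minimal userspace sketch of that size-to-index mapping, not kernel code; it assumes KMALLOC_MIN_SIZE == 8 and a 4 KiB PAGE_SIZE, and the index_for() helper name is ours, not the kernel's.

	/*
	 * Userspace sketch mirroring kmalloc_index() above.
	 * Assumptions: KMALLOC_MIN_SIZE == 8, PAGE_SIZE == 4096.
	 */
	#include <stddef.h>
	#include <stdio.h>

	#define KMALLOC_MIN_SIZE 8	/* assumed: no larger ARCH_KMALLOC_MINALIGN */
	#define KMALLOC_SHIFT_LOW 3	/* ilog2(KMALLOC_MIN_SIZE) */

	static int index_for(size_t size)
	{
		if (!size)
			return 0;		/* kmalloc() maps this case to ZERO_SIZE_PTR */
		if (size <= KMALLOC_MIN_SIZE)
			return KMALLOC_SHIFT_LOW;
		if (size > 64 && size <= 96)
			return 1;		/* special 96-byte cache */
		if (size > 128 && size <= 192)
			return 2;		/* special 192-byte cache */
		for (int shift = 4; shift <= 12; shift++)	/* powers of two up to 4 KiB */
			if (size <= ((size_t)1 << shift))
				return shift;
		return -1;	/* > PAGE_SIZE: served by kmalloc_large(), not a cache */
	}

	int main(void)
	{
		const size_t sizes[] = { 0, 8, 24, 96, 100, 192, 500, 4096, 8192 };

		for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("size %5zu -> kmalloc_caches index %d\n",
			       sizes[i], index_for(sizes[i]));
		return 0;
	}

Under these assumptions, 24 bytes resolves to index 5 (the 32-byte cache), 100 bytes to index 7 (128 bytes), and anything above 4096 bytes bypasses the caches entirely, matching the size > PAGE_SIZE branch in kmalloc() above.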
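
The dispatch in kmalloc() and kmalloc_node() hinges on __builtin_constant_p(), a GCC built-in (also supported by Clang) that evaluates at compile time: for a literal size the branch, the cache lookup, and the call all fold into a direct kmem_cache_alloc() or kmalloc_large() call, while a runtime size falls through to __kmalloc(). A standalone userspace illustration of the built-in itself, with hypothetical values:

	/*
	 * Illustration of the __builtin_constant_p() test kmalloc() uses to
	 * pick its fast path: a literal is known at compile time, a value
	 * read at run time is not.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		unsigned long runtime_size = (argc > 1) ? strtoul(argv[1], NULL, 10) : 64;

		/* Constant: kmalloc() would resolve the cache with no runtime dispatch. */
		printf("literal 64   -> compile-time constant? %d\n",
		       __builtin_constant_p(64));

		/* Not constant: kmalloc() would fall through to __kmalloc(). */
		printf("runtime %4lu -> compile-time constant? %d\n",
		       runtime_size, __builtin_constant_p(runtime_size));
		return 0;
	}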