X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=include%2Flinux%2Fslab_def.h;h=850d057500dece3f52497b2bed3aab51dd677852;hb=cd65c3c7d1081290b7365897c2290a84aa967d4d;hp=455f9affea9abafe24c6be97a861d1839ee770a0;hpb=36994e58a48fb8f9651c7dc845a6de298aba5bfc;p=safe%2Fjmp%2Flinux-2.6

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 455f9af..850d057 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,7 +14,88 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
+
+/*
+ * struct kmem_cache
+ *
+ * manages a cache.
+ */
+
+struct kmem_cache {
+/* 1) per-cpu data, touched during every alloc/free */
+	struct array_cache *array[NR_CPUS];
+/* 2) Cache tunables. Protected by cache_chain_mutex */
+	unsigned int batchcount;
+	unsigned int limit;
+	unsigned int shared;
+
+	unsigned int buffer_size;
+	u32 reciprocal_buffer_size;
+/* 3) touched by every alloc & free from the backend */
+
+	unsigned int flags;		/* constant flags */
+	unsigned int num;		/* # of objs per slab */
+
+/* 4) cache_grow/shrink */
+	/* order of pgs per slab (2^n) */
+	unsigned int gfporder;
+
+	/* force GFP flags, e.g. GFP_DMA */
+	gfp_t gfpflags;
+
+	size_t colour;			/* cache colouring range */
+	unsigned int colour_off;	/* colour offset */
+	struct kmem_cache *slabp_cache;
+	unsigned int slab_size;
+	unsigned int dflags;		/* dynamic flags */
+
+	/* constructor func */
+	void (*ctor)(void *obj);
+
+/* 5) cache creation/removal */
+	const char *name;
+	struct list_head next;
+
+/* 6) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+	unsigned long num_active;
+	unsigned long num_allocations;
+	unsigned long high_mark;
+	unsigned long grown;
+	unsigned long reaped;
+	unsigned long errors;
+	unsigned long max_freeable;
+	unsigned long node_allocs;
+	unsigned long node_frees;
+	unsigned long node_overflow;
+	atomic_t allochit;
+	atomic_t allocmiss;
+	atomic_t freehit;
+	atomic_t freemiss;
+
+	/*
+	 * If debugging is enabled, then the allocator can add additional
+	 * fields and/or padding to every object. buffer_size contains the
+	 * total object size including these internal fields, the following
+	 * two variables contain the offset to the user object and its size.
+	 */
+	int obj_offset;
+	int obj_size;
+#endif /* CONFIG_DEBUG_SLAB */
+
+	/*
+	 * We put nodelists[] at the end of kmem_cache, because we want to
+	 * size this array to nr_node_ids slots instead of MAX_NUMNODES
+	 * (see kmem_cache_init())
+	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
+	 * is statically defined, so we reserve the max number of nodes.
+	 */
+	struct kmem_list3 *nodelists[MAX_NUMNODES];
+	/*
+	 * Do not add fields after nodelists[]
+	 */
+};
 
 /* Size description struct for general caches. */
 struct cache_sizes {
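The layout comment above nodelists[] is the load-bearing one: because nodelists[] is the final member, kmem_cache_init() can size each kmem_cache object to nr_node_ids pointer slots instead of the full MAX_NUMNODES array, and only the statically defined cache_cache reserves the maximum. A minimal userspace sketch of that size computation follows; the trimmed struct and the names in it are illustrative stand-ins, not the kernel's:

#include <stddef.h>
#include <stdio.h>

struct kmem_list3;			/* per-node slab lists; opaque here */

/* Trimmed stand-in for struct kmem_cache above: only the layout
 * matters, and nodelists[] must remain the final member. */
struct kmem_cache_sketch {
	unsigned int batchcount;		/* ...other members elided... */
	struct kmem_list3 *nodelists[64];	/* stand-in for MAX_NUMNODES */
};

int main(void)
{
	int nr_node_ids = 2;	/* nodes actually possible on this machine */

	/* Mirrors the buffer_size computation in kmem_cache_init():
	 * charge for nr_node_ids trailing slots, not MAX_NUMNODES. */
	size_t sz = offsetof(struct kmem_cache_sketch, nodelists) +
		    nr_node_ids * sizeof(struct kmem_list3 *);

	printf("object size with %d nodes: %zu bytes\n", nr_node_ids, sz);
	return 0;
}

On a two-node machine this charges two trailing pointers per cache object rather than MAX_NUMNODES of them, which is why the comment forbids adding fields after nodelists[].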
@@ -62,10 +143,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			i++;
 #include <linux/kmalloc_sizes.h>
 #undef CACHE
-		{
-			extern void __you_cannot_kmalloc_that_much(void);
-			__you_cannot_kmalloc_that_much();
-		}
+		return NULL;
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
@@ -76,8 +154,8 @@ found:
 
 		ret = kmem_cache_alloc_notrace(cachep, flags);
 
-		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
-				     size, slab_buffer_size(cachep), flags);
+		trace_kmalloc(_THIS_IP_, ret,
+			      size, slab_buffer_size(cachep), flags);
 
 		return ret;
 	}
@@ -120,10 +198,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			i++;
 #include <linux/kmalloc_sizes.h>
 #undef CACHE
-		{
-			extern void __you_cannot_kmalloc_that_much(void);
-			__you_cannot_kmalloc_that_much();
-		}
+		return NULL;
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
@@ -134,9 +209,9 @@ found:
 
 		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
-					  ret, size, slab_buffer_size(cachep),
-					  flags, node);
+		trace_kmalloc_node(_THIS_IP_, ret,
+				   size, slab_buffer_size(cachep),
+				   flags, node);
 
 		return ret;
 	}
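The four hunks above make two independent changes to the constant-size fast path. First, a compile-time-constant kmalloc() size larger than the biggest general cache now returns NULL instead of relying on the old link-time trap (the deliberately undefined __you_cannot_kmalloc_that_much() reference). Second, the raw kmemtrace markers become the unified trace_kmalloc()/trace_kmalloc_node() tracepoints, dropping the explicit KMEMTRACE_TYPE_KMALLOC argument. The fast path itself works by letting the compiler constant-fold the CACHE(x) chain that <linux/kmalloc_sizes.h> expands into: with a constant size, every branch is decided at compile time and the chain collapses to a single cache index. Below is a standalone sketch of that trick, with an abbreviated size list and an illustrative kmalloc_index() helper (the kernel open-codes this inside kmalloc() itself):

#include <stddef.h>
#include <stdio.h>

/* Userspace sketch of the CACHE(x) dispatch used by kmalloc() above.
 * The size list is abbreviated; the real one comes from
 * <linux/kmalloc_sizes.h>. */
static int kmalloc_index(size_t size)
{
	int i = 0;

#define CACHE(x) if (size <= (x)) goto found; else i++;
	CACHE(32)
	CACHE(64)
	CACHE(128)
	CACHE(256)
#undef CACHE
	return -1;	/* too large: corresponds to the new "return NULL" */
found:
	return i;
}

int main(void)
{
	printf("size 100 -> cache index %d\n", kmalloc_index(100)); /* 2 */
	printf("size 999 -> cache index %d\n", kmalloc_index(999)); /* -1 */
	return 0;
}

With a constant argument the whole if/else chain folds away at compile time, leaving a single fixed index into malloc_sizes[]; the -1 branch is where an oversized constant request now lands, mirroring the new return NULL path.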