mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slub_def.h>
author	David Woodhouse <dwmw2@infradead.org>
Wed, 19 May 2010 11:02:14 +0000 (12:02 +0100)
committer	Pekka Enberg <penberg@cs.helsinki.fi>
Wed, 19 May 2010 19:03:13 +0000 (22:03 +0300)
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
include/linux/slub_def.h
mm/slub.c

index 0249d41..55695c8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,6 +116,14 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
index d2a54fe..c874c3e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA | SLAB_NOTRACK)
 
-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 #define OO_SHIFT       16
 #define OO_MASK                ((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE      65535 /* since page.objects is u16 */
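
As a rough illustration (not part of the commit), the fallback block moved into <linux/slub_def.h> can be mirrored in a standalone userspace program to see what the defaults evaluate to with the building compiler. This is only a sketch; inside the kernel the macros are meaningful to the slab allocator, and an architecture header may pre-define larger values before the #ifndef fallback is reached.

/*
 * Standalone sketch (not kernel code): mirrors the fallback definitions
 * this patch moves into <linux/slub_def.h>.  Unless already defined by
 * the architecture, both macros default to
 * __alignof__(unsigned long long).  __alignof__ is a GCC/Clang extension.
 */
#include <stdio.h>

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

int main(void)
{
	/* Typically prints 8 on common 64-bit targets; an arch that needs
	 * stricter alignment would define the macros before this point. */
	printf("ARCH_KMALLOC_MINALIGN = %zu\n", (size_t)ARCH_KMALLOC_MINALIGN);
	printf("ARCH_SLAB_MINALIGN    = %zu\n", (size_t)ARCH_SLAB_MINALIGN);
	return 0;
}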