Fix slab to check for CONFIG_MASK_ALLOC too

Will all go away later once the last GFP_DMA users are gone.

Signed-off-by: Andi Kleen

Index: linux/include/linux/slab_def.h
===================================================================
--- linux.orig/include/linux/slab_def.h
+++ linux/include/linux/slab_def.h
@@ -15,11 +15,20 @@
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
 
+/* Mask allocator support will go away */
+/*
+ * NOTE(review): the guard must also test CONFIG_ZONE_DMA — the hunks below
+ * replace "#ifdef CONFIG_ZONE_DMA" with "#ifdef SLAB_DMA_ENABLED", so the
+ * ZONE_DMA case has to light this up too or GFP_DMA kmallocs would silently
+ * come from non-DMA caches on CONFIG_ZONE_DMA=y kernels without MASK_ALLOC.
+ * The #undef avoids a redefinition warning if an arch header predefined it.
+ */
+#if defined(SLAB_DMA_ENABLED) || defined(CONFIG_ZONE_DMA) || defined(CONFIG_MASK_ALLOC)
+#undef SLAB_DMA_ENABLED
+#define SLAB_DMA_ENABLED 1
+#define SLAB_DMA_ENABLED_FLAG 1
+#else
+#define SLAB_DMA_ENABLED_FLAG 0
+#endif
+
 /* Size description struct for general caches. */
 struct cache_sizes {
 	size_t		 	cs_size;
 	struct kmem_cache	*cs_cachep;
-#ifdef CONFIG_ZONE_DMA
+#ifdef SLAB_DMA_ENABLED
 	struct kmem_cache	*cs_dmacachep;
 #endif
 };
@@ -48,7 +57,7 @@ static inline void *kmalloc(size_t size,
 		__you_cannot_kmalloc_that_much();
 	}
 found:
-#ifdef CONFIG_ZONE_DMA
+#ifdef SLAB_DMA_ENABLED
 	if (flags & GFP_DMA)
 		return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
 						flags);
@@ -82,7 +91,7 @@ static inline void *kmalloc_node(size_t
 		__you_cannot_kmalloc_that_much();
 	}
 found:
-#ifdef CONFIG_ZONE_DMA
+#ifdef SLAB_DMA_ENABLED
 	if (flags & GFP_DMA)
 		return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
 						flags, node);
Index: linux/mm/slab.c
===================================================================
--- linux.orig/mm/slab.c
+++ linux/mm/slab.c
@@ -784,7 +784,7 @@ static inline struct kmem_cache *__find_
 	 * has cs_{dma,}cachep==NULL. Thus no special case
 	 * for large kmalloc calls required.
 	 */
-#ifdef CONFIG_ZONE_DMA
+#ifdef SLAB_DMA_ENABLED
 	if (unlikely(gfpflags & GFP_DMA))
 		return csizep->cs_dmacachep;
 #endif
@@ -1551,7 +1551,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL);
 	}
-#ifdef CONFIG_ZONE_DMA
+#ifdef SLAB_DMA_ENABLED
 	sizes->cs_dmacachep = kmem_cache_create(
 					names->name_dma,
 					sizes->cs_size,
@@ -2362,7 +2362,7 @@ kmem_cache_create (const char *name, siz
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
 	cachep->gfpflags = 0;
-	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
+	if (SLAB_DMA_ENABLED_FLAG && (flags & SLAB_CACHE_DMA))
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
@@ -2688,7 +2688,7 @@ static void cache_init_objs(struct kmem_
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (CONFIG_ZONE_DMA_FLAG) {
+	if (SLAB_DMA_ENABLED_FLAG) {
 		if (flags & GFP_DMA)
 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
 		else