Enable GFP_DMA in sl[au]b.c for the mask allocator too v2

As long as not all kmalloc users are converted yet, even the mask
allocator still needs GFP_DMA support in the slab allocators.  So
enable it also for !CONFIG_ZONE_DMA kernels when CONFIG_MASK_ALLOC
is set.

This code will go away again once the conversion is complete.

v2: Fix SLUB_DMA (Michael Buesch)

Signed-off-by: Andi Kleen

---
 include/linux/slab_def.h |    6 +++---
 include/linux/slub_def.h |    2 +-
 mm/slab.c                |   12 ++++++++----
 mm/slub.c                |   14 +++++++++-----
 4 files changed, 21 insertions(+), 13 deletions(-)

Index: linux/mm/slab.c
===================================================================
--- linux.orig/mm/slab.c
+++ linux/mm/slab.c
@@ -125,6 +125,10 @@
  *	FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
  */
 
+#if defined(CONFIG_ZONE_DMA) || defined(CONFIG_MASK_ALLOC)
+#define LOW_DMA 1
+#endif
+
 #ifdef CONFIG_DEBUG_SLAB
 #define	DEBUG		1
 #define	STATS		1
@@ -784,7 +788,7 @@ static inline struct kmem_cache *__find_
 	 * has cs_{dma,}cachep==NULL. Thus no special case
 	 * for large kmalloc calls required.
 	 */
-#ifdef CONFIG_ZONE_DMA
+#ifdef LOW_DMA
 	if (unlikely(gfpflags & GFP_DMA))
 		return csizep->cs_dmacachep;
 #endif
@@ -1551,7 +1555,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL);
 	}
-#ifdef CONFIG_ZONE_DMA
+#ifdef LOW_DMA
 	sizes->cs_dmacachep = kmem_cache_create(
 					names->name_dma,
 					sizes->cs_size,
@@ -2362,7 +2366,7 @@ kmem_cache_create (const char *name, siz
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
 	cachep->gfpflags = 0;
-	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
+	if (LOW_DMA && (flags & SLAB_CACHE_DMA))
 		cachep->gfpflags |= GFP_DMA;
 	cachep->buffer_size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
@@ -2688,7 +2692,7 @@ static void cache_init_objs(struct kmem_
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
 {
-	if (CONFIG_ZONE_DMA_FLAG) {
+	if (LOW_DMA) {
 		if (flags & GFP_DMA)
 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
 		else
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -100,6 +100,10 @@
  *			the fast path and disables lockless freelists.
  */
 
+#if defined(CONFIG_ZONE_DMA) || defined(CONFIG_MASK_ALLOC)
+#define LOW_DMA 1
+#endif
+
 #define FROZEN (1 << PG_active)
 
 #ifdef CONFIG_SLUB_DEBUG
@@ -2397,7 +2401,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef LOW_DMA
 static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
@@ -2459,7 +2463,7 @@ panic:
 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
 }
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef LOW_DMA
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2568,7 +2572,7 @@ static struct kmem_cache *get_slab(size_
 	} else
 		index = fls(size - 1);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef LOW_DMA
 	if (unlikely((flags & SLUB_DMA)))
 		return dma_kmalloc_cache(index, flags);
 
@@ -3802,7 +3806,7 @@ static ssize_t hwcache_align_show(struct
 }
 SLAB_ATTR_RO(hwcache_align);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef LOW_DMA
 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
@@ -4024,7 +4028,7 @@ static struct attribute *slab_attrs[] =
 	&shrink_attr.attr,
 	&alloc_calls_attr.attr,
 	&free_calls_attr.attr,
-#ifdef CONFIG_ZONE_DMA
+#ifdef LOW_DMA
 	&cache_dma_attr.attr,
 #endif
 #ifdef CONFIG_NUMA
Index: linux/include/linux/slab_def.h
===================================================================
--- linux.orig/include/linux/slab_def.h
+++ linux/include/linux/slab_def.h
@@ -19,7 +19,7 @@ struct cache_sizes {
 	size_t		 	cs_size;
 	struct kmem_cache	*cs_cachep;
-#ifdef CONFIG_ZONE_DMA
+#if defined(CONFIG_ZONE_DMA) || defined(CONFIG_MASK_ALLOC)
 	struct kmem_cache	*cs_dmacachep;
 #endif
 };
 
@@ -48,7 +48,7 @@ static inline void *kmalloc(size_t size,
 			__you_cannot_kmalloc_that_much();
 	}
 found:
-#ifdef CONFIG_ZONE_DMA
+#if defined(CONFIG_ZONE_DMA) || defined(CONFIG_MASK_ALLOC)
 	if (flags & GFP_DMA)
 		return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
 						flags);
@@ -82,7 +82,7 @@ static inline void *kmalloc_node(size_t 
 			__you_cannot_kmalloc_that_much();
 	}
 found:
-#ifdef CONFIG_ZONE_DMA
+#if defined(CONFIG_ZONE_DMA) || defined(CONFIG_MASK_ALLOC)
 	if (flags & GFP_DMA)
 		return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
 						flags, node);
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -179,7 +179,7 @@ static __always_inline struct kmem_cache
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
+#if defined(CONFIG_ZONE_DMA) || defined(CONFIG_MASK_ALLOC)
 #define SLUB_DMA __GFP_DMA
 #else
 /* Disable DMA functionality */
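
For illustration only (not part of the patch to be applied; the helper
names below are made up): this is the kind of not-yet-converted GFP_DMA
user that the hunks above keep working.  On a !CONFIG_ZONE_DMA kernel
with CONFIG_MASK_ALLOC set, kmalloc(..., GFP_DMA) is still backed by
the separate DMA slab caches (cs_dmacachep for SLAB, kmalloc_caches_dma
for SLUB), which the mask allocator relies on until the conversion is
done.

	#include <linux/slab.h>
	#include <linux/gfp.h>

	/* Made-up legacy-style helper: allocate a buffer for ISA-style
	 * DMA through the old GFP_DMA kmalloc interface. */
	static void *alloc_legacy_dma_buf(size_t len)
	{
		return kmalloc(len, GFP_KERNEL | GFP_DMA);
	}

	static void free_legacy_dma_buf(void *buf)
	{
		kfree(buf);
	}

Once all such callers are converted to the mask allocator, the LOW_DMA
define and the DMA kmalloc caches can be deleted again, as noted in the
description.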