Fix slub to check for CONFIG_MASK_ALLOC too

Will all go away later once the last GFP_DMA users are gone.

Signed-off-by: Andi Kleen

Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -179,7 +179,9 @@ static __always_inline struct kmem_cache
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
+/* MASK_ALLOC support will go away */
+#if defined(CONFIG_ZONE_DMA) || defined(CONFIG_MASK_ALLOC)
+#define ENABLE_SLUB_DMA 1
 #define SLUB_DMA __GFP_DMA
 #else
 /* Disable DMA functionality */
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c
+++ linux/mm/slub.c
@@ -2397,7 +2397,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef ENABLE_SLUB_DMA
 static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
@@ -2459,7 +2459,7 @@ panic:
 	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
 }
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef ENABLE_SLUB_DMA
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2568,7 +2568,7 @@ static struct kmem_cache *get_slab(size_
 	} else
 		index = fls(size - 1);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef ENABLE_SLUB_DMA
 	if (unlikely((flags & SLUB_DMA)))
 		return dma_kmalloc_cache(index, flags);
 
@@ -3802,7 +3802,7 @@ static ssize_t hwcache_align_show(struct
 }
 SLAB_ATTR_RO(hwcache_align);
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef ENABLE_SLUB_DMA
 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
 {
 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
@@ -4024,7 +4024,7 @@ static struct attribute *slab_attrs[] = 
 	&shrink_attr.attr,
 	&alloc_calls_attr.attr,
 	&free_calls_attr.attr,
-#ifdef CONFIG_ZONE_DMA
+#ifdef ENABLE_SLUB_DMA
 	&cache_dma_attr.attr,
 #endif
 #ifdef CONFIG_NUMA