If an architecture opted in to DMA bouncing of unaligned kmalloc() buffers
(ARCH_WANT_KMALLOC_DMA_BOUNCE), reduce the minimum kmalloc() cache alignment
below the cache-line size to ARCH_KMALLOC_MINALIGN.

Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Robin Murphy <robin.murphy@xxxxxxx>
---
 mm/slab_common.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7c6475847fdf..84e5a5e435d6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -18,6 +18,7 @@
 #include <linux/uaccess.h>
 #include <linux/seq_file.h>
 #include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
 #include <linux/proc_fs.h>
 #include <linux/debugfs.h>
 #include <linux/kasan.h>
@@ -865,7 +866,13 @@ void __init setup_kmalloc_cache_index_table(void)
 
 static unsigned int __kmalloc_minalign(void)
 {
-	return dma_get_cache_alignment();
+	int cache_align = dma_get_cache_alignment();
+
+	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) ||
+	    io_tlb_default_mem.nslabs == 0)
+		return cache_align;
+
+	return ARCH_KMALLOC_MINALIGN;
 }
 
 void __init
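
For clarity, the full __kmalloc_minalign() after this hunk is applied would
read roughly as below; the comments are illustrative and not part of the diff:

	/*
	 * Minimum alignment for the kmalloc() caches. When DMA bouncing of
	 * unaligned small buffers is possible (DMA_BOUNCE_UNALIGNED_KMALLOC
	 * enabled and a swiotlb buffer allocated), the alignment can be
	 * relaxed from the DMA cache alignment to ARCH_KMALLOC_MINALIGN.
	 */
	static unsigned int __kmalloc_minalign(void)
	{
		int cache_align = dma_get_cache_alignment();

		/* No bouncing possible: keep the full cache-line alignment */
		if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) ||
		    io_tlb_default_mem.nslabs == 0)
			return cache_align;

		/* swiotlb can bounce unaligned buffers: relax the alignment */
		return ARCH_KMALLOC_MINALIGN;
	}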