With the DMA bouncing of unaligned kmalloc() buffers now in place, enable
it for arm64 to allow the kmalloc-{8,16,32,48,96} caches. In addition,
always create the swiotlb buffer even when the end of RAM is within the
32-bit physical address range (the swiotlb buffer can still be disabled
on the kernel command line).

Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
---
 arch/arm64/Kconfig   | 1 +
 arch/arm64/mm/init.c | 7 ++++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index d11340b41703..1dcfa78f131f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -102,6 +102,7 @@ config ARM64
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
+	select ARCH_WANT_KMALLOC_DMA_BOUNCE
 	select ARCH_WANT_LD_ORPHAN_WARN
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 66e70ca47680..3ac2e9d79ce4 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -442,7 +442,12 @@ void __init bootmem_init(void)
  */
 void __init mem_init(void)
 {
-	swiotlb_init(max_pfn > PFN_DOWN(arm64_dma_phys_limit), SWIOTLB_VERBOSE);
+	bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
+
+	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+		swiotlb = true;
+
+	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
 
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
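
For context, a minimal standalone sketch of the swiotlb decision the
mem_init() hunk introduces. The PFN values and the config switch below are
hypothetical stand-ins for the kernel's max_pfn, PFN_DOWN(arm64_dma_phys_limit)
and IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC), chosen only to make the
logic observable in userspace:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for max_pfn and PFN_DOWN(arm64_dma_phys_limit):
 * here all of RAM sits below the 32-bit physical address limit. */
#define MAX_PFN			0x80000UL
#define DMA_PHYS_LIMIT_PFN	0x100000UL

/* Stand-in for IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC). */
#define DMA_BOUNCE_UNALIGNED_KMALLOC	1

int main(void)
{
	/* Without kmalloc() bouncing, the swiotlb buffer is only needed
	 * when RAM extends past the 32-bit DMA limit. */
	bool swiotlb = MAX_PFN > DMA_PHYS_LIMIT_PFN;

	/* With bouncing enabled, the buffer must always exist so that
	 * unaligned kmalloc() buffers can be bounced through it, even
	 * when the end of RAM is DMA-addressable. */
	if (DMA_BOUNCE_UNALIGNED_KMALLOC)
		swiotlb = true;

	printf("create swiotlb buffer: %s\n", swiotlb ? "yes" : "no");
	return 0;
}

With these stand-in values the first test alone would skip swiotlb creation;
the config check is what forces the buffer into existence, matching the
behaviour described in the commit message.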