dma_map_single should only be called on buffers that have suitable DMA alignment, otherwise cache maintenance may corrupt adjacent memory due to the cache line granularity. Let's verify this is indeed the case at runtime. Signed-off-by: Ahmad Fatoum <a.fatoum@xxxxxxxxxxxxxx> --- drivers/dma/debug.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/dma/debug.c b/drivers/dma/debug.c index f4272e8f5822..32a417504479 100644 --- a/drivers/dma/debug.c +++ b/drivers/dma/debug.c @@ -128,6 +128,10 @@ void debug_dma_map(struct device *dev, void *addr, list_add(&entry->list, &dma_mappings); dma_debug(entry, "allocated\n"); + + if (!IS_ALIGNED(dev_addr, DMA_ALIGNMENT)) + dma_dev_warn(dev, "Mapping insufficiently aligned %s buffer 0x%llx+0x%zx: %u bytes required!\n", + dir2name[direction], (u64)addr, size, DMA_ALIGNMENT);
 void debug_dma_unmap(struct device *dev, dma_addr_t addr, -- 2.39.2