In accordance with [1], DMA-able memory buffers must be cacheline-aligned,
otherwise the cache write-back and invalidation performed during the mapping
may cause adjacent data to be lost. This is specifically required on
DMA-noncoherent platforms. Since the opal_dev.{cmd,resp} buffers are used for
DMA by the NVMe and SCSI/SD drivers via the nvme_sec_submit() and
sd_sec_submit() methods respectively, make sure the passed buffers are
cacheline-aligned to prevent the problem described above.

[1] Documentation/core-api/dma-api.rst

Fixes: 455a7b238cd6 ("block: Add Sed-opal library")
Signed-off-by: Serge Semin <Sergey.Semin@xxxxxxxxxxxxxxxxxxxx>
---
 block/sed-opal.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/block/sed-opal.c b/block/sed-opal.c
index 9700197000f2..222acbd1f03a 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -73,6 +73,7 @@ struct parsed_resp {
 	struct opal_resp_tok toks[MAX_TOKS];
 };
 
+/* Presumably DMA-able buffers must be cache-aligned */
 struct opal_dev {
 	bool supported;
 	bool mbr_enabled;
@@ -88,8 +89,8 @@ struct opal_dev {
 	u64 lowest_lba;
 	size_t pos;
 
-	u8 cmd[IO_BUFFER_LENGTH];
-	u8 resp[IO_BUFFER_LENGTH];
+	u8 cmd[IO_BUFFER_LENGTH] ____cacheline_aligned;
+	u8 resp[IO_BUFFER_LENGTH] ____cacheline_aligned;
 
 	struct parsed_resp parsed;
 	size_t prev_d_len;
-- 
2.37.2
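
P.S. For readers unfamiliar with the failure mode: below is a minimal,
hypothetical sketch (not part of the patch) of how a DMA buffer that shares a
cacheline with neighbouring fields can lose data on a DMA-noncoherent
platform. The example_dev/example_receive names and EXAMPLE_BUF_LEN are made
up for illustration; only the dma_map_single()/dma_unmap_single() calls are
the real DMA API.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_BUF_LEN 2048	/* arbitrary size for the illustration */

/* A DMA buffer laid out right next to CPU-only bookkeeping data */
struct example_dev {
	size_t pos;			/* updated by the CPU only */
	u8 resp[EXAMPLE_BUF_LEN];	/* written by the device via DMA */
};

static int example_receive(struct device *dev, struct example_dev *ed)
{
	dma_addr_t handle;

	ed->pos = 0;	/* dirties the cacheline shared with resp[0..] */

	/*
	 * On a DMA-noncoherent platform the mapping (and later the
	 * unmapping) invalidates the cachelines covering resp[]. If resp[]
	 * is not cacheline-aligned, the line it shares with ->pos is
	 * invalidated too, and the not-yet-written-back update of ->pos
	 * above can be thrown away.
	 */
	handle = dma_map_single(dev, ed->resp, sizeof(ed->resp),
				DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the transfer and wait for its completion here ... */

	dma_unmap_single(dev, handle, sizeof(ed->resp), DMA_FROM_DEVICE);

	return 0;
}

Marking cmd[] and resp[] with ____cacheline_aligned, as the patch does, makes
sure no other opal_dev member shares a cacheline with either buffer.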