Prepare for the coming implementation by GCC and Clang of the __counted_by
attribute. Flexible array members annotated with __counted_by can have
their accesses bounds-checked at run-time via CONFIG_UBSAN_BOUNDS (for
array indexing) and CONFIG_FORTIFY_SOURCE (for strcpy/memcpy-family
functions).

As found with Coccinelle[1], add __counted_by for struct stm32_dma_desc.
Additionally, since the element count member must be set before accessing
the annotated flexible array member, move its initialization earlier.

[1] https://github.com/kees/kernel-tools/blob/trunk/coccinelle/examples/counted_by.cocci

Cc: Vinod Koul <vkoul@xxxxxxxxxx>
Cc: Maxime Coquelin <mcoquelin.stm32@xxxxxxxxx>
Cc: Alexandre Torgue <alexandre.torgue@xxxxxxxxxxx>
Cc: dmaengine@xxxxxxxxxxxxxxx
Cc: linux-stm32@xxxxxxxxxxxxxxxxxxxxxxxxxxxx
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Signed-off-by: Kees Cook <keescook@xxxxxxxxxxxx>
---
 drivers/dma/stm32-dma.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 5c36811aa134..a732b3807b11 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -191,7 +191,7 @@ struct stm32_dma_desc {
 	struct virt_dma_desc vdesc;
 	bool cyclic;
 	u32 num_sgs;
-	struct stm32_dma_sg_req sg_req[];
+	struct stm32_dma_sg_req sg_req[] __counted_by(num_sgs);
 };
 
 /**
@@ -1105,6 +1105,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
 	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
+	desc->num_sgs = sg_len;
 
 	/* Set peripheral flow controller */
 	if (chan->dma_sconfig.device_fc)
@@ -1141,8 +1142,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
 		desc->sg_req[i].chan_reg.dma_sm1ar += sg_dma_len(sg);
 		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
 	}
-
-	desc->num_sgs = sg_len;
 	desc->cyclic = false;
 
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -1216,6 +1215,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
 	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
+	desc->num_sgs = num_periods;
 
 	for (i = 0; i < num_periods; i++) {
 		desc->sg_req[i].len = period_len;
@@ -1232,8 +1232,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
 		if (!chan->trig_mdma)
 			buf_addr += period_len;
 	}
-
-	desc->num_sgs = num_periods;
 	desc->cyclic = true;
 
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -1254,6 +1252,7 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
 	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
 	if (!desc)
 		return NULL;
+	desc->num_sgs = num_sgs;
 
 	threshold = chan->threshold;
 
@@ -1283,8 +1282,6 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
 		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
 		desc->sg_req[i].len = xfer_count;
 	}
-
-	desc->num_sgs = num_sgs;
 	desc->cyclic = false;
 
 	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-- 
2.34.1
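
For anyone unfamiliar with the annotation, here is a minimal sketch of
the pattern this patch follows. The struct and function names below are
hypothetical, not from stm32-dma; the point is only that the count
member must be assigned before any element of the annotated array is
touched, since the bounds checks read it:

	/* Hypothetical example, not kernel code from this driver. */
	struct example_desc {
		u32 num_items;
		/* Checks are keyed off num_items once compilers honor this. */
		struct example_item items[] __counted_by(num_items);
	};

	static struct example_desc *example_alloc(u32 n)
	{
		struct example_desc *desc;

		desc = kzalloc(struct_size(desc, items, n), GFP_KERNEL);
		if (!desc)
			return NULL;
		/* Set the count before any items[] access. */
		desc->num_items = n;
		return desc;
	}

On compilers without the attribute, __counted_by expands to nothing, so
the annotation is a no-op there and the code behaves as before.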