On 2024-02-04 6:59 am, Andrea della Porta wrote:
From: Phil Elwell <phil@xxxxxxxxxxxxxxx>
Unless the DMA mask is set wider than 32 bits, DMA mapping will use a
bounce buffer. Set the mask from per-SoC configuration data instead of
hard-coding it: 32 bits on BCM2835 and 36 bits on BCM2711.
Signed-off-by: Phil Elwell <phil@xxxxxxxxxxxxxxx>
---
drivers/dma/bcm2835-dma.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 36bad198b655..237dcdb8d726 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -39,6 +39,7 @@
#define BCM2711_DMA_MEMCPY_CHAN 14
struct bcm2835_dma_cfg_data {
+ u64 dma_mask;
u32 chan_40bit_mask;
};
@@ -308,10 +309,12 @@ DEFINE_SPINLOCK(memcpy_lock);
static const struct bcm2835_dma_cfg_data bcm2835_dma_cfg = {
.chan_40bit_mask = 0,
+ .dma_mask = DMA_BIT_MASK(32),
};
static const struct bcm2835_dma_cfg_data bcm2711_dma_cfg = {
.chan_40bit_mask = BIT(11) | BIT(12) | BIT(13) | BIT(14),
+ .dma_mask = DMA_BIT_MASK(36),
};
static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c)
@@ -1263,6 +1266,8 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
static int bcm2835_dma_probe(struct platform_device *pdev)
{
+ const struct bcm2835_dma_cfg_data *cfg_data;
+ const struct of_device_id *of_id;
struct bcm2835_dmadev *od;
struct resource *res;
void __iomem *base;
@@ -1272,13 +1277,20 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
int irq_flags;
uint32_t chans_available;
char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];
- const struct of_device_id *of_id;
int chan_count, chan_start, chan_end;
+ of_id = of_match_node(bcm2835_dma_of_match, pdev->dev.of_node);
+ if (!of_id) {
+ dev_err(&pdev->dev, "Failed to match compatible string\n");
+ return -EINVAL;
+ }
+
+ cfg_data = of_id->data;
We've had of_device_get_match_data() for nearly 9 years now, and even a
generic device_get_match_data() for 6 ;)
+
if (!pdev->dev.dma_mask)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
[ Passing nit: that also really shouldn't be there, especially since
cdfee5623290 ]
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, cfg_data->dma_mask);
Wait, does chan_40bit_mask mean that you still have some channels which
*can't* address this full mask? If so, this can't work properly. You may
well need to redesign a bit further to have a separate DMA device for
each channel such that they can each have different masks.
Thanks,
Robin.
if (rc) {
dev_err(&pdev->dev, "Unable to set DMA mask\n");
return rc;
@@ -1342,7 +1354,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
- od->cfg_data = of_id->data;
+ od->cfg_data = cfg_data;
/* Request DMA channel mask from device tree */
if (of_property_read_u32(pdev->dev.of_node,