Hey Jisheng,
Shouldn't we keep splitting until no DMA block crosses a 128M boundary? I am a noob, but I think we should be prepared for segments that, even after being split in two, are still larger than 128M and therefore still cross a boundary. Feel free to disagree, but please explain why I may be wrong. Thank you.
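Rough, self-contained sketch of the case I mean (the address and length below are made up by me and may not be reachable in practice; BOUNDARY_OK() is copied from your patch and the split arithmetic mirrors dwcmshc_adma_write_desc()):

/* made-up example: a segment longer than 128M still crosses a
 * boundary after a single split at the first boundary */
#include <stdio.h>
#include <stdint.h>

#define SZ_128M (128u * 1024 * 1024)
#define BOUNDARY_OK(addr, len) \
	(((addr) | (SZ_128M - 1)) == (((addr) + (len) - 1) | (SZ_128M - 1)))

int main(void)
{
	uint64_t addr = 0x07f00000;                 /* 1MB below the first 128M boundary */
	uint64_t len  = 3 * (uint64_t)SZ_128M / 2;  /* hypothetical 192MB segment */

	/* split at the next 128M boundary, like dwcmshc_adma_write_desc() does */
	uint64_t tmplen = SZ_128M - (addr & (SZ_128M - 1));
	uint64_t addr2  = addr + tmplen;
	uint64_t len2   = len - tmplen;

	/* the second half still crosses the 256M boundary, so this prints 0 */
	printf("second half within a 128M window? %d\n", BOUNDARY_OK(addr2, len2));
	return 0;
}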
Sincerely,
Matthew Leon
On Sun, Jul 29, 2018 at 10:46 PM, Jisheng Zhang <Jisheng.Zhang@xxxxxxxxxxxxx> wrote:
When using DMA, if the DMA addr spans 128MB boundary, we have to split
the DMA transfer into two so that each one doesn't exceed the boundary.
Signed-off-by: Jisheng Zhang <Jisheng.Zhang@xxxxxxxxxxxxx>
---
drivers/mmc/host/sdhci-of-dwcmshc.c | 43 +++++++++++++++++++++++++++++
1 file changed, 43 insertions(+)
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 1b7cd144fb01..e890fc8f5284 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -8,21 +8,52 @@
*/
#include <linux/clk.h>
+#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/sizes.h>
#include "sdhci-pltfm.h"
+#define BOUNDARY_OK(addr, len) \
+ ((addr | (SZ_128M - 1)) == ((addr + len - 1) | (SZ_128M - 1)))
+
struct dwcmshc_priv {
struct clk *bus_clk;
};
+/*
+ * if DMA addr spans 128MB boundary, we split the DMA transfer into two
+ * so that the DMA transfer doesn't exceed the boundary.
+ */
+static unsigned int dwcmshc_adma_write_desc(struct sdhci_host *host,
+ void *desc, dma_addr_t addr,
+ int len, unsigned int cmd)
+{
+ int tmplen, offset;
+
+ if (likely(!len || BOUNDARY_OK(addr, len)))
+ return _sdhci_adma_write_desc(host, desc, addr, len, cmd);
+
+ offset = addr & (SZ_128M - 1);
+ tmplen = SZ_128M - offset;
+ _sdhci_adma_write_desc(host, desc, addr, tmplen, cmd);
+
+ addr += tmplen;
+ len -= tmplen;
+ desc += host->desc_sz;
+ _sdhci_adma_write_desc(host, desc, addr, len, cmd);
+
+ return host->desc_sz * 2;
+}
+
static const struct sdhci_ops sdhci_dwcmshc_ops = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.reset = sdhci_reset,
+ .adma_write_desc = dwcmshc_adma_write_desc,
};
static const struct sdhci_pltfm_data sdhci_dwcmshc_pdata = {
@@ -36,12 +67,24 @@ static int dwcmshc_probe(struct platform_device *pdev)
struct sdhci_host *host;
struct dwcmshc_priv *priv;
int err;
+ u32 extra;
host = sdhci_pltfm_init(pdev, &sdhci_dwcmshc_pdata,
sizeof(struct dwcmshc_priv));
if (IS_ERR(host))
return PTR_ERR(host);
+ /*
+ * The DMA descriptor table number is calculated as the maximum
+ * number of segments times 2, to allow for an alignment
+ * descriptor for each segment, plus 1 for a nop end descriptor,
+ * plus extra number for cross 128M boundary handling.
+ */
+ extra = DIV_ROUND_UP(totalram_pages, SZ_128M / PAGE_SIZE);
+ if (extra > SDHCI_MAX_SEGS)
+ extra = SDHCI_MAX_SEGS;
+ host->adma_table_num = SDHCI_MAX_SEGS * 2 + 1 + extra;
+
pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
--
2.18.0