[RESEND PATCH 1/2]: Fix DMAR Error NO_PASID when IOMMU is enabled

Hi,

We had a "DMAR Error NO_PASID" error reported in the kernel log
when the IOMMU was enabled.

This is due to the missing write-back area for the C2H stream.
One note: I did not compile this against the latest kernel source tree,
as it is an extract of our backport of the XDMA support.
Feel free to contact me about any issue with it.
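
For context, here is a rough sketch (not part of the patch, just an
illustration) of how the C2H write-back block is meant to be consumed once
the engine has written it. The structure layout and the 0x52B4 magic come
from the patch below; exactly where the magic sits inside the status word
is an assumption of this sketch, as is the helper name.

#define XDMA_C2H_WB_MAGIC	0x52B4

/* Hypothetical helper, for illustration only. */
static bool xdma_c2h_wb_done(const struct xdma_c2h_write_back *wb, u32 *len)
{
	u32 status = le32_to_cpu(READ_ONCE(wb->magic_status_bit));

	/* Assumption: the magic occupies the upper 16 bits of the word. */
	if ((status >> 16) != XDMA_C2H_WB_MAGIC)
		return false;

	*len = le32_to_cpu(wb->length);	/* effective transfer length in bytes */
	return true;
}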

Hope this helps,
Eric.

Below is my patch (corrected), after one side note on the allocation.
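
The patch carves the write-back block out of the per-channel descriptor
pool by enlarging the pool's block size. A standalone coherent allocation
would also work; a rough sketch of that alternative (reusing the dev and
write_back_addr names from the patch, and explicitly not what the patch
below does) could look like:

	/*
	 * Hypothetical alternative, for comparison only: give the C2H
	 * write-back block its own coherent buffer instead of enlarging
	 * every descriptor-pool block.
	 */
	xdma_chan->write_back = dma_alloc_coherent(dev,
					sizeof(*xdma_chan->write_back),
					&write_back_addr, GFP_KERNEL);
	if (!xdma_chan->write_back)
		return -ENOMEM;
	xdma_chan->write_back->dma_addr = write_back_addr;

The corresponding free in xdma_free_chan_resources() would then be
dma_free_coherent() instead of dma_pool_free(). The patch keeps the
pool-based approach so the write-back block shares the existing
descriptor pool.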

From b8d71851e6a146dcb448b01a671f455afb09ae90 Mon Sep 17 00:00:00 2001
From: Eric DEBIEF <debief@xxxxxxxxxxxx>
Date: Wed, 22 May 2024 12:33:06 +0200
Subject: FIX: DMAR Error with IOMMU enabled.

The C2H write-back area was not allocated and set, which leads to the
DMAR error.

Add the write-back structure, allocate it from the descriptor pool, and
use its DMA address as the descriptors' src field when no source address
is configured. This is done in the prep functions.

Signed-off-by: Eric DEBIEF <debief@xxxxxxxxxxxx>
---
 drivers/dma/xilinx/xdma.c | 44 +++++++++++++++++++++++++++++++++++----
 1 file changed, 40 insertions(+), 4 deletions(-)

diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 74d4a953b50f..9ae615165cb6 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -51,6 +51,20 @@ struct xdma_desc_block {
     dma_addr_t    dma_addr;
 };

+/**
+ * struct xdma_c2h_write_back - C2H write-back block, written by the XDMA engine
+ * @magic_status_bit: magic value (0x52B4) once written by the engine
+ * @length: effective transfer length (in bytes)
+ * @padding_1: padding to keep the block aligned on 32 bytes
+ * @dma_addr: DMA address of this block
+ */
+struct xdma_c2h_write_back {
+    __le32 magic_status_bit;
+    __le32 length;
+    u32 padding_1[6];
+    dma_addr_t dma_addr;
+};
+
 /**
  * struct xdma_chan - Driver specific DMA channel structure
  * @vchan: Virtual channel
@@ -61,6 +75,8 @@ struct xdma_desc_block {
  * @dir: Transferring direction of the channel
  * @cfg: Transferring config of the channel
  * @irq: IRQ assigned to the channel
+ * @write_back: C2H metadata write-back block
+ *
  */
 struct xdma_chan {
     struct virt_dma_chan        vchan;
@@ -73,6 +89,7 @@ struct xdma_chan {
     u32                irq;
     struct completion        last_interrupt;
     bool                stop_requested;
+    struct xdma_c2h_write_back *write_back;
 };

 /**
@@ -628,7 +645,8 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
         src = &addr;
         dst = &dev_addr;
     } else {
-        dev_addr = xdma_chan->cfg.src_addr;
+        dev_addr = xdma_chan->cfg.src_addr ?
+            xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr;
         src = &dev_addr;
         dst = &addr;
     }
@@ -705,7 +723,8 @@ xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
         src = &addr;
         dst = &dev_addr;
     } else {
-        dev_addr = xdma_chan->cfg.src_addr;
+        dev_addr = xdma_chan->cfg.src_addr ?
+            xdma_chan->cfg.src_addr : xdma_chan->write_back->dma_addr;
         src = &dev_addr;
         dst = &addr;
     }
@@ -803,6 +822,9 @@ static void xdma_free_chan_resources(struct dma_chan *chan)
     struct xdma_chan *xdma_chan = to_xdma_chan(chan);

     vchan_free_chan_resources(&xdma_chan->vchan);
+    dma_pool_free(xdma_chan->desc_pool,
+                  xdma_chan->write_back,
+                  xdma_chan->write_back->dma_addr);
     dma_pool_destroy(xdma_chan->desc_pool);
     xdma_chan->desc_pool = NULL;
 }
@@ -816,6 +838,7 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
     struct xdma_chan *xdma_chan = to_xdma_chan(chan);
     struct xdma_device *xdev = xdma_chan->xdev_hdl;
     struct device *dev = xdev->dma_dev.dev;
+    dma_addr_t write_back_addr;

     while (dev && !dev_is_pci(dev))
         dev = dev->parent;
@@ -824,13 +847,26 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
         return -EINVAL;
     }

-    xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
-                           XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
+    /* Allocate the pool with room for the C2H write-back block */
+    xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
+                            dev,
+                            XDMA_DESC_BLOCK_SIZE +
+                                sizeof(struct xdma_c2h_write_back),
+                            XDMA_DESC_BLOCK_ALIGN,
+                            XDMA_DESC_BLOCK_BOUNDARY);
     if (!xdma_chan->desc_pool) {
         xdma_err(xdev, "unable to allocate descriptor pool");
         return -ENOMEM;
     }

+    /* Allocate the C2H write-back block out of the pool */
+    xdma_chan->write_back = dma_pool_alloc(xdma_chan->desc_pool, GFP_NOWAIT, &write_back_addr);
+    if (!xdma_chan->write_back) {
+        xdma_err(xdev, "unable to allocate C2H write back block");
+        return -ENOMEM;
+    }
+    xdma_chan->write_back->dma_addr = write_back_addr;
+

     return 0;
 }

--
2.34.1



