Re: [RFC PATCH 2/2] dmaengine: altera: fix spinlock usage

Hi Sylvain,

On 18.09.2017 13:08, Sylvain Lesne wrote:
Since this lock is acquired in both process and IRQ context, failing
to disable IRQs when trying to acquire the lock in process context can
lead to deadlocks.
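
For reference, the failure mode is the classic self-deadlock:
spin_lock_bh() only masks softirqs, so a hard IRQ that fires on the
same CPU while the lock is held will spin on the lock forever. A
minimal sketch of the two contexts (hypothetical names, not the
driver code):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);

static void process_context_path(void)
{
	unsigned long flags;

	/*
	 * spin_lock_bh() would leave hard IRQs enabled here, so
	 * demo_irq_handler() could interrupt us and spin forever.
	 * Disabling IRQs closes that window.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch the shared descriptor lists ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	/* hard IRQ context: IRQs are already off, a plain lock is fine */
	spin_lock(&demo_lock);
	/* ... */
	spin_unlock(&demo_lock);

	return IRQ_HANDLED;
}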

Signed-off-by: Sylvain Lesne <lesne@xxxxxxxxxxx>
---
I'm not sure that this is the "smartest" fix, but I think something
should be done to fix the spinlock usage in this driver (lockdep
agrees!).

I'm a bit unsure here as well, as introducing the irqsave spinlock
variants will be a bit costly time-wise (IRQs stay disabled for the
whole critical section).

Perhaps it's better to move the addition of new descriptors in the
IDLE state from the ISR back to the IRQ tasklet, as I initially
proposed in v2 of this patch:

https://patchwork.kernel.org/patch/9805967/

This way, the lock will no longer be taken from hard IRQ context,
and the "normal" spin_lock_bh() calls should be enough.

What do you think?

Thanks,
Stefan

---
  drivers/dma/altera-msgdma.c | 35 +++++++++++++++++++++--------------
  1 file changed, 21 insertions(+), 14 deletions(-)

diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 35cbf2365f68..339186f25a2a 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@ struct msgdma_device {
  static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
  {
  	struct msgdma_sw_desc *desc;
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
  	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
  	list_del(&desc->node);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);

  	INIT_LIST_HEAD(&desc->tx_list);

@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
  	struct msgdma_device *mdev = to_mdev(tx->chan);
  	struct msgdma_sw_desc *new;
  	dma_cookie_t cookie;
+	unsigned long flags;

  	new = tx_to_desc(tx);
-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
  	cookie = dma_cookie_assign(tx);

  	list_add_tail(&new->node, &mdev->pending_list);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);

  	return cookie;
  }
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
  	struct msgdma_extended_desc *desc;
  	size_t copy;
  	u32 desc_cnt;
+	unsigned long irqflags;

  	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
  	if (desc_cnt > mdev->desc_free_cnt) {
  		spin_unlock_bh(&mdev->lock);
  		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
  		return NULL;
  	}
  	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);

  	do {
  		/* Allocate and populate the descriptor */
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
  	u32 desc_cnt = 0, i;
  	struct scatterlist *sg;
  	u32 stride;
+	unsigned long irqflags;

  	for_each_sg(sgl, sg, sg_len, i)
  		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, irqflags);
  	if (desc_cnt > mdev->desc_free_cnt) {
  		spin_unlock_bh(&mdev->lock);
  		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
  		return NULL;
  	}
  	mdev->desc_free_cnt -= desc_cnt;
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, irqflags);

  	avail = sg_dma_len(sgl);

@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
  static void msgdma_issue_pending(struct dma_chan *chan)
  {
  	struct msgdma_device *mdev = to_mdev(chan);
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
  	msgdma_start_transfer(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
  }

 /**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
  static void msgdma_free_chan_resources(struct dma_chan *dchan)
  {
  	struct msgdma_device *mdev = to_mdev(dchan);
+	unsigned long flags;

-	spin_lock_bh(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);
  	msgdma_free_descriptors(mdev);
-	spin_unlock_bh(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
  	kfree(mdev->sw_desq);
  }
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
  	u32 count;
  	u32 __maybe_unused size;
  	u32 __maybe_unused status;
+	unsigned long flags;

-	spin_lock(&mdev->lock);
+	spin_lock_irqsave(&mdev->lock, flags);

  	/* Read number of responses that are available */
  	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -704,7 +711,7 @@ static void msgdma_tasklet(unsigned long data)
  		msgdma_chan_desc_cleanup(mdev);
  	}

-	spin_unlock(&mdev->lock);
+	spin_unlock_irqrestore(&mdev->lock, flags);
  }

 /**


Best regards,
Stefan

--
DENX Software Engineering GmbH,      Managing Director: Wolfgang Denk
HRB 165235 Munich, Office: Kirchenstr.5, D-82194 Groebenzell, Germany
Phone: (+49)-8142-66989-51 Fax: (+49)-8142-66989-80 Email: sr@xxxxxxx


