Run dma cleanup immediately if the queue is almost full, instead of waiting
for the tx interrupt

Signed-off-by: Felix Fietkau <nbd@xxxxxxxx>
---
 drivers/net/wireless/mediatek/mt76/dma.c  | 3 +++
 drivers/net/wireless/mediatek/mt76/mt76.h | 1 +
 drivers/net/wireless/mediatek/mt76/tx.c   | 8 ++++++++
 3 files changed, 12 insertions(+)

diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6255f4f0a455..73eeb00d5aa6 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -88,6 +88,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	int i;
 
 	spin_lock_init(&q->lock);
+	spin_lock_init(&q->cleanup_lock);
 	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
 	q->ndesc = n_desc;
@@ -225,6 +226,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 	if (!q)
 		return;
 
+	spin_lock_bh(&q->cleanup_lock);
 	if (flush)
 		last = -1;
 	else
@@ -243,6 +245,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 			last = readl(&q->regs->dma_idx);
 	}
+	spin_unlock_bh(&q->cleanup_lock);
 
 	if (flush) {
 		spin_lock_bh(&q->lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index ea80ae188dd6..3e496a188bf0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -126,6 +126,7 @@ struct mt76_queue {
 	struct mt76_queue_regs __iomem *regs;
 
 	spinlock_t lock;
+	spinlock_t cleanup_lock;
 	struct mt76_queue_entry *entry;
 	struct mt76_desc *desc;
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 4026e5f4f3f3..1e20afb70fc1 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -448,6 +448,7 @@ static int
 mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 {
 	struct mt76_queue *q = phy->q_tx[qid];
+	struct mt76_dev *dev = phy->dev;
 	struct ieee80211_txq *txq;
 	struct mt76_txq *mtxq;
 	struct mt76_wcid *wcid;
@@ -461,6 +462,13 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
 			break;
 		}
 
+		if (dev->queue_ops->tx_cleanup &&
+		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
+			spin_unlock_bh(&q->lock);
+			dev->queue_ops->tx_cleanup(dev, q, false);
+			spin_lock_bh(&q->lock);
+		}
+
 		if (mt76_txq_stopped(q))
 			break;
-- 
2.28.0
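
Side note for readers of the tx.c hunk: the snippet below restates the almost-full test in isolation as a standalone sketch. It is illustration only, not part of the patch; struct txq_state is a simplified stand-in for struct mt76_queue, and the MT_TXQ_FREE_THR value of 64 is assumed here for the example (the real define lives in mt76.h).

/*
 * Illustration only -- not part of the patch above. Restates the
 * almost-full check added to mt76_txq_schedule_list() with simplified
 * stand-in types.
 */
#include <stdbool.h>

#define MT_TXQ_FREE_THR	64	/* assumed value for illustration; the real define is in mt76.h */

struct txq_state {
	int queued;		/* descriptors currently occupied in the ring */
	int ndesc;		/* total descriptors in the ring */
};

/*
 * True when fewer than 2 * MT_TXQ_FREE_THR slots remain free, i.e. when
 * the scheduler should reclaim completed entries right away instead of
 * waiting for the tx completion interrupt.
 */
static bool txq_almost_full(const struct txq_state *q)
{
	return q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc;
}

With, say, a 512-entry ring and the assumed threshold of 64, cleanup would be triggered once 384 or more descriptors are queued.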