
wifi: mt76: mt7921: fix interference with kernel scheduler

Message ID 57c68a7ce1dd9022fa5e06af2c53d6313f30ec83.1731069062.git.quan.zhou@mediatek.com
State New
Series wifi: mt76: mt7921: fix interference with kernel scheduler

Commit Message

Quan Zhou Nov. 8, 2024, 12:59 p.m. UTC
During DMA init or reset, buffers must be allocated for
every rx ring entry, which is a time-consuming process.
Holding the queue spinlock for the whole fill is not
needed in these paths, so add a new helper,
mt76_dma_rx_fill_buf(), which fills the ring without
taking the lock, and use it there to avoid interfering
with the kernel scheduler.

Signed-off-by: Quan Zhou <quan.zhou@mediatek.com>
Reviewed-by: Shayne Chen <shayne.chen@mediatek.com>
Reviewed-by: Deren Wu <deren.wu@mediatek.com>
---
 drivers/net/wireless/mediatek/mt76/dma.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)
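
A condensed sketch of the resulting split, abridged from the hunks
below (the body of the fill loop is elided here): the rx ring fill
loop moves into a lockless helper, while the existing exported entry
point keeps its locking semantics as a thin wrapper. The init and
reset paths call the helper directly; in mt76_dma_init() it runs
before napi_enable(), as the last hunk shows.

 /* Abridged sketch of the split introduced by this patch. */
 static int
 mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
		      bool allow_direct)
 {
	 /* ... fill loop from the patch: allocates rx buffers and
	  * kicks the queue without taking q->lock, setting frames
	  * to the number of buffers queued ...
	  */
	 return frames;
 }

 int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		      bool allow_direct)
 {
	 int frames;

	 if (!q->ndesc)
		 return 0;

	 spin_lock_bh(&q->lock);
	 frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
	 spin_unlock_bh(&q->lock);

	 return frames;
 }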

Patch

diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 5f46d6daeaa7..844af16ee551 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -631,7 +631,8 @@  mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
 	return ret;
 }
 
-int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+static int
+mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
 		     bool allow_direct)
 {
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
@@ -640,8 +641,6 @@  int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	if (!q->ndesc)
 		return 0;
 
-	spin_lock_bh(&q->lock);
-
 	while (q->queued < q->ndesc - 1) {
 		struct mt76_queue_buf qbuf = {};
 		enum dma_data_direction dir;
@@ -674,6 +673,19 @@  int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 	if (frames || mt76_queue_is_wed_rx(q))
 		mt76_dma_kick_queue(dev, q);
 
+	return frames;
+}
+
+int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+		     bool allow_direct)
+{
+	int frames;
+
+	if (!q->ndesc)
+		return 0;
+
+	spin_lock_bh(&q->lock);
+	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
 	spin_unlock_bh(&q->lock);
 
 	return frames;
@@ -796,7 +808,7 @@  mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 		return;
 
 	mt76_dma_sync_idx(dev, q);
-	mt76_dma_rx_fill(dev, q, false);
+	mt76_dma_rx_fill_buf(dev, q, false);
 }
 
 static void
@@ -969,7 +981,7 @@  mt76_dma_init(struct mt76_dev *dev,
 
 	mt76_for_each_q_rx(dev, i) {
 		netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
-		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
+		mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
 		napi_enable(&dev->napi[i]);
 	}