@@ -93,7 +93,7 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 {
 	int i;
 
-	if (!q)
+	if (!q || !q->ndesc)
 		return;
 
 	/* clear descriptors */
@@ -233,7 +233,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
 	struct mt76_queue_entry entry;
 	int last;
 
-	if (!q)
+	if (!q || !q->ndesc)
 		return;
 
 	spin_lock_bh(&q->cleanup_lock);
@@ -448,6 +448,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 	int len = SKB_WITH_OVERHEAD(q->buf_size);
 	int offset = q->buf_offset;
 
+	if (!q->ndesc)
+		return 0;
+
 	spin_lock_bh(&q->lock);
 
 	while (q->queued < q->ndesc - 1) {
@@ -484,6 +487,9 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
 	void *buf;
 	bool more;
 
+	if (!q->ndesc)
+		return;
+
 	spin_lock_bh(&q->lock);
 	do {
 		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
@@ -508,6 +514,9 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
 	struct mt76_queue *q = &dev->q_rx[qid];
 	int i;
 
+	if (!q->ndesc)
+		return;
+
 	for (i = 0; i < q->ndesc; i++)
 		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 
@@ -85,6 +85,7 @@ enum mt76_rxq_id {
 	MT_RXQ_MCU_WA,
 	MT_RXQ_EXT,
 	MT_RXQ_EXT_WA,
+	MT_RXQ_MAIN_WA,
 	__MT_RXQ_MAX
 };
 
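
Every hunk above applies the same guard: a queue whose ndesc is zero was never allocated, so the shared DMA helpers bail out early (or report zero refilled frames) rather than touch a descriptor ring that does not exist. Below is a minimal user-space sketch of that pattern, not mt76 code; the toy_desc/toy_queue structures and the toy_* function names are illustrative stand-ins.

#include <stdio.h>
#include <string.h>

struct toy_desc {
	unsigned int ctrl;
};

struct toy_queue {
	struct toy_desc *desc;	/* NULL when the ring was never allocated */
	int ndesc;		/* 0 when the ring was never allocated */
	int queued;
};

/* Mirrors the "!q || !q->ndesc" guard: unknown or unallocated queues are no-ops. */
static void toy_queue_reset(struct toy_queue *q)
{
	if (!q || !q->ndesc)
		return;

	memset(q->desc, 0, sizeof(*q->desc) * q->ndesc);
	q->queued = 0;
}

/* Mirrors the rx-fill style guard: an unallocated queue refills zero frames. */
static int toy_queue_fill(struct toy_queue *q)
{
	int frames = 0;

	if (!q->ndesc)
		return 0;

	while (q->queued < q->ndesc - 1) {
		q->queued++;
		frames++;
	}
	return frames;
}

int main(void)
{
	struct toy_desc ring[8] = { { 0 } };
	struct toy_queue real = { .desc = ring, .ndesc = 8 };
	struct toy_queue unused = { 0 };	/* e.g. a ring this chip does not use */

	toy_queue_reset(&real);
	toy_queue_reset(&unused);		/* silently skipped, no crash */

	printf("real queue refilled %d frames\n", toy_queue_fill(&real));
	printf("unused queue refilled %d frames\n", toy_queue_fill(&unused));
	return 0;
}

The intent in the driver appears to be the same: per-chip code can leave optional rings unconfigured (for example the newly added MT_RXQ_MAIN_WA slot on hardware that does not use it), and the common reset, fill, and cleanup paths simply fall through these checks for them.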