--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -24,7 +24,7 @@ mt76_alloc_txwi(struct mt76_dev *dev)
 	if (!txwi)
 		return NULL;
 
-	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
 			      DMA_TO_DEVICE);
 	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
 	t->dma_addr = addr;
@@ -78,7 +78,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
 
 	local_bh_disable();
 	while ((t = __mt76_get_txwi(dev)) != NULL) {
-		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				 DMA_TO_DEVICE);
 		kfree(mt76_get_txwi_ptr(dev, t));
 	}
@@ -127,7 +127,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	q->hw_idx = idx;
 
 	size = q->ndesc * sizeof(struct mt76_desc);
-	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
+	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
 	if (!q->desc)
 		return -ENOMEM;
 
@@ -209,11 +209,11 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	struct mt76_queue_entry *e = &q->entry[idx];
 
 	if (!e->skip_buf0)
-		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
+		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
 				 DMA_TO_DEVICE);
 
 	if (!e->skip_buf1)
-		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
+		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
 				 DMA_TO_DEVICE);
 
 	if (e->txwi == DMA_DUMMY_DATA)
@@ -293,7 +293,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 	if (info)
 		*info = le32_to_cpu(desc->info);
 
-	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
+	dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
 	e->buf = NULL;
 
 	return buf;
@@ -330,9 +330,9 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
 	if (q->queued + 1 >= q->ndesc - 1)
 		goto error;
 
-	addr = dma_map_single(dev->dev, skb->data, skb->len,
+	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
 			      DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev, addr)))
+	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
 		goto error;
 
 	buf.addr = addr;
@@ -379,8 +379,8 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		mt76_insert_hdr_pad(skb);
 
 	len = skb_headlen(skb);
-	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(dev->dev, addr)))
+	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
 		goto free;
 
 	tx_info.buf[n].addr = t->dma_addr;
@@ -392,9 +392,9 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		if (n == ARRAY_SIZE(tx_info.buf))
 			goto unmap;
 
-		addr = dma_map_single(dev->dev, iter->data, iter->len,
+		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
 				      DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dev, addr)))
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
 			goto unmap;
 
 		tx_info.buf[n].addr = addr;
@@ -407,10 +407,10 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 		goto unmap;
 	}
 
-	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				DMA_TO_DEVICE);
 	ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
-	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
 				   DMA_TO_DEVICE);
 	if (ret < 0)
 		goto unmap;
@@ -420,7 +420,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
 
 unmap:
 	for (n--; n > 0; n--)
-		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
 				 tx_info.buf[n].len, DMA_TO_DEVICE);
 
 free:
@@ -465,8 +465,8 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
 		if (!buf)
 			break;
 
-		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(dev->dev, addr))) {
+		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
 			skb_free_frag(buf);
 			break;
 		}
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -545,6 +545,7 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 	dev->hw = hw;
 	dev->dev = pdev;
 	dev->drv = drv_ops;
+	dev->dma_dev = pdev;
 
 	phy = &dev->phy;
 	phy->dev = dev;
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -698,6 +698,7 @@ struct mt76_dev {
 	const struct mt76_driver_ops *drv;
 	const struct mt76_mcu_ops *mcu_ops;
 	struct device *dev;
+	struct device *dma_dev;
 	struct mt76_mcu mcu;
 
 	struct net_device napi_dev;
WED support requires using non-coherent DMA, whereas the PCI device
might be configured for coherent DMA. The WED driver will take care of
changing the PCI HIF coherent IO setting on attach.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
 drivers/net/wireless/mediatek/mt76/dma.c      | 34 +++++++++----------
 drivers/net/wireless/mediatek/mt76/mac80211.c |  1 +
 drivers/net/wireless/mediatek/mt76/mt76.h     |  1 +
 3 files changed, 19 insertions(+), 17 deletions(-)
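For context, the mechanism is a single level of indirection: every DMA
mapping call in dma.c now goes through dev->dma_dev, which
mt76_alloc_device() initializes to the PCI device, so an attach hook can
retarget DMA mapping later without touching any call site. The
stand-alone sketch below models that indirection in plain user-space C;
the struct device stand-in, struct mdev, map_buffer() and the "wed-dev"
swap are illustrative only, not mt76 or WED code.

#include <stdio.h>

/* Toy stand-in for the kernel's struct device. */
struct device {
	const char *name;
};

/*
 * Model of the two pointers mt76_dev now carries:
 * - dev:     control path, always the probed PCI device
 * - dma_dev: the device every mapping call uses; starts out equal
 *            to dev, but can be retargeted on attach
 */
struct mdev {
	struct device *dev;
	struct device *dma_dev;
};

/* Stands in for the dma_map_single(dev->dma_dev, ...) call sites. */
static void map_buffer(struct mdev *m)
{
	printf("mapping via %s\n", m->dma_dev->name);
}

int main(void)
{
	struct device pci = { .name = "pci-dev" };
	struct device wed = { .name = "wed-dev" };
	/* mt76_alloc_device() sets both pointers to the PCI device. */
	struct mdev m = { .dev = &pci, .dma_dev = &pci };

	map_buffer(&m);		/* default: the PCI device */
	m.dma_dev = &wed;	/* what a hypothetical WED attach would do */
	map_buffer(&m);		/* every call site now maps via WED */
	return 0;
}

Built and run, this prints "mapping via pci-dev" and then "mapping via
wed-dev": the same retargeting that lets the WED driver switch the PCI
HIF to non-coherent IO and take over DMA mapping with no further
changes to dma.c.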