--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -103,6 +103,18 @@ struct iwl_rx_completion_desc {
u8 reserved2[25];
} __packed;

+/**
+ * struct iwl_rx_completion_desc_bz - Bz completion descriptor
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (bit 0: fragmented, all other bits: reserved)
+ * @reserved: reserved
+ */
+struct iwl_rx_completion_desc_bz {
+ __le16 rbid;
+ u8 flags;
+ u8 reserved[1];
+} __packed;
+
/**
* struct iwl_rxq - Rx queue
* @id: queue index
@@ -133,11 +145,8 @@ struct iwl_rxq {
int id;
void *bd;
dma_addr_t bd_dma;
- union {
- void *used_bd;
- __le32 *bd_32;
- struct iwl_rx_completion_desc *cd;
- };
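+ /* entry layout depends on the device family; see iwl_pcie_used_bd_size() */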
+ void *used_bd;
dma_addr_t used_bd_dma;
u32 read;
u32 write;
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -652,23 +652,31 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}

-static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
- struct iwl_rx_transfer_desc *rx_td;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_transfer_desc);

- if (use_rx_td)
- return sizeof(*rx_td);
- else
- return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
- sizeof(__le32);
+ return trans->trans_cfg->mq_rx_supported ?
+ sizeof(__le64) : sizeof(__le32);
+}
+
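+/* size of one "used BD" (completion) ring entry, which varies by family */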
+static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
+{
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return sizeof(struct iwl_rx_completion_desc_bz);
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return sizeof(struct iwl_rx_completion_desc);
+
+ return sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- bool use_rx_td = (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210);
- int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+ int free_size = iwl_pcie_free_bd_size(trans);

if (rxq->bd)
dma_free_coherent(trans->dev,
@@ -682,8 +690,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,

if (rxq->used_bd)
dma_free_coherent(trans->dev,
- (use_rx_td ? sizeof(*rxq->cd) :
- sizeof(__le32)) * rxq->queue_size,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
@@ -707,7 +715,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
else
rxq->queue_size = RX_QUEUE_SIZE;

- free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+ free_size = iwl_pcie_free_bd_size(trans);

/*
* Allocate the circular buffer of Read Buffer Descriptors
@@ -720,7 +728,9 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,

if (trans->trans_cfg->mq_rx_supported) {
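+ /* "used BD" ring: the device reports which RX buffers it has used here */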
rxq->used_bd = dma_alloc_coherent(dev,
- (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
+ iwl_pcie_used_bd_size(trans) *
+ rxq->queue_size,
&rxq->used_bd_dma,
GFP_KERNEL);
if (!rxq->used_bd)
@@ -1417,6 +1427,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
u16 vid;

BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
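+ /* the device DMAs these descriptors, so their sizes are fixed by hardware */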
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);

if (!trans->trans_cfg->mq_rx_supported) {
rxb = rxq->queue[i];
@@ -1424,11 +1436,21 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
return rxb;
}

- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- vid = le16_to_cpu(rxq->cd[i].rbid);
- *join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
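+ /* completion entry format is family-specific, hence the void *used_bd */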
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
+ } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_rx_completion_desc *cd = rxq->used_bd;
+
+ vid = le16_to_cpu(cd[i].rbid);
+ *join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
} else {
- vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
+ __le32 *cd = rxq->used_bd;
+
+ vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
}

if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))