@@ -913,6 +913,7 @@ struct iwl_txq {
/**
* struct iwl_trans_txqs - transport tx queues data
*
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @queue_used - bit mask of used queues
* @queue_stopped - bit mask of stopped queues
*/
@@ -922,6 +923,8 @@ struct iwl_trans_txqs {
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
struct dma_pool *bc_pool;
size_t bc_tbl_size;
+ bool bc_table_dword;
+
struct {
u8 fifo;
u8 q_id;
@@ -378,7 +378,6 @@ struct cont_rec {
* @cmd_queue - command queue number
* @def_rx_queue - default rx queue number
* @rx_buf_size: Rx buffer size
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
@@ -466,7 +465,6 @@ struct iwl_trans_pcie {
u16 num_rx_bufs;

enum iwl_amsdu_size rx_buf_size;
- bool bc_table_dword;
bool scd_set_active;
bool sw_csum_tx;
bool pcie_dbg_dumped_once;
@@ -1928,7 +1928,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);

- trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+ trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
@@ -86,11 +86,10 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
*/
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
- struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
@@ -115,7 +114,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;

/* Starting from AX210, the HW expects bytes */
- WARN_ON(trans_pcie->bc_table_dword);
+ WARN_ON(trans->txqs.bc_table_dword);
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
@@ -123,7 +122,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;

/* Before AX210, the HW expects DW */
- WARN_ON(!trans_pcie->bc_table_dword);
+ WARN_ON(!trans->txqs.bc_table_dword);
len = DIV_ROUND_UP(len, 4);
WARN_ON(len > 0xFFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
@@ -784,7 +783,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}

/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
iwl_pcie_gen2_get_num_tbs(trans, tfd));

/* start timer if queue currently empty */
@@ -231,7 +231,7 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
break;
}
- if (trans_pcie->bc_table_dword)
+ if (trans->txqs.bc_table_dword)
len = DIV_ROUND_UP(len, 4);

if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))