@@ -235,6 +235,16 @@ DECLARE_EWMA(signal, 10, 8);
#define MT_WCID_TX_INFO_TXPWR_ADJ GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET BIT(31)
+#define MT_PACKET_ID_MASK GENMASK(6, 0)
+#define MT_PACKET_ID_NO_ACK 0
+/* Request TXS, but don't try to match with skb. */
+#define MT_PACKET_ID_NO_SKB 1
+#define MT_PACKET_ID_FIRST 2
+#define MT_PACKET_ID_HAS_RATE BIT(7)
+#define MT_PACKET_ID_MAX (GENMASK(7, 0) - 1)
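+/* Sizes the per-wcid skb_status_array: one slot for each pktid in
+ * [MT_PACKET_ID_FIRST, MT_PACKET_ID_MAX].
+ */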
+#define MT_NUM_STATUS_PACKETS \
+ (MT_PACKET_ID_MAX + 1 - MT_PACKET_ID_FIRST)
+
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
@@ -246,6 +256,8 @@ struct mt76_wcid {
struct rate_info rate;
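+ /* Skbs awaiting a TXS callback, indexed by pktid - MT_PACKET_ID_FIRST. */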
+ struct sk_buff *skb_status_array[MT_NUM_STATUS_PACKETS];
+
u16 idx;
u8 hw_key_idx;
u8 hw_key_idx2;
@@ -302,16 +314,16 @@ struct mt76_rx_tid {
#define MT_TX_CB_TXS_DONE BIT(1)
#define MT_TX_CB_TXS_FAILED BIT(2)
-#define MT_PACKET_ID_MASK GENMASK(6, 0)
-#define MT_PACKET_ID_NO_ACK 0
-#define MT_PACKET_ID_NO_SKB 1
-#define MT_PACKET_ID_FIRST 2
-#define MT_PACKET_ID_HAS_RATE BIT(7)
-
-#define MT_TX_STATUS_SKB_TIMEOUT HZ
+/* Timeout for giving up on a TXS callback, measured from the time the
+ * DMA_DONE callback was seen. The packet has been processed by then,
+ * so the firmware should not take long after that to send the TXS
+ * callback if it is going to send one at all.
+ */
+#define MT_TX_STATUS_SKB_TIMEOUT (HZ / 4)
struct mt76_tx_cb {
- unsigned long jiffies;
+ unsigned long jiffies; /* time at which DMA_DONE was reported */
u16 wcid;
u8 pktid;
u8 flags;
@@ -651,6 +663,7 @@ struct mt76_dev {
spinlock_t cc_lock;
u32 cur_cc_bss_rx;
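+ /* When the next stale-entry walk of status_list is due. */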
+ unsigned long next_status_jiffies;
struct mt76_rx_status rx_ampdu_status;
u32 rx_ampdu_len;
@@ -1090,7 +1103,7 @@ struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
struct mt76_wcid *wcid, int pktid,
struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
- struct sk_buff_head *list);
+ struct sk_buff_head *list, struct mt76_wcid *wcid);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
struct list_head *free_list);
static inline void
@@ -1297,13 +1310,24 @@ mt76_token_put(struct mt76_dev *dev, int token)
}
static inline int
-mt76_get_next_pkt_id(struct mt76_wcid *wcid)
+mt76_get_next_pkt_id(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct sk_buff *skb)
{
+ struct sk_buff *qskb;
+
+ lockdep_assert_held(&dev->status_list.lock);
+
wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
- if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
- wcid->packet_id == MT_PACKET_ID_NO_SKB)
+ if (wcid->packet_id < MT_PACKET_ID_FIRST)
wcid->packet_id = MT_PACKET_ID_FIRST;
+ qskb = wcid->skb_status_array[wcid->packet_id - MT_PACKET_ID_FIRST];
+ if (qskb) {
+ /* This pid is already waiting on a TXS callback; don't reuse
+ * it. Other logic will clean up stale entries.
+ */
+ return MT_PACKET_ID_NO_SKB;
+ }
+
return wcid->packet_id;
}
#endif
@@ -1230,7 +1230,7 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
info->status.rates[0].idx = -1;
}
- mt76_tx_status_skb_done(mdev, skb, &list);
+ mt76_tx_status_skb_done(mdev, skb, &list, &sta->wcid);
}
mt76_tx_status_unlock(mdev, &list);
@@ -1433,7 +1433,7 @@ static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
info->status.rates[0].idx = -1;
}
- mt76_tx_status_skb_done(mdev, skb, &list);
+ mt76_tx_status_skb_done(mdev, skb, &list, &sta->wcid);
}
mt76_tx_status_unlock(mdev, &list);
@@ -622,7 +622,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
info = *status.info;
len = status.skb->len;
ac = skb_get_queue_mapping(status.skb);
- mt76_tx_status_skb_done(mdev, status.skb, &list);
+ mt76_tx_status_skb_done(mdev, status.skb, &list, wcid);
} else if (msta) {
len = status.info->status.ampdu_len * ewma_pktlen_read(&msta->pktlen);
ac = FIELD_GET(MT_PKTID_AC, cur_pktid);
@@ -1318,6 +1318,7 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+
if (!skb)
goto out;
@@ -1417,10 +1418,11 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
stats->tx_bw[0]++;
break;
}
+
wcid->rate = rate;
out:
- mt76_tx_status_skb_done(mdev, skb, &list);
+ mt76_tx_status_skb_done(mdev, skb, &list, wcid);
mt76_tx_status_unlock(mdev, &list);
return !!skb;
@@ -732,7 +732,9 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
txwi[7] |= cpu_to_le32(val);
}
-static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi)
+static void mt7921_update_txs(struct mt7921_dev *dev,
+ struct mt76_wcid *wcid, __le32 *txwi,
+ struct sk_buff *skb)
{
struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
u32 pid, frame_type = FIELD_GET(MT_TXD2_FRAME_TYPE, txwi[2]);
@@ -744,7 +746,7 @@ static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi)
return;
msta->next_txs_ts = jiffies + msecs_to_jiffies(250);
- pid = mt76_get_next_pkt_id(wcid);
+ pid = mt76_get_next_pkt_id(&dev->mt76, wcid, skb);
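+ /* The firmware echoes this pid in the TXS event, which is how the
+ * status report gets matched back to this skb.
+ */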
txwi[5] |= cpu_to_le32(MT_TXD5_TX_STATUS_MCU |
FIELD_PREP(MT_TXD5_PID, pid));
}
@@ -771,7 +773,6 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_phy *mphy = &dev->mphy;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
u16 tx_count = 15;
@@ -839,7 +840,7 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
- mt7921_update_txs(wcid, txwi);
+ mt7921_update_txs(dev, wcid, txwi, skb);
}
static void
@@ -36,6 +36,7 @@ mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
+/* Lock the status list and initialize the caller's list for timed-out skbs. */
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
__acquires(&dev->status_list.lock)
@@ -45,6 +46,9 @@ mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
+/* Unlock the status list, and complete any skbs that timed out waiting
+ * for a TXS callback using their last-received status (they are on the
+ * list passed in).
+ */
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
__releases(&dev->status_list.lock)
@@ -80,20 +84,44 @@ EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
- struct sk_buff_head *list)
+ struct sk_buff_head *list, struct mt76_wcid *wcid)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;
+ lockdep_assert_held(&dev->status_list.lock);
+
+ /* Start timer for TXS callback expiration. */
+ if (flags & MT_TX_CB_DMA_DONE)
+ cb->jiffies = jiffies;
+
flags |= cb->flags;
cb->flags = flags;
+ /* Only process the skb once both the TXS status has been received
+ * and the txfree (DMA_DONE) callback has happened.
+ */
if ((flags & done) != done)
return;
__skb_unlink(skb, &dev->status_list);
+ rcu_read_lock();
+ /* The calling code may not know the wcid (for instance in the
+ * tx_status_check path); look it up in that case.
+ */
+ if (!wcid)
+ wcid = rcu_dereference(dev->wcid[cb->wcid]);
+
+ /* Clear the cached skb pointer for this pktid. */
+ if (wcid &&
+ !WARN_ON_ONCE(cb->pktid - MT_PACKET_ID_FIRST
+ >= ARRAY_SIZE(wcid->skb_status_array)))
+ wcid->skb_status_array[cb->pktid - MT_PACKET_ID_FIRST] = NULL;
+ rcu_read_unlock();
+
/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
if (flags & MT_TX_CB_TXS_FAILED) {
info->status.rates[0].count = 0;
@@ -106,9 +134,9 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
- struct sk_buff_head *list)
+ struct sk_buff_head *list, struct mt76_wcid *wcid)
{
- __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list, wcid);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
@@ -133,12 +161,16 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
spin_lock_bh(&dev->status_list.lock);
memset(cb, 0, sizeof(*cb));
- pid = mt76_get_next_pkt_id(wcid);
+ pid = mt76_get_next_pkt_id(dev, wcid, skb);
cb->wcid = wcid->idx;
cb->pktid = pid;
- cb->jiffies = jiffies;
- __skb_queue_tail(&dev->status_list, skb);
+ if (cb->pktid != MT_PACKET_ID_NO_SKB) {
+ __skb_queue_tail(&dev->status_list, skb);
+ /* cache this skb for fast lookup by packet-id */
+ wcid->skb_status_array[pid - MT_PACKET_ID_FIRST] = skb;
+ }
+
spin_unlock_bh(&dev->status_list.lock);
return pid;
@@ -150,25 +182,65 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
struct sk_buff_head *list)
{
struct sk_buff *skb, *tmp;
+ struct sk_buff *rvskb = NULL;
+ /* If pktid is below the first valid id, it is not something we
+ * requested TXS for, so there is no skb to find. Bail out early in
+ * that case, unless the stale-skb reaper is due for a walk. A
+ * negative pktid means the caller is flushing, so always walk then.
+ */
+ if (pktid < MT_PACKET_ID_FIRST) {
+ if (pktid >= 0 &&
+ !time_after(jiffies, dev->next_status_jiffies))
+ return NULL;
+ goto check_list;
+ }
+
+ if (wcid) {
+ lockdep_assert_held(&dev->status_list.lock);
+ if (WARN_ON_ONCE(pktid - MT_PACKET_ID_FIRST
+ >= ARRAY_SIZE(wcid->skb_status_array))) {
+ dev_err(dev->dev, "invalid pktid: %d status-array-size: %d\n",
+ pktid, (int)(ARRAY_SIZE(wcid->skb_status_array)));
+ goto check_list;
+ }
+
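+ /* Fast path: use the cached skb unless a stale-entry walk is due. */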
+ skb = wcid->skb_status_array[pktid - MT_PACKET_ID_FIRST];
+
+ if (skb && !time_after(jiffies, dev->next_status_jiffies))
+ return skb;
+ }
+
+check_list:
skb_queue_walk_safe(&dev->status_list, skb, tmp) {
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
if (wcid && cb->wcid != wcid->idx)
continue;
- if (cb->pktid == pktid)
- return skb;
+ if (cb->pktid == pktid) {
+ /* Found our skb; keep walking so other entries can still time out. */
+ rvskb = skb;
+ continue;
+ }
+
+ /* Cannot time out anything that has not completed DMA. */
+ if (!(cb->flags & MT_TX_CB_DMA_DONE))
+ continue;
+ /* Check if it has been too long since DMA_DONE was reported. */
if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
MT_TX_STATUS_SKB_TIMEOUT))
continue;
+ /* It has been too long since DMA_DONE; time out this packet
+ * and stop waiting for the TXS callback.
+ */
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
- MT_TX_CB_TXS_DONE, list);
+ MT_TX_CB_TXS_DONE, list, wcid);
}
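+ /* Everything stale was reaped in the walk above; the next walk is
+ * not due for another timeout interval.
+ */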
+ dev->next_status_jiffies = jiffies + MT_TX_STATUS_SKB_TIMEOUT + 1;
- return NULL;
+ return rvskb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
@@ -238,7 +310,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
}
mt76_tx_status_lock(dev, &list);
- __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list, wcid);
mt76_tx_status_unlock(dev, &list);
out: