@@ -94,6 +94,7 @@ struct ieee80211_fragment_entry {
u8 rx_queue;
bool check_sequential_pn; /* needed for CCMP/GCMP */
u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
+ unsigned int key_color;
};
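The first hunk extends each fragment cache entry so that, besides the PN of the last received fragment, it also records which key the cached fragments were protected by. Below is a minimal userspace sketch of what such an entry now has to track; the names (frag_entry, frag_entry_start, PN_LEN) are made up for illustration and are not the mac80211 identifiers.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define PN_LEN 6			/* CCMP/GCMP packet number length */

struct frag_entry {
	uint8_t rx_queue;		/* queue/TID the fragments arrived on */
	bool check_sequential_pn;	/* true for CCMP/GCMP-protected frames */
	uint8_t last_pn[PN_LEN];	/* PN of the last cached fragment */
	unsigned int key_color;		/* ID of the key that protected them */
};

/* (Re)initialise an entry for the first fragment of a new frame and
 * remember which key it was received under. */
static void frag_entry_start(struct frag_entry *entry, uint8_t rx_queue,
			     const uint8_t *pn, unsigned int key_color)
{
	entry->rx_queue = rx_queue;
	entry->check_sequential_pn = true;
	memcpy(entry->last_pn, pn, PN_LEN);
	entry->key_color = key_color;
}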
@@ -646,6 +646,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct sta_info *sta)
{
struct ieee80211_local *local = sdata->local;
+ static atomic_t key_color = ATOMIC_INIT(0);
struct ieee80211_key *old_key;
int idx = key->conf.keyidx;
bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
@@ -680,6 +681,12 @@ int ieee80211_key_link(struct ieee80211_key *key,
key->sdata = sdata;
key->sta = sta;
+ /*
+ * Assign a unique ID to every key so we can easily prevent mixed
+ * key and fragment cache attacks.
+ */
+ key->color = atomic_inc_return(&key_color);
+
increment_tailroom_need_count(sdata);
ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -123,6 +123,8 @@ struct ieee80211_key {
} debugfs;
#endif
+ unsigned int color;
+
/*
* key config, must be last because it contains key
* material as variable length member
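In ieee80211_key_link() every key that gets installed is tagged with a "color": the next value of a function-local static atomic counter, stored in the new color member of struct ieee80211_key. The counter only has to guarantee that two different key instances never compare equal, so a plain increment is enough. Here is a minimal standalone sketch of the same pattern, using C11 atomics instead of the kernel's atomic_t; the names my_key and my_key_install are hypothetical stand-ins for ieee80211_key and ieee80211_key_link.

#include <stdatomic.h>

struct my_key {
	unsigned int color;	/* unique ID assigned when the key is installed */
	/* ... key index, cipher, key material ... */
};

static void my_key_install(struct my_key *key)
{
	/* One counter shared by all installations, as in the patch. */
	static atomic_uint key_color = 0;

	/* atomic_fetch_add() returns the old value, so add 1 to mimic the
	 * kernel's atomic_inc_return(). */
	key->color = atomic_fetch_add(&key_color, 1) + 1;
}

A rekey therefore always produces a key with a different color than the one it replaces, which is exactly the property the defragmentation check below relies on.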
@@ -1869,6 +1869,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
* next fragment has a sequential PN value.
*/
entry->check_sequential_pn = true;
+ entry->key_color = rx->key->color;
memcpy(entry->last_pn,
rx->key->u.ccmp.rx_pn[queue],
IEEE80211_CCMP_PN_LEN);
@@ -1906,6 +1907,11 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
if (!requires_sequential_pn(rx, fc))
return RX_DROP_UNUSABLE;
+
+ /* Prevent mixed key and fragment cache attacks */
+ if (entry->key_color != rx->key->color)
+ return RX_DROP_UNUSABLE;
+
memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
pn[i]++;
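On the receive path the entry's color is recorded when the first fragment is cached (the @@ -1869 hunk) and compared against the current key for every later fragment (the @@ -1906 hunk): if the key changed in between, the cached fragments and the new one were protected by different keys, so reassembly is aborted with RX_DROP_UNUSABLE. Only then is the usual check performed that the new fragment's PN equals the last cached PN plus one. The following is a compacted userspace sketch of that acceptance test; frag_entry_accepts, PN_LEN and the parameter names are made up for illustration, not mac80211 API.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define PN_LEN 6

/* Decide whether a later fragment may be merged into a cached entry:
 * it must arrive under the same key (same color) and carry a PN that is
 * exactly one greater than the PN of the previously cached fragment. */
static bool frag_entry_accepts(unsigned int entry_key_color,
			       const uint8_t last_pn[PN_LEN],
			       unsigned int cur_key_color,
			       const uint8_t rx_pn[PN_LEN])
{
	uint8_t expected[PN_LEN];
	int i;

	/* Mixed key attack: the entry was started under a different
	 * (e.g. since-replaced) key, so it must not be continued. */
	if (entry_key_color != cur_key_color)
		return false;

	/* Compute last_pn + 1: the PN is big-endian, so increment from the
	 * least significant byte and stop once there is no carry. */
	memcpy(expected, last_pn, PN_LEN);
	for (i = PN_LEN - 1; i >= 0; i--) {
		if (++expected[i])
			break;
	}

	return memcmp(expected, rx_pn, PN_LEN) == 0;
}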