@@ -127,6 +127,11 @@ enum skb_drop_reason {
* drop out of udp_memory_allocated.
*/
SKB_DROP_REASON_PROTO_MEM,
+ /**
+ * @SKB_DROP_REASON_TCP_AUTH_HDR: the TCP-MD5 or TCP-AO option appears
+ * more than once or has an invalid length.
+ */
+ SKB_DROP_REASON_TCP_AUTH_HDR,
/**
* @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash and one expected,
* corresponding to LINUX_MIB_TCPMD5NOTFOUND
@@ -142,6 +147,19 @@ enum skb_drop_reason {
* to LINUX_MIB_TCPMD5FAILURE
*/
SKB_DROP_REASON_TCP_MD5FAILURE,
+ /**
+ * @SKB_DROP_REASON_TCP_AONOTFOUND: no TCP-AO hash and one was expected
+ */
+ SKB_DROP_REASON_TCP_AONOTFOUND,
+ /**
+ * @SKB_DROP_REASON_TCP_AOUNEXPECTED: TCP-AO hash is present but none
+ * was expected.
+ */
+ SKB_DROP_REASON_TCP_AOUNEXPECTED,
+ /** @SKB_DROP_REASON_TCP_AOKEYNOTFOUND: TCP-AO key is unknown */
+ SKB_DROP_REASON_TCP_AOKEYNOTFOUND,
+ /** @SKB_DROP_REASON_TCP_AOFAILURE: TCP-AO hash is wrong */
+ SKB_DROP_REASON_TCP_AOFAILURE,
/**
* @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog (
* see LINUX_MIB_TCPBACKLOGDROP)
@@ -1703,7 +1703,7 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
- int family, int dif, int sdif);
+ int family, int l3index, const __u8 *hash_location);
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
@@ -1725,7 +1725,7 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk,
static inline enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
- int family, int dif, int sdif)
+ int family, int l3index, const __u8 *hash_location)
{
return SKB_NOT_DROPPED_YET;
}
@@ -2100,6 +2100,10 @@ struct tcp_sock_af_ops {
const struct sock *sk,
__be32 sisn, __be32 disn,
bool send);
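+ /* Derives a traffic key from the addresses/ports of an incoming
+ * segment and the given ISNs; used by tcp_inbound_ao_hash() when
+ * the segment's traffic key is not cached on the socket.
+ */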
+ int (*ao_calc_key_skb)(struct tcp_ao_key *mkt,
+ u8 *key,
+ const struct sk_buff *skb,
+ __be32 sisn, __be32 disn);
#endif
};
@@ -2510,4 +2514,55 @@ static inline int tcp_parse_auth_options(const struct tcphdr *th,
return 0;
}
+/* Called with rcu_read_lock() */
+static inline enum skb_drop_reason
+tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb,
+ const void *saddr, const void *daddr,
+ int family, int dif, int sdif)
+{
+ const struct tcphdr *th = tcp_hdr(skb);
+ const struct tcp_ao_hdr *aoh;
+ const __u8 *md5_location;
+ int l3index;
+
+ /* Invalid option, or the same auth option appears more than once */
+ if (tcp_parse_auth_options(th, &md5_location, &aoh))
+ return SKB_DROP_REASON_TCP_AUTH_HDR;
+
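+ /* The request socket committed to using (or not using) TCP-AO
+ * when the SYN arrived; later segments must keep the same choice.
+ */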
+ if (req) {
+ if (tcp_rsk_used_ao(req) != !!aoh)
+ return SKB_DROP_REASON_TCP_AOFAILURE;
+ }
+
+ /* sdif set, means packet ingressed via a device
+ * in an L3 domain and dif is set to the l3mdev
+ */
+ l3index = sdif ? dif : 0;
+
+ /* Fast path: unsigned segments */
+ if (likely(!md5_location && !aoh)) {
+ /* Drop if there's a TCP-MD5 or TCP-AO key with any rcvid/sndid
+ * for the remote peer. On a TCP-AO established connection
+ * the last key cannot be removed, so there's always at least
+ * one current_key.
+ */
+#ifdef CONFIG_TCP_AO
+ if (unlikely(tcp_ao_do_lookup(sk, saddr, family, -1, -1, 0)))
+ return SKB_DROP_REASON_TCP_AONOTFOUND;
+#endif
+ if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+ return SKB_DROP_REASON_TCP_MD5NOTFOUND;
+ }
+ return SKB_NOT_DROPPED_YET;
+ }
+
+ if (aoh)
+ return tcp_inbound_ao_hash(sk, skb, family, req, aoh);
+
+ return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
+ l3index, md5_location);
+}
+
#endif /* _TCP_H */
@@ -109,6 +109,9 @@ struct tcp6_ao_context {
__be32 disn;
};
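+
+/* Socket states in which an established TCP-AO connection keeps cached
+ * traffic keys; these take the fast path in tcp_inbound_ao_hash().
+ */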
+#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED|TCPF_FIN_WAIT1|TCPF_FIN_WAIT2|\
+ TCPF_CLOSE|TCPF_CLOSE_WAIT|TCPF_LAST_ACK|TCPF_CLOSING)
+
int tcp_ao_hash_skb(unsigned short int family,
char *ao_hash, struct tcp_ao_key *key,
const struct sock *sk, const struct sk_buff *skb,
@@ -126,6 +129,10 @@ u32 tcp_ao_compute_sne(u32 sne, u32 seq, u32 new_seq);
void tcp_ao_time_wait(struct tcp_timewait_sock *tcptw, struct tcp_sock *tp);
int tcp_ao_cache_traffic_keys(const struct sock *sk, struct tcp_ao_info *ao,
struct tcp_ao_key *ao_key);
+enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
+ const struct sk_buff *skb, unsigned short int family,
+ const struct request_sock *req,
+ const struct tcp_ao_hdr *aoh);
struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk,
const union tcp_ao_addr *addr,
int family, int sndid, int rcvid, u16 port);
@@ -147,9 +154,14 @@ int tcp_v4_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
__be32 sisn, __be32 disn, bool send);
int tcp_v4_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
struct request_sock *req);
+int tcp_v4_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
+ const struct sk_buff *skb, __be32 sisn, __be32 disn);
struct tcp_ao_key *tcp_v4_ao_lookup_rsk(const struct sock *sk,
struct request_sock *req,
int sndid, int rcvid);
+bool tcp_v4_inbound_ao_hash(struct sock *sk,
+ struct request_sock *req,
+ const struct sk_buff *skb);
int tcp_v4_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
const struct sock *sk, const struct sk_buff *skb,
const u8 *tkey, int hash_offset, u32 sne);
@@ -157,6 +169,9 @@ int tcp_v4_ao_hash_skb(char *ao_hash, struct tcp_ao_key *key,
int tcp_v6_ao_hash_pseudoheader(struct crypto_pool_ahash *hp,
const struct in6_addr *daddr,
const struct in6_addr *saddr, int nbytes);
+int tcp_v6_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
+ const struct sk_buff *skb, __be32 sisn,
+ __be32 disn);
int tcp_v6_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
const struct sock *sk, __be32 sisn,
__be32 disn, bool send);
@@ -192,6 +207,13 @@ static inline void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
{
}
+static inline enum skb_drop_reason tcp_inbound_ao_hash(struct sock *sk,
+ const struct sk_buff *skb, unsigned short int family,
+ const struct request_sock *req, const struct tcp_ao_hdr *aoh)
+{
+ return SKB_NOT_DROPPED_YET;
+}
+
static inline struct tcp_ao_key *tcp_ao_do_lookup(const struct sock *sk,
const union tcp_ao_addr *addr,
int family, int sndid, int rcvid, u16 port)
@@ -4515,42 +4515,23 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
const void *saddr, const void *daddr,
- int family, int dif, int sdif)
+ int family, int l3index, const __u8 *hash_location)
{
- /*
- * This gets called for each TCP segment that arrives
- * so we want to be efficient.
+ /* This gets called for each TCP segment that has a TCP-MD5 option.
* We have 3 drop cases:
* o No MD5 hash and one expected.
* o MD5 hash and we're not expecting one.
* o MD5 hash and its wrong.
*/
- const __u8 *hash_location = NULL;
- struct tcp_md5sig_key *hash_expected;
const struct tcphdr *th = tcp_hdr(skb);
struct tcp_sock *tp = tcp_sk(sk);
- int genhash, l3index;
+ struct tcp_md5sig_key *key;
+ int genhash;
u8 newhash[16];
- /* sdif set, means packet ingressed via a device
- * in an L3 domain and dif is set to the l3mdev
- */
- l3index = sdif ? dif : 0;
-
- hash_expected = tcp_md5_do_lookup(sk, l3index, saddr, family);
- if (tcp_parse_auth_options(th, &hash_location, NULL))
- return true;
-
- /* We've parsed the options - do we have a hash? */
- if (!hash_expected && !hash_location)
- return SKB_NOT_DROPPED_YET;
-
- if (hash_expected && !hash_location) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
- return SKB_DROP_REASON_TCP_MD5NOTFOUND;
- }
+ key = tcp_md5_do_lookup(sk, l3index, saddr, family);
- if (!hash_expected && hash_location) {
+ if (!key && hash_location) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
}
@@ -4560,14 +4541,10 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
* IPv4-mapped case.
*/
if (family == AF_INET)
- genhash = tcp_v4_md5_hash_skb(newhash,
- hash_expected,
- NULL, skb);
+ genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
else
- genhash = tp->af_specific->calc_md5_hash(newhash,
- hash_expected,
+ genhash = tp->af_specific->calc_md5_hash(newhash, key,
NULL, skb);
-
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
if (family == AF_INET) {
@@ -335,6 +335,17 @@ int tcp_v4_ao_calc_key_rsk(struct tcp_ao_key *mkt, u8 *key,
htonl(tcp_rsk(req)->rcv_isn));
}
+int tcp_v4_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
+ const struct sk_buff *skb, __be32 sisn,
+ __be32 disn)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
+
+ return tcp_v4_ao_calc_key(mkt, key, iph->saddr, iph->daddr,
+ th->source, th->dest, sisn, disn);
+}
+
static int tcp_v4_ao_hash_pseudoheader(struct crypto_pool_ahash *hp,
__be32 daddr, __be32 saddr,
int nbytes)
@@ -681,6 +692,138 @@ void tcp_ao_syncookie(struct sock *sk, const struct sk_buff *skb,
treq->maclen = tcp_ao_maclen(key);
}
+static enum skb_drop_reason
+tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb,
+ unsigned short int family, struct tcp_ao_info *info,
+ const struct tcp_ao_hdr *aoh, struct tcp_ao_key *key,
+ u8 *traffic_key, u8 *phash, u32 sne)
+{
+ unsigned char newhash[TCP_AO_MAX_HASH_SIZE] __tcp_ao_key_align;
+ u8 maclen = aoh->length - sizeof(struct tcp_ao_hdr);
+ const struct tcphdr *th = tcp_hdr(skb);
+
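+ /* The MAC length is a per-key property; a segment carrying a
+ * different-sized MAC cannot have been produced with this key.
+ */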
+ if (maclen != tcp_ao_maclen(key))
+ return SKB_DROP_REASON_TCP_AOFAILURE;
+
+ /* XXX: make it per-AF callback? */
+ tcp_ao_hash_skb(family, newhash, key, sk, skb, traffic_key,
+ (phash - (u8 *)th), sne);
+ if (memcmp(phash, newhash, maclen))
+ return SKB_DROP_REASON_TCP_AOFAILURE;
+ return SKB_NOT_DROPPED_YET;
+}
+
+enum skb_drop_reason
+tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
+ unsigned short int family, const struct request_sock *req,
+ const struct tcp_ao_hdr *aoh)
+{
+ const struct tcp_sock_af_ops *ops = tcp_sk(sk)->af_specific;
+ u8 key_buf[TCP_AO_MAX_HASH_SIZE] __tcp_ao_key_align;
+ const struct tcphdr *th = tcp_hdr(skb);
+ u8 *phash = (u8 *)(aoh + 1); /* hash goes just after the header */
+ struct tcp_ao_info *info;
+ struct tcp_ao_key *key;
+ __be32 sisn, disn;
+ u8 *traffic_key;
+ u32 sne = 0;
+
+ info = rcu_dereference(tcp_sk(sk)->ao_info);
+ if (!info)
+ return SKB_DROP_REASON_TCP_AOUNEXPECTED;
+
+ /* Fast-path */
+ /* TODO: fix fastopen and simultaneous open (TCPF_SYN_RECV) */
+ if (likely((1 << sk->sk_state) & (TCP_AO_ESTABLISHED | TCPF_SYN_RECV))) {
+ enum skb_drop_reason err;
+
+ /* Check if this socket's rnext_key matches the keyid in the
+ * packet. If not, look the key up by the keyid matching the
+ * rcvid in the MKT.
+ */
+ key = info->rnext_key;
+ if (key->rcvid != aoh->keyid) {
+ key = tcp_ao_do_lookup_rcvid(sk, aoh->keyid);
+ if (!key)
+ goto key_not_found;
+ }
+
+ if (unlikely(th->syn && !th->ack)) {
+ /* Delayed retransmitted syn */
+ sisn = th->seq;
+ disn = 0;
+ goto verify_hash;
+ }
+
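+ /* SNE virtually extends the 32-bit sequence number space for the
+ * MAC computation, so segments stay unambiguous across sequence
+ * number wraps (RFC 5925).
+ */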
+ sne = tcp_ao_compute_sne(info->rcv_sne, info->rcv_sne_seq,
+ ntohl(th->seq));
+ /* Established socket, traffic keys are cached */
+ traffic_key = rcv_other_key(key);
+ err = tcp_ao_verify_hash(sk, skb, family, info, aoh, key,
+ traffic_key, phash, sne);
+ if (err)
+ return err;
+ /* Key rotation: the peer asks us to use a new key (RNext) */
+ if (unlikely(aoh->rnext_keyid != info->current_key->sndid)) {
+ /* If the key is not found we do nothing. */
+ key = tcp_ao_do_lookup_sndid(sk, aoh->rnext_keyid);
+ if (key)
+ /* pairs with tcp_ao_del_cmd */
+ WRITE_ONCE(info->current_key, key);
+ }
+ return SKB_NOT_DROPPED_YET;
+ }
+
+ /* Look up the key based on peer address and keyid.
+ * current_key and rnext_key must not be used on TCP listen
+ * sockets, as otherwise:
+ * - request sockets would race on those key pointers
+ * - tcp_ao_del_cmd() allows async key removal
+ */
+ key = tcp_ao_inbound_lookup(family, sk, skb, -1, aoh->keyid);
+ if (!key)
+ goto key_not_found;
+
+ if (th->syn && !th->ack) {
+ sisn = th->seq;
+ disn = 0;
+ goto verify_hash;
+ }
+
+ if (sk->sk_state == TCP_LISTEN) {
+ /* Make the initial syn the likely case here */
+ if (unlikely(req)) {
+ sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
+ ntohl(th->seq));
+ sisn = htonl(tcp_rsk(req)->rcv_isn);
+ disn = htonl(tcp_rsk(req)->snt_isn);
+ } else if (unlikely(th->ack && !th->syn)) {
+ /* Possible syncookie packet */
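+ /* Recover both ISNs from the ACK completing the handshake:
+ * the peer's ISN is seq - 1, ours is ack_seq - 1.
+ */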
+ sisn = htonl(ntohl(th->seq) - 1);
+ disn = htonl(ntohl(th->ack_seq) - 1);
+ sne = tcp_ao_compute_sne(0, ntohl(sisn),
+ ntohl(th->seq));
+ }
+ } else if (sk->sk_state == TCP_SYN_SENT) {
+ disn = info->lisn;
+ if (th->syn)
+ sisn = th->seq;
+ else
+ sisn = info->risn;
+ } else {
+ WARN_ONCE(1, "TCP-AO: Unknown sk_state %d", sk->sk_state);
+ return SKB_DROP_REASON_TCP_AOFAILURE;
+ }
+verify_hash:
+ traffic_key = key_buf;
+ ops->ao_calc_key_skb(key, traffic_key, skb, sisn, disn);
+ return tcp_ao_verify_hash(sk, skb, family, info, aoh, key,
+ traffic_key, phash, sne);
+
+key_not_found:
+ return SKB_DROP_REASON_TCP_AOKEYNOTFOUND;
+}
+
int tcp_ao_cache_traffic_keys(const struct sock *sk, struct tcp_ao_info *ao,
struct tcp_ao_key *ao_key)
{
@@ -2205,9 +2205,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
drop_reason = SKB_DROP_REASON_XFRM_POLICY;
else
- drop_reason = tcp_inbound_md5_hash(sk, skb,
- &iph->saddr, &iph->daddr,
- AF_INET, dif, sdif);
+ drop_reason = tcp_inbound_hash(sk, req, skb,
+ &iph->saddr, &iph->daddr,
+ AF_INET, dif, sdif);
if (unlikely(drop_reason)) {
sk_drops_add(sk, skb);
reqsk_put(req);
@@ -2283,8 +2283,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
goto discard_and_relse;
}
- drop_reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr,
- &iph->daddr, AF_INET, dif, sdif);
+ drop_reason = tcp_inbound_hash(sk, NULL, skb, &iph->saddr, &iph->daddr,
+ AF_INET, dif, sdif);
if (drop_reason)
goto discard_and_relse;
@@ -2443,6 +2443,7 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
.calc_ao_hash = tcp_v4_ao_hash_skb,
.ao_parse = tcp_v4_parse_ao,
.ao_calc_key_sk = tcp_v4_ao_calc_key_sk,
+ .ao_calc_key_skb = tcp_v4_ao_calc_key_skb,
#endif
};
#endif
@@ -39,6 +39,18 @@ int tcp_v6_ao_calc_key(struct tcp_ao_key *mkt, u8 *key,
return tcp_ao_calc_traffic_key(mkt, key, &tmp, sizeof(tmp));
}
+int tcp_v6_ao_calc_key_skb(struct tcp_ao_key *mkt, u8 *key,
+ const struct sk_buff *skb,
+ __be32 sisn, __be32 disn)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
+
+ return tcp_v6_ao_calc_key(mkt, key, &iph->saddr,
+ &iph->daddr, th->source,
+ th->dest, sisn, disn);
+}
+
int tcp_v6_ao_calc_key_sk(struct tcp_ao_key *mkt, u8 *key,
const struct sock *sk, __be32 sisn,
__be32 disn, bool send)
@@ -1825,9 +1825,9 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
struct sock *nsk;
sk = req->rsk_listener;
- drop_reason = tcp_inbound_md5_hash(sk, skb,
- &hdr->saddr, &hdr->daddr,
- AF_INET6, dif, sdif);
+ drop_reason = tcp_inbound_hash(sk, req, skb,
+ &hdr->saddr, &hdr->daddr,
+ AF_INET6, dif, sdif);
if (drop_reason) {
sk_drops_add(sk, skb);
reqsk_put(req);
@@ -1899,8 +1899,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
goto discard_and_relse;
}
- drop_reason = tcp_inbound_md5_hash(sk, skb, &hdr->saddr, &hdr->daddr,
- AF_INET6, dif, sdif);
+ drop_reason = tcp_inbound_hash(sk, NULL, skb, &hdr->saddr, &hdr->daddr,
+ AF_INET6, dif, sdif);
if (drop_reason)
goto discard_and_relse;
@@ -2093,6 +2093,7 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
.calc_ao_hash = tcp_v6_ao_hash_skb,
.ao_parse = tcp_v6_parse_ao,
.ao_calc_key_sk = tcp_v6_ao_calc_key_sk,
+ .ao_calc_key_skb = tcp_v6_ao_calc_key_skb,
#endif
};
#endif