diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4243,6 +4243,7 @@ void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);
+struct skb_ext *skb_ext_cow(struct skb_ext *old, unsigned int old_active);
static inline void skb_ext_put(struct sk_buff *skb)
{
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6282,14 +6282,11 @@ struct skb_ext *__skb_ext_alloc(gfp_t flags)
return new;
}
-static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
- unsigned int old_active)
+struct skb_ext *skb_ext_cow(struct skb_ext *old,
+ unsigned int old_active)
{
struct skb_ext *new;
- if (refcount_read(&old->refcnt) == 1)
- return old;
-
new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
if (!new)
return NULL;
@@ -6306,6 +6303,22 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
xfrm_state_hold(sp->xvec[i]);
}
#endif
+ return new;
+}
+EXPORT_SYMBOL(skb_ext_cow);
+
+static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
+ unsigned int old_active)
+{
+ struct skb_ext *new;
+
+ if (refcount_read(&old->refcnt) == 1)
+ return old;
+
+ new = skb_ext_cow(old, old_active);
+ if (!new)
+ return NULL;
+
__skb_ext_put(old);
return new;
}
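
To illustrate the split, here is a minimal userspace sketch (struct ext, ext_cow() and ext_maybe_cow() are simplified stand-ins for the skb_ext counterparts; plain ints replace refcount_t and the per-extension reference taking is omitted): skb_ext_cow() now unconditionally produces a private copy, while skb_ext_maybe_cow() keeps the old fast path of returning the original when the extension area is unshared.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for struct skb_ext: a refcount plus payload. */
struct ext {
        int refcnt;
        char data[32];
};

/* Model of skb_ext_cow(): unconditionally allocate a private copy. */
static struct ext *ext_cow(const struct ext *old)
{
        struct ext *new = malloc(sizeof(*new));

        if (!new)
                return NULL;
        memcpy(new, old, sizeof(*new));
        new->refcnt = 1;        /* the copy starts out unshared */
        return new;
}

/* Model of skb_ext_maybe_cow(): copy only when actually shared. */
static struct ext *ext_maybe_cow(struct ext *old)
{
        struct ext *new;

        if (old->refcnt == 1)
                return old;     /* sole owner, no copy needed */

        new = ext_cow(old);
        if (!new)
                return NULL;
        old->refcnt--;          /* drop our reference on the shared copy */
        return new;
}

int main(void)
{
        struct ext *shared = malloc(sizeof(*shared));
        struct ext *priv;

        shared->refcnt = 2;     /* pretend two skbs reference it */
        strcpy(shared->data, "secpath");

        priv = ext_maybe_cow(shared);
        /* Writes through 'priv' are no longer visible via 'shared'. */
        printf("copied: %s, shared refcnt now %d\n",
               priv != shared ? "yes" : "no", shared->refcnt);

        free(priv);
        free(shared);
        return 0;
}
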
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -251,6 +251,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
struct crypto_aead *aead;
struct esp_info esp;
bool hw_offload = true;
+ struct skb_ext *ext;
__u32 seq;
esp.inplace = true;
@@ -312,8 +313,21 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_
ip_hdr(skb)->tot_len = htons(skb->len);
ip_send_check(ip_hdr(skb));
- if (hw_offload)
+ if (hw_offload) {
+ ext = skb_ext_cow(skb->extensions, skb->active_extensions);
+ if (!ext)
+ return -ENOMEM;
+
+ __skb_ext_put(skb->extensions);
+ skb->extensions = ext;
+
+ xo = xfrm_offload(skb);
+ if (!xo)
+ return -EINVAL;
+
+ xo->flags |= XFRM_XMIT;
return 0;
+ }
err = esp_output_tail(x, skb, &esp);
if (err)
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -286,6 +286,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
struct xfrm_offload *xo;
struct crypto_aead *aead;
struct esp_info esp;
+ struct skb_ext *ext;
bool hw_offload = true;
__u32 seq;
@@ -346,8 +347,21 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features
ipv6_hdr(skb)->payload_len = htons(len);
- if (hw_offload)
+ if (hw_offload) {
+ ext = skb_ext_cow(skb->extensions, skb->active_extensions);
+ if (!ext)
+ return -ENOMEM;
+
+ __skb_ext_put(skb->extensions);
+ skb->extensions = ext;
+
+ xo = xfrm_offload(skb);
+ if (!xo)
+ return -EINVAL;
+
+ xo->flags |= XFRM_XMIT;
return 0;
+ }
err = esp6_output_tail(x, skb, &esp);
if (err)
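
Both hunks follow the same pattern: swap in a private extension area first, then re-derive the xfrm_offload() pointer, since any pointer obtained before the swap refers to the dropped area. A minimal userspace sketch of that pitfall (struct pkt and offload_data() are hypothetical stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ext { char data[16]; };
struct pkt { struct ext *extensions; };

/* Hypothetical helper mirroring xfrm_offload(): returns a pointer
 * into the packet's *current* extension area. */
static char *offload_data(struct pkt *p)
{
        return p->extensions ? p->extensions->data : NULL;
}

int main(void)
{
        struct pkt p;
        struct ext *shared = calloc(1, sizeof(*shared));
        struct ext *priv;
        char *xo;

        p.extensions = shared;
        xo = offload_data(&p);          /* pointer into the shared area */

        /* Copy-on-write swap, as in the esp_xmit() hunks above: */
        priv = malloc(sizeof(*priv));
        memcpy(priv, shared, sizeof(*priv));
        p.extensions = priv;
        free(shared);                   /* models __skb_ext_put(old) */

        /* 'xo' now dangles; it must be re-derived before use: */
        xo = offload_data(&p);
        strcpy(xo, "XMIT");             /* lands in the private copy only */
        printf("%s\n", priv->data);

        free(priv);
        return 0;
}
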
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -134,8 +134,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
return skb;
}
- xo->flags |= XFRM_XMIT;
-
if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
struct sk_buff *segs;
Commit 94579ac3f6d0 ("xfrm: Fix double ESP trailer insertion in IPsec
crypto offload.") added an XFRM_XMIT flag to avoid duplicate ESP
trailer insertion on HW offload. This flag is set on the secpath,
which is shared amongst the segments of a GSO packet. This led to a
situation where some segments were not transformed correctly when
segmentation happened at layer 3.

Fix this by using private skb extensions for segmented and HW
offloaded ESP packets.

Fixes: 94579ac3f6d0 ("xfrm: Fix double ESP trailer insertion in IPsec crypto offload.")
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
---
 include/linux/skbuff.h  |  1 +
 net/core/skbuff.c       | 23 ++++++++++++++++++-----
 net/ipv4/esp4_offload.c | 16 +++++++++++++++-
 net/ipv6/esp6_offload.c | 16 +++++++++++++++-
 net/xfrm/xfrm_device.c  |  2 --
 5 files changed, 49 insertions(+), 9 deletions(-)
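
For reference, a small userspace sketch of the failure mode described above (struct secpath here is a bare stand-in and FLAG_XMIT an illustrative constant, not the kernel definitions): when all GSO segments share one extension area, the flag set while transmitting the first segment is seen by every subsequent segment, whose trailers are then skipped.

#include <stdio.h>

#define FLAG_XMIT 1

struct secpath { int flags; };

/* Pre-patch behaviour: all segments point at the same secpath. */
static void xmit_segment(struct secpath *sp, int idx)
{
        if (sp->flags & FLAG_XMIT) {
                printf("segment %d: flag already set, trailer skipped\n", idx);
                return;
        }
        sp->flags |= FLAG_XMIT;
        printf("segment %d: trailer inserted\n", idx);
}

int main(void)
{
        struct secpath shared = { .flags = 0 };
        int i;

        /* Only segment 0 gets transformed; 1 and 2 are skipped. */
        for (i = 0; i < 3; i++)
                xmit_segment(&shared, i);
        return 0;
}

With the private extensions introduced by this patch, each segment carries its own copy, so the flag check above starts from a clean state per segment.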