
[v4,bpf-next,6/6] xsk: build skb by page (aka generic zerocopy xmit)

Message ID 20210216113740.62041-7-alobakin@pm.me
State Superseded
Series xsk: build skb by page (aka generic zerocopy xmit)

Commit Message

Alexander Lobakin Feb. 16, 2021, 11:39 a.m. UTC
From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

This patch constructs the skb directly from the umem pages instead of
copying the data into a freshly allocated skb, to save the memory copy
overhead.

The fast path relies on IFF_TX_SKB_NO_LINEAR: only when the network
card sets IFF_TX_SKB_NO_LINEAR in its priv_flags are the pages used to
build the skb directly. If the driver does not advertise this feature,
the data still has to be copied to construct the skb.
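
As a minimal illustration (a sketch only, not part of this patch; "foo"
is a hypothetical driver), a device whose xmit path can handle skbs
with an empty linear area would opt in during netdev setup like this:

```
#include <linux/netdevice.h>

/* Sketch only: "foo" is a hypothetical driver. The IFF_TX_SKB_NO_LINEAR
 * private flag comes from earlier in this series and tells the stack
 * the device can transmit skbs whose data lives entirely in page frags.
 */
static void foo_setup(struct net_device *dev)
{
	dev->priv_flags |= IFF_TX_SKB_NO_LINEAR;
}
```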

---------------- Performance Testing ------------

The test environment is an Aliyun ECS server.
Test cmd:
```
xdpsock -i eth0 -t  -S -s <msg size>
```

Test result data:

size    64      512     1024    1500
copy    1916747 1775988 1600203 1440054
page    1974058 1953655 1945463 1904478
percent 3.0%    10.0%   21.58%  32.3%
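
(The percent row is the throughput gain of the page path over the copy
path, e.g. for 64-byte frames: 1974058 / 1916747 - 1 ≈ 3.0%.)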

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
[ alobakin:
 - expand subject to make it clearer;
 - improve skb->truesize calculation;
 - reserve some headroom in skb for drivers;
 - tailroom is not needed as skb is non-linear ]
Signed-off-by: Alexander Lobakin <alobakin@pm.me>
---
 net/xdp/xsk.c | 119 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 95 insertions(+), 24 deletions(-)

Comments

Alexander Lobakin Feb. 16, 2021, 2:15 p.m. UTC | #1
From: Magnus Karlsson <magnus.karlsson@gmail.com>
Date: Tue, 16 Feb 2021 15:08:26 +0100

> On Tue, Feb 16, 2021 at 12:44 PM Alexander Lobakin <alobakin@pm.me> wrote:
> >
> > From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> >
> > This patch constructs the skb directly from the umem pages instead of
> > copying the data into a freshly allocated skb, to save the memory copy
> > overhead.
> >
> > The fast path relies on IFF_TX_SKB_NO_LINEAR: only when the network
> > card sets IFF_TX_SKB_NO_LINEAR in its priv_flags are the pages used to
> > build the skb directly. If the driver does not advertise this feature,
> > the data still has to be copied to construct the skb.
> >
> > ---------------- Performance Testing ------------
> >
> > The test environment is an Aliyun ECS server.
> > Test cmd:
> > ```
> > xdpsock -i eth0 -t  -S -s <msg size>
> > ```
> >
> > Test result data:
> >
> > size    64      512     1024    1500
> > copy    1916747 1775988 1600203 1440054
> > page    1974058 1953655 1945463 1904478
> > percent 3.0%    10.0%   21.58%  32.3%
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
> > [ alobakin:
> >  - expand subject to make it clearer;
> >  - improve skb->truesize calculation;
> >  - reserve some headroom in skb for drivers;
> >  - tailroom is not needed as skb is non-linear ]
> > Signed-off-by: Alexander Lobakin <alobakin@pm.me>
> 
> Thank you Alexander!
> 
> Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>

Thanks!

I have one more generic zerocopy approach to offer (inspired by this
series) that wouldn't require IFF_TX_SKB_NO_LINEAR, only the ability to
xmit S/G packets, which almost every NIC has. I'll publish an RFC once
this series and your upcoming changes get merged.
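
As a rough, purely hypothetical illustration of that idea (not the
actual RFC; the function name, the 128-byte header budget and the
-EOPNOTSUPP fallback are all assumptions), such a path could keep only
the headers in the linear area and hand the rest of the umem data to
any NETIF_F_SG-capable device as frags:

```
static struct sk_buff *xsk_build_skb_sg(struct xdp_sock *xs,
					struct xdp_desc *desc)
{
	u32 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
	u32 lin = min_t(u32, desc->len, 128);	/* assumed header budget */
	void *buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
	struct sk_buff *skb;
	int err;

	if (!(xs->dev->features & NETIF_F_SG))
		return ERR_PTR(-EOPNOTSUPP);	/* caller falls back to copying */

	skb = sock_alloc_send_skb(&xs->sk, hr + lin, 1, &err);
	if (unlikely(!skb))
		return ERR_PTR(err);

	skb_reserve(skb, hr);
	skb_put_data(skb, buffer, lin);		/* copy the headers only */

	/* ... attach the remaining desc->len - lin bytes as page frags and
	 * fix up skb->len/data_len/truesize, exactly as
	 * xsk_build_skb_zerocopy() does above ...
	 */

	return skb;
}
```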

> > ---
> >  net/xdp/xsk.c | 119 ++++++++++++++++++++++++++++++++++++++++----------
> >  1 file changed, 95 insertions(+), 24 deletions(-)
> >
> > diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> > index 143979ea4165..ff7bd06e1241 100644
> > --- a/net/xdp/xsk.c
> > +++ b/net/xdp/xsk.c
> > @@ -445,6 +445,96 @@ static void xsk_destruct_skb(struct sk_buff *skb)
> >         sock_wfree(skb);
> >  }
> >
> > +static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
> > +                                             struct xdp_desc *desc)
> > +{
> > +       struct xsk_buff_pool *pool = xs->pool;
> > +       u32 hr, len, offset, copy, copied;
> > +       struct sk_buff *skb;
> > +       struct page *page;
> > +       void *buffer;
> > +       int err, i;
> > +       u64 addr;
> > +
> > +       hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
> > +
> > +       skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
> > +       if (unlikely(!skb))
> > +               return ERR_PTR(err);
> > +
> > +       skb_reserve(skb, hr);
> > +
> > +       addr = desc->addr;
> > +       len = desc->len;
> > +
> > +       buffer = xsk_buff_raw_get_data(pool, addr);
> > +       offset = offset_in_page(buffer);
> > +       addr = buffer - pool->addrs;
> > +
> > +       for (copied = 0, i = 0; copied < len; i++) {
> > +               page = pool->umem->pgs[addr >> PAGE_SHIFT];
> > +               get_page(page);
> > +
> > +               copy = min_t(u32, PAGE_SIZE - offset, len - copied);
> > +               skb_fill_page_desc(skb, i, page, offset, copy);
> > +
> > +               copied += copy;
> > +               addr += copy;
> > +               offset = 0;
> > +       }
> > +
> > +       skb->len += len;
> > +       skb->data_len += len;
> > +       skb->truesize += pool->unaligned ? len : pool->chunk_size;
> > +
> > +       refcount_add(skb->truesize, &xs->sk.sk_wmem_alloc);
> > +
> > +       return skb;
> > +}
> > +
> > +static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
> > +                                    struct xdp_desc *desc)
> > +{
> > +       struct net_device *dev = xs->dev;
> > +       struct sk_buff *skb;
> > +
> > +       if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
> > +               skb = xsk_build_skb_zerocopy(xs, desc);
> > +               if (IS_ERR(skb))
> > +                       return skb;
> > +       } else {
> > +               u32 hr, tr, len;
> > +               void *buffer;
> > +               int err;
> > +
> > +               hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
> > +               tr = dev->needed_tailroom;
> > +               len = desc->len;
> > +
> > +               skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
> > +               if (unlikely(!skb))
> > +                       return ERR_PTR(err);
> > +
> > +               skb_reserve(skb, hr);
> > +               skb_put(skb, len);
> > +
> > +               buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
> > +               err = skb_store_bits(skb, 0, buffer, len);
> > +               if (unlikely(err)) {
> > +                       kfree_skb(skb);
> > +                       return ERR_PTR(err);
> > +               }
> > +       }
> > +
> > +       skb->dev = dev;
> > +       skb->priority = xs->sk.sk_priority;
> > +       skb->mark = xs->sk.sk_mark;
> > +       skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
> > +       skb->destructor = xsk_destruct_skb;
> > +
> > +       return skb;
> > +}
> > +
> >  static int xsk_generic_xmit(struct sock *sk)
> >  {
> >         struct xdp_sock *xs = xdp_sk(sk);
> > @@ -454,56 +544,37 @@ static int xsk_generic_xmit(struct sock *sk)
> >         struct sk_buff *skb;
> >         unsigned long flags;
> >         int err = 0;
> > -       u32 hr, tr;
> >
> >         mutex_lock(&xs->mutex);
> >
> >         if (xs->queue_id >= xs->dev->real_num_tx_queues)
> >                 goto out;
> >
> > -       hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
> > -       tr = xs->dev->needed_tailroom;
> > -
> >         while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
> > -               char *buffer;
> > -               u64 addr;
> > -               u32 len;
> > -
> >                 if (max_batch-- == 0) {
> >                         err = -EAGAIN;
> >                         goto out;
> >                 }
> >
> > -               len = desc.len;
> > -               skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
> > -               if (unlikely(!skb))
> > +               skb = xsk_build_skb(xs, &desc);
> > +               if (IS_ERR(skb)) {
> > +                       err = PTR_ERR(skb);
> >                         goto out;
> > +               }
> >
> > -               skb_reserve(skb, hr);
> > -               skb_put(skb, len);
> > -
> > -               addr = desc.addr;
> > -               buffer = xsk_buff_raw_get_data(xs->pool, addr);
> > -               err = skb_store_bits(skb, 0, buffer, len);
> >                 /* This is the backpressure mechanism for the Tx path.
> >                  * Reserve space in the completion queue and only proceed
> >                  * if there is space in it. This avoids having to implement
> >                  * any buffering in the Tx path.
> >                  */
> >                 spin_lock_irqsave(&xs->pool->cq_lock, flags);
> > -               if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
> > +               if (xskq_prod_reserve(xs->pool->cq)) {
> >                         spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
> >                         kfree_skb(skb);
> >                         goto out;
> >                 }
> >                 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
> >
> > -               skb->dev = xs->dev;
> > -               skb->priority = sk->sk_priority;
> > -               skb->mark = sk->sk_mark;
> > -               skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
> > -               skb->destructor = xsk_destruct_skb;
> > -
> >                 err = __dev_direct_xmit(skb, xs->queue_id);
> >                 if  (err == NETDEV_TX_BUSY) {
> >                         /* Tell user-space to retry the send */
> > --
> > 2.30.1

Al

Patch

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 143979ea4165..ff7bd06e1241 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -445,6 +445,96 @@  static void xsk_destruct_skb(struct sk_buff *skb)
 	sock_wfree(skb);
 }
 
+static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
+					      struct xdp_desc *desc)
+{
+	struct xsk_buff_pool *pool = xs->pool;
+	u32 hr, len, offset, copy, copied;
+	struct sk_buff *skb;
+	struct page *page;
+	void *buffer;
+	int err, i;
+	u64 addr;
+
+	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
+
+	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
+	if (unlikely(!skb))
+		return ERR_PTR(err);
+
+	skb_reserve(skb, hr);
+
+	addr = desc->addr;
+	len = desc->len;
+
+	buffer = xsk_buff_raw_get_data(pool, addr);
+	offset = offset_in_page(buffer);
+	addr = buffer - pool->addrs;
+
+	for (copied = 0, i = 0; copied < len; i++) {
+		page = pool->umem->pgs[addr >> PAGE_SHIFT];
+		get_page(page);
+
+		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
+		skb_fill_page_desc(skb, i, page, offset, copy);
+
+		copied += copy;
+		addr += copy;
+		offset = 0;
+	}
+
+	skb->len += len;
+	skb->data_len += len;
+	skb->truesize += pool->unaligned ? len : pool->chunk_size;
+
+	refcount_add(skb->truesize, &xs->sk.sk_wmem_alloc);
+
+	return skb;
+}
+
+static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
+				     struct xdp_desc *desc)
+{
+	struct net_device *dev = xs->dev;
+	struct sk_buff *skb;
+
+	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
+		skb = xsk_build_skb_zerocopy(xs, desc);
+		if (IS_ERR(skb))
+			return skb;
+	} else {
+		u32 hr, tr, len;
+		void *buffer;
+		int err;
+
+		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
+		tr = dev->needed_tailroom;
+		len = desc->len;
+
+		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
+		if (unlikely(!skb))
+			return ERR_PTR(err);
+
+		skb_reserve(skb, hr);
+		skb_put(skb, len);
+
+		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
+		err = skb_store_bits(skb, 0, buffer, len);
+		if (unlikely(err)) {
+			kfree_skb(skb);
+			return ERR_PTR(err);
+		}
+	}
+
+	skb->dev = dev;
+	skb->priority = xs->sk.sk_priority;
+	skb->mark = xs->sk.sk_mark;
+	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
+	skb->destructor = xsk_destruct_skb;
+
+	return skb;
+}
+
 static int xsk_generic_xmit(struct sock *sk)
 {
 	struct xdp_sock *xs = xdp_sk(sk);
@@ -454,56 +544,37 @@  static int xsk_generic_xmit(struct sock *sk)
 	struct sk_buff *skb;
 	unsigned long flags;
 	int err = 0;
-	u32 hr, tr;
 
 	mutex_lock(&xs->mutex);
 
 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
 		goto out;
 
-	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
-	tr = xs->dev->needed_tailroom;
-
 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
-		char *buffer;
-		u64 addr;
-		u32 len;
-
 		if (max_batch-- == 0) {
 			err = -EAGAIN;
 			goto out;
 		}
 
-		len = desc.len;
-		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
-		if (unlikely(!skb))
+		skb = xsk_build_skb(xs, &desc);
+		if (IS_ERR(skb)) {
+			err = PTR_ERR(skb);
 			goto out;
+		}
 
-		skb_reserve(skb, hr);
-		skb_put(skb, len);
-
-		addr = desc.addr;
-		buffer = xsk_buff_raw_get_data(xs->pool, addr);
-		err = skb_store_bits(skb, 0, buffer, len);
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
 		 * if there is space in it. This avoids having to implement
 		 * any buffering in the Tx path.
 		 */
 		spin_lock_irqsave(&xs->pool->cq_lock, flags);
-		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
+		if (xskq_prod_reserve(xs->pool->cq)) {
 			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 			kfree_skb(skb);
 			goto out;
 		}
 		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
-		skb->dev = xs->dev;
-		skb->priority = sk->sk_priority;
-		skb->mark = sk->sk_mark;
-		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
-		skb->destructor = xsk_destruct_skb;
-
 		err = __dev_direct_xmit(skb, xs->queue_id);
 		if  (err == NETDEV_TX_BUSY) {
 			/* Tell user-space to retry the send */