diff mbox series

[bpf-next,2/2] net: xdp: add xdp_update_skb_shared_info utility routine

Message ID 16f4244f5a506143f5becde501f1ecb120255b42.1625828537.git.lorenzo@kernel.org
State New
Headers show
Series Add xdp_update_skb_shared_info utility routine | expand

Commit Message

Lorenzo Bianconi July 9, 2021, 11:10 a.m. UTC
Introduce xdp_update_skb_shared_info routine to update frags array
metadata from a given xdp_buffer/xdp_frame. We do not need to reset
frags array since it is already initialized by the driver.
Rely on xdp_update_skb_shared_info in mvneta driver.

Suggested-by: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 29 +++++++++++++++------------
 include/net/xdp.h                     |  3 +++
 net/core/xdp.c                        | 27 +++++++++++++++++++++++++
 3 files changed, 46 insertions(+), 13 deletions(-)

Comments

John Fastabend July 12, 2021, 6:46 p.m. UTC | #1
Lorenzo Bianconi wrote:
> Introduce xdp_update_skb_shared_info routine to update frags array

> metadata from a given xdp_buffer/xdp_frame. We do not need to reset

> frags array since it is already initialized by the driver.

> Rely on xdp_update_skb_shared_info in mvneta driver.


Some more context here would really help. I had to jump into the mvneta
driver to see what is happening.

So as I read this we have a loop processing the descriptor in
mvneta_rx_swbm()

 mvneta_rx_swbm()
   while (rx_proc < budget && rx_proc < rx_todo) {
     if (rx_status & MVNETA_RXD_FIRST_DESC) ...
     else {
       mvneta_swbm_add_rx_fragment()
     }
     ..
     if (!rx_status & MVNETA_RXD_LAST_DESC)
         continue;
     ..
     if (xdp_prog)
       mvneta_run_xdp(...)
   }

roughly looking like above. First question, do you ever hit
!MVNETA_RXD_LAST_DESC today? I assume this is avoided by hardware
setup when XDP is enabled, otherwise _run_xdp() would be
broken correct? Next question, given last descriptor bit
logic whats the condition to hit the code added in this patch?
wouldn't we need more than 1 descriptor and then we would
skip the xdp_run... sorry lost me and its probably easier
to let you give the flow vs spending an hour trying to
track it down.

But, in theory as you handle a hardware descriptor you can build
up a set of pages using them to create a single skb rather than a
skb per descriptor. But don't we know if pfmemalloc should be
done while we are building the frag list? Can't we just set it
vs this for loop in xdp_update_skb_shared_info(),

> +	for (i = 0; i < nr_frags; i++) {

> +		struct page *page = skb_frag_page(&sinfo->frags[i]);

> +

> +		page = compound_head(page);

> +		if (page_is_pfmemalloc(page)) {

> +			skb->pfmemalloc = true;

> +			break;

> +		}

> +	}

> +}


...

> diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c

> index 361bc4fbe20b..abf2e50880e0 100644

> --- a/drivers/net/ethernet/marvell/mvneta.c

> +++ b/drivers/net/ethernet/marvell/mvneta.c

> @@ -2294,18 +2294,29 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,

>  	rx_desc->buf_phys_addr = 0;

>  

>  	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {

> -		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];

> +		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags];

>  

>  		skb_frag_off_set(frag, pp->rx_offset_correction);

>  		skb_frag_size_set(frag, data_len);

>  		__skb_frag_set_page(frag, page);

> +		/* We don't need to reset pp_recycle here. It's already set, so

> +		 * just mark fragments for recycling.

> +		 */

> +		page_pool_store_mem_info(page, rxq->page_pool);

> +

> +		/* first fragment */

> +		if (!xdp_sinfo->nr_frags)

> +			xdp_sinfo->gso_type = *size;


Would be nice to also change 'int size' -> 'unsigned int size' so the
types matched. Presumably you really can't have a negative size.

Also how about giving gso_type a better name. xdp_sinfo->size maybe?


> +		xdp_sinfo->nr_frags++;

>  

>  		/* last fragment */

>  		if (len == *size) {

>  			struct skb_shared_info *sinfo;

>  

>  			sinfo = xdp_get_shared_info_from_buff(xdp);

> +			sinfo->xdp_frags_tsize = xdp_sinfo->nr_frags * PAGE_SIZE;

>  			sinfo->nr_frags = xdp_sinfo->nr_frags;

> +			sinfo->gso_type = xdp_sinfo->gso_type;

>  			memcpy(sinfo->frags, xdp_sinfo->frags,

>  			       sinfo->nr_frags * sizeof(skb_frag_t));

>  		}


Thanks,
John
Lorenzo Bianconi July 12, 2021, 8:22 p.m. UTC | #2
> Lorenzo Bianconi wrote:

> > Introduce xdp_update_skb_shared_info routine to update frags array

> > metadata from a given xdp_buffer/xdp_frame. We do not need to reset

> > frags array since it is already initialized by the driver.

> > Rely on xdp_update_skb_shared_info in mvneta driver.

> 

> Some more context here would really help. I had to jump into the mvneta

> driver to see what is happening.


Hi John,

ack, you are right. I will add more context next time. Sorry for the noise.

> 

> So as I read this we have a loop processing the descriptor in

> mvneta_rx_swbm()

> 

>  mvneta_rx_swbm()

>    while (rx_proc < budget && rx_proc < rx_todo) {

>      if (rx_status & MVNETA_RXD_FIRST_DESC) ...

>      else {

>        mvneta_swbm_add_rx_fragment()

>      }

>      ..

>      if (!rx_status & MVNETA_RXD_LAST_DESC)

>          continue;

>      ..

>      if (xdp_prog)

>        mvneta_run_xdp(...)

>    }

> 

> roughly looking like above. First question, do you ever hit

> !MVNETA_RXD_LAST_DESC today? I assume this is avoided by hardware

> setup when XDP is enabled, otherwise _run_xdp() would be

> broken correct? Next question, given last descriptor bit

> logic whats the condition to hit the code added in this patch?

> wouldn't we need more than 1 descriptor and then we would

> skip the xdp_run... sorry lost me and its probably easier

> to let you give the flow vs spending an hour trying to

> track it down.


I will point it out in the new commit log, but this is a preliminary patch for
xdp multi-buff support. In the current codebase xdp_update_skb_shared_info()
is run just when the NIC is not running in XDP mode (please note
mvneta_swbm_add_rx_fragment() is run even if xdp_prog is NULL).
When we add xdp multi-buff support, xdp_update_skb_shared_info() will run even
in XDP mode since we will remove the MTU constraint.

In the current codebase the following condition can occur in non-XDP mode if
the packet is split on 3 or more descriptors (e.g. MTU 9000):

if (!(rx_status & MVNETA_RXD_LAST_DESC))
   continue;

> 

> But, in theory as you handle a hardware descriptor you can build

> up a set of pages using them to create a single skb rather than a

> skb per descriptor. But don't we know if pfmemalloc should be

> done while we are building the frag list? Can't we just set it

> vs this for loop in xdp_update_skb_shared_info(),


I added pfmemalloc code in xdp_update_skb_shared_info() in order to reuse it
for the xdp_redirect use-case (e.g. whenever we redirect a xdp multi-buff
in a veth or in a cpumap). I have a pending patch where I am using
xdp_update_skb_shared_info in __xdp_build_skb_from_frame().

> 

> > +	for (i = 0; i < nr_frags; i++) {

> > +		struct page *page = skb_frag_page(&sinfo->frags[i]);

> > +

> > +		page = compound_head(page);

> > +		if (page_is_pfmemalloc(page)) {

> > +			skb->pfmemalloc = true;

> > +			break;

> > +		}

> > +	}

> > +}

> 

> ...

> 

> > diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c

> > index 361bc4fbe20b..abf2e50880e0 100644

> > --- a/drivers/net/ethernet/marvell/mvneta.c

> > +++ b/drivers/net/ethernet/marvell/mvneta.c

> > @@ -2294,18 +2294,29 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,

> >  	rx_desc->buf_phys_addr = 0;

> >  

> >  	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {

> > -		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];

> > +		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags];

> >  

> >  		skb_frag_off_set(frag, pp->rx_offset_correction);

> >  		skb_frag_size_set(frag, data_len);

> >  		__skb_frag_set_page(frag, page);

> > +		/* We don't need to reset pp_recycle here. It's already set, so

> > +		 * just mark fragments for recycling.

> > +		 */

> > +		page_pool_store_mem_info(page, rxq->page_pool);

> > +

> > +		/* first fragment */

> > +		if (!xdp_sinfo->nr_frags)

> > +			xdp_sinfo->gso_type = *size;

> 

> Would be nice to also change 'int size' -> 'unsigned int size' so the

> types matched. Presumably you really can't have a negative size.

> 


ack

> Also how about giving gso_type a better name. xdp_sinfo->size maybe?


I did it in this way in order to avoid adding a union in skb_shared_info.
What about adding an inline helper to set/get it? e.g.

static inline u32 xdp_get_data_len(struct skb_shared_info *sinfo)
{
    return sinfo->gso_type;
}

static inline void xdp_set_data_len(struct skb_shared_info *sinfo, u32 datalen)
{
    sinfo->gso_type = datalen;
}

Regards,
Lorenzo

> 

> 

> > +		xdp_sinfo->nr_frags++;

> >  

> >  		/* last fragment */

> >  		if (len == *size) {

> >  			struct skb_shared_info *sinfo;

> >  

> >  			sinfo = xdp_get_shared_info_from_buff(xdp);

> > +			sinfo->xdp_frags_tsize = xdp_sinfo->nr_frags * PAGE_SIZE;

> >  			sinfo->nr_frags = xdp_sinfo->nr_frags;

> > +			sinfo->gso_type = xdp_sinfo->gso_type;

> >  			memcpy(sinfo->frags, xdp_sinfo->frags,

> >  			       sinfo->nr_frags * sizeof(skb_frag_t));

> >  		}

> 

> Thanks,

> John

>
John Fastabend July 14, 2021, 11:44 p.m. UTC | #3
Lorenzo Bianconi wrote:
> > Lorenzo Bianconi wrote:

> > > Introduce xdp_update_skb_shared_info routine to update frags array

> > > metadata from a given xdp_buffer/xdp_frame. We do not need to reset

> > > frags array since it is already initialized by the driver.

> > > Rely on xdp_update_skb_shared_info in mvneta driver.

> > 

> > Some more context here would really help. I had to jump into the mvneta

> > driver to see what is happening.

> 

> Hi John,

> 

> ack, you are right. I will add more context next time. Sorry for the noise.

> 

> > 

> > So as I read this we have a loop processing the descriptor in

> > mvneta_rx_swbm()

> > 

> >  mvneta_rx_swbm()

> >    while (rx_proc < budget && rx_proc < rx_todo) {

> >      if (rx_status & MVNETA_RXD_FIRST_DESC) ...

> >      else {

> >        mvneta_swbm_add_rx_fragment()

> >      }

> >      ..

> >      if (!rx_status & MVNETA_RXD_LAST_DESC)

> >          continue;

> >      ..

> >      if (xdp_prog)

> >        mvneta_run_xdp(...)

> >    }

> > 

> > roughly looking like above. First question, do you ever hit

> > !MVNETA_RXD_LAST_DESC today? I assume this is avoided by hardware

> > setup when XDP is enabled, otherwise _run_xdp() would be

> > broken correct? Next question, given last descriptor bit

> > logic whats the condition to hit the code added in this patch?

> > wouldn't we need more than 1 descriptor and then we would

> > skip the xdp_run... sorry lost me and its probably easier

> > to let you give the flow vs spending an hour trying to

> > track it down.

> 

> I will point it out in the new commit log, but this is a preliminary patch for

> xdp multi-buff support. In the current codebase xdp_update_skb_shared_info()

> is run just when the NIC is not running in XDP mode (please note

> mvneta_swbm_add_rx_fragment() is run even if xdp_prog is NULL).

> When we add xdp multi-buff support, xdp_update_skb_shared_info() will run even

> in XDP mode since we will remove the MTU constraint.

> 

> In the current codebase the following condition can occur in non-XDP mode if

> the packet is split on 3 or more descriptors (e.g. MTU 9000):

> 

> if (!(rx_status & MVNETA_RXD_LAST_DESC))

>    continue;


But, as is there is no caller of xdp_update_skb_shared_info() so
I think we should move the these two patches into the series with
the multibuf support.

> 

> > 

> > But, in theory as you handle a hardware descriptor you can build

> > up a set of pages using them to create a single skb rather than a

> > skb per descriptor. But don't we know if pfmemalloc should be

> > done while we are building the frag list? Can't we just set it

> > vs this for loop in xdp_update_skb_shared_info(),

> 

> I added pfmemalloc code in xdp_update_skb_shared_info() in order to reuse it

> for the xdp_redirect use-case (e.g. whenever we redirect a xdp multi-buff

> in a veth or in a cpumap). I have a pending patch where I am using

> xdp_update_skb_shared_info in __xdp_build_skb_from_frame().


OK, but it adds an extra for loop and the related overhead. Can
we avoid this overhead and just set it from where we first
know we have a compound page. Or carry some bit through and
do a simpler check,

 if (pfmemalloc_needed) skb->pfmemalloc = true;

I guess in the case here its building the skb so performance is maybe
not as critical, but if it gets used in the redirect case then we
shouldn't be doing unnecessary for loops.

> 

> > 

> > > +	for (i = 0; i < nr_frags; i++) {

> > > +		struct page *page = skb_frag_page(&sinfo->frags[i]);

> > > +

> > > +		page = compound_head(page);

> > > +		if (page_is_pfmemalloc(page)) {

> > > +			skb->pfmemalloc = true;

> > > +			break;

> > > +		}

> > > +	}

> > > +}

> > 

> > ...

> > 

> > > diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c

> > > index 361bc4fbe20b..abf2e50880e0 100644

> > > --- a/drivers/net/ethernet/marvell/mvneta.c

> > > +++ b/drivers/net/ethernet/marvell/mvneta.c

> > > @@ -2294,18 +2294,29 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,

> > >  	rx_desc->buf_phys_addr = 0;

> > >  

> > >  	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {

> > > -		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];

> > > +		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags];

> > >  

> > >  		skb_frag_off_set(frag, pp->rx_offset_correction);

> > >  		skb_frag_size_set(frag, data_len);

> > >  		__skb_frag_set_page(frag, page);

> > > +		/* We don't need to reset pp_recycle here. It's already set, so

> > > +		 * just mark fragments for recycling.

> > > +		 */

> > > +		page_pool_store_mem_info(page, rxq->page_pool);

> > > +

> > > +		/* first fragment */

> > > +		if (!xdp_sinfo->nr_frags)

> > > +			xdp_sinfo->gso_type = *size;

> > 

> > Would be nice to also change 'int size' -> 'unsigned int size' so the

> > types matched. Presumably you really can't have a negative size.

> > 

> 

> ack

> 

> > Also how about giving gso_type a better name. xdp_sinfo->size maybe?

> 

> I did it in this way in order to avoid adding a union in skb_shared_info.

> What about adding an inline helper to set/get it? e.g.


What was wrong with the union?

> 

> static inline u32 xdp_get_data_len(struct skb_shared_info *sinfo)

> {

>     return sinfo->gso_type;

> }

> 

> static inline void xdp_set_data_len(struct skb_shared_info *sinfo, u32 datalen)

> {

>     sinfo->gso_type = datalen;

> }

> 

> Regards,

> Lorenzo
Lorenzo Bianconi July 15, 2021, 11:27 p.m. UTC | #4
> Lorenzo Bianconi wrote:

> > > Lorenzo Bianconi wrote:

> > > > Introduce xdp_update_skb_shared_info routine to update frags array

> > > > metadata from a given xdp_buffer/xdp_frame. We do not need to reset

> > > > frags array since it is already initialized by the driver.

> > > > Rely on xdp_update_skb_shared_info in mvneta driver.

> > > 

> > > Some more context here would really help. I had to jump into the mvneta

> > > driver to see what is happening.

> > 

> > Hi John,

> > 

> > ack, you are right. I will add more context next time. Sorry for the noise.

> > 

> > > 

> > > So as I read this we have a loop processing the descriptor in

> > > mvneta_rx_swbm()

> > > 

> > >  mvneta_rx_swbm()

> > >    while (rx_proc < budget && rx_proc < rx_todo) {

> > >      if (rx_status & MVNETA_RXD_FIRST_DESC) ...

> > >      else {

> > >        mvneta_swbm_add_rx_fragment()

> > >      }

> > >      ..

> > >      if (!rx_status & MVNETA_RXD_LAST_DESC)

> > >          continue;

> > >      ..

> > >      if (xdp_prog)

> > >        mvneta_run_xdp(...)

> > >    }

> > > 

> > > roughly looking like above. First question, do you ever hit

> > > !MVNETA_RXD_LAST_DESC today? I assume this is avoided by hardware

> > > setup when XDP is enabled, otherwise _run_xdp() would be

> > > broken correct? Next question, given last descriptor bit

> > > logic whats the condition to hit the code added in this patch?

> > > wouldn't we need more than 1 descriptor and then we would

> > > skip the xdp_run... sorry lost me and its probably easier

> > > to let you give the flow vs spending an hour trying to

> > > track it down.

> > 

> > I will point it out in the new commit log, but this is a preliminary patch for

> > xdp multi-buff support. In the current codebase xdp_update_skb_shared_info()

> > is run just when the NIC is not running in XDP mode (please note

> > mvneta_swbm_add_rx_fragment() is run even if xdp_prog is NULL).

> > When we add xdp multi-buff support, xdp_update_skb_shared_info() will run even

> > in XDP mode since we will remove the MTU constraint.

> > 

> > In the current codebase the following condition can occur in non-XDP mode if

> > the packet is split on 3 or more descriptors (e.g. MTU 9000):

> > 

> > if (!(rx_status & MVNETA_RXD_LAST_DESC))

> >    continue;

> 

> But, as is there is no caller of xdp_update_skb_shared_info() so

> I think we should move the these two patches into the series with

> the multibuf support.


mvneta is currently using it building the skb in mvneta_swbm_build_skb()
running in non-xdp mode but I am fine merging this series in the
multi-buff one.

> 

> > 

> > > 

> > > But, in theory as you handle a hardware descriptor you can build

> > > up a set of pages using them to create a single skb rather than a

> > > skb per descriptor. But don't we know if pfmemalloc should be

> > > done while we are building the frag list? Can't we just set it

> > > vs this for loop in xdp_update_skb_shared_info(),

> > 

> > I added pfmemalloc code in xdp_update_skb_shared_info() in order to reuse it

> > for the xdp_redirect use-case (e.g. whenever we redirect a xdp multi-buff

> > in a veth or in a cpumap). I have a pending patch where I am using

> > xdp_update_skb_shared_info in __xdp_build_skb_from_frame().

> 

> OK, but it adds an extra for loop and the related overhead. Can

> we avoid this overhead and just set it from where we first

> know we have a compound page. Or carry some bit through and

> do a simpler check,

> 

>  if (pfmemalloc_needed) skb->pfmemalloc = true;

> 

> I guess in the case here its building the skb so performance is maybe

> not as critical, but if it gets used in the redirect case then we

> shouldn't be doing unnecessary for loops.


doing so every driver will need to take care of it building the xdp_buff.
Does it work to do it since probably multi-buff is not critical for
performance?
In order to support xdp_redirect we need to save this info in
xdp_buff/xdp_frame, maybe in the flag field added in xdp multi-buff series.

> 

> > 

> > > 

> > > > +	for (i = 0; i < nr_frags; i++) {

> > > > +		struct page *page = skb_frag_page(&sinfo->frags[i]);

> > > > +

> > > > +		page = compound_head(page);

> > > > +		if (page_is_pfmemalloc(page)) {

> > > > +			skb->pfmemalloc = true;

> > > > +			break;

> > > > +		}

> > > > +	}

> > > > +}

> > > 

> > > ...

> > > 

> > > > diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c

> > > > index 361bc4fbe20b..abf2e50880e0 100644

> > > > --- a/drivers/net/ethernet/marvell/mvneta.c

> > > > +++ b/drivers/net/ethernet/marvell/mvneta.c

> > > > @@ -2294,18 +2294,29 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,

> > > >  	rx_desc->buf_phys_addr = 0;

> > > >  

> > > >  	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {

> > > > -		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];

> > > > +		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags];

> > > >  

> > > >  		skb_frag_off_set(frag, pp->rx_offset_correction);

> > > >  		skb_frag_size_set(frag, data_len);

> > > >  		__skb_frag_set_page(frag, page);

> > > > +		/* We don't need to reset pp_recycle here. It's already set, so

> > > > +		 * just mark fragments for recycling.

> > > > +		 */

> > > > +		page_pool_store_mem_info(page, rxq->page_pool);

> > > > +

> > > > +		/* first fragment */

> > > > +		if (!xdp_sinfo->nr_frags)

> > > > +			xdp_sinfo->gso_type = *size;

> > > 

> > > Would be nice to also change 'int size' -> 'unsigned int size' so the

> > > types matched. Presumably you really can't have a negative size.

> > > 

> > 

> > ack

> > 

> > > Also how about giving gso_type a better name. xdp_sinfo->size maybe?

> > 

> > I did it in this way in order to avoid adding a union in skb_shared_info.

> > What about adding an inline helper to set/get it? e.g.

> 

> What was wrong with the union?


Alex requested to use gso_* fields already there (the union was in the previous
version I sent).

Regards,
Lorenzo

> 

> > 

> > static inline u32 xdp_get_data_len(struct skb_shared_info *sinfo)

> > {

> >     return sinfo->gso_type;

> > }

> > 

> > static inline void xdp_set_data_len(struct skb_shared_info *sinfo, u32 datalen)

> > {

> >     sinfo->gso_type = datalen;

> > }

> > 

> > Regards,

> > Lorenzo

>
John Fastabend July 16, 2021, 3:05 a.m. UTC | #5
Lorenzo Bianconi wrote:
> > Lorenzo Bianconi wrote:

> > > > Lorenzo Bianconi wrote:

> > > > > Introduce xdp_update_skb_shared_info routine to update frags array

> > > > > metadata from a given xdp_buffer/xdp_frame. We do not need to reset

> > > > > frags array since it is already initialized by the driver.

> > > > > Rely on xdp_update_skb_shared_info in mvneta driver.

> > > > 

> > > > Some more context here would really help. I had to jump into the mvneta

> > > > driver to see what is happening.

> > > 

> > > Hi John,

> > > 

> > > ack, you are right. I will add more context next time. Sorry for the noise.

> > > 

> > > > 

> > > > So as I read this we have a loop processing the descriptor in

> > > > mvneta_rx_swbm()

> > > > 

> > > >  mvneta_rx_swbm()

> > > >    while (rx_proc < budget && rx_proc < rx_todo) {

> > > >      if (rx_status & MVNETA_RXD_FIRST_DESC) ...

> > > >      else {

> > > >        mvneta_swbm_add_rx_fragment()

> > > >      }

> > > >      ..

> > > >      if (!rx_status & MVNETA_RXD_LAST_DESC)

> > > >          continue;

> > > >      ..

> > > >      if (xdp_prog)

> > > >        mvneta_run_xdp(...)

> > > >    }

> > > > 

> > > > roughly looking like above. First question, do you ever hit

> > > > !MVNETA_RXD_LAST_DESC today? I assume this is avoided by hardware

> > > > setup when XDP is enabled, otherwise _run_xdp() would be

> > > > broken correct? Next question, given last descriptor bit

> > > > logic whats the condition to hit the code added in this patch?

> > > > wouldn't we need more than 1 descriptor and then we would

> > > > skip the xdp_run... sorry lost me and its probably easier

> > > > to let you give the flow vs spending an hour trying to

> > > > track it down.

> > > 

> > > I will point it out in the new commit log, but this is a preliminary patch for

> > > xdp multi-buff support. In the current codebase xdp_update_skb_shared_info()

> > > is run just when the NIC is not running in XDP mode (please note

> > > mvneta_swbm_add_rx_fragment() is run even if xdp_prog is NULL).

> > > When we add xdp multi-buff support, xdp_update_skb_shared_info() will run even

> > > in XDP mode since we will remove the MTU constraint.

> > > 

> > > In the current codebase the following condition can occur in non-XDP mode if

> > > the packet is split on 3 or more descriptors (e.g. MTU 9000):

> > > 

> > > if (!(rx_status & MVNETA_RXD_LAST_DESC))

> > >    continue;

> > 

> > But, as is there is no caller of xdp_update_skb_shared_info() so

> > I think we should move the these two patches into the series with

> > the multibuf support.

> 

> mvneta is currently using it building the skb in mvneta_swbm_build_skb()

> running in non-xdp mode but I am fine merging this series in the

> multi-buff one.


My preference is to add it where it will be used. So in the multi-buf
series.

> 

> > 

> > > 

> > > > 

> > > > But, in theory as you handle a hardware descriptor you can build

> > > > up a set of pages using them to create a single skb rather than a

> > > > skb per descriptor. But don't we know if pfmemalloc should be

> > > > done while we are building the frag list? Can't we just set it

> > > > vs this for loop in xdp_update_skb_shared_info(),

> > > 

> > > I added pfmemalloc code in xdp_update_skb_shared_info() in order to reuse it

> > > for the xdp_redirect use-case (e.g. whenever we redirect a xdp multi-buff

> > > in a veth or in a cpumap). I have a pending patch where I am using

> > > xdp_update_skb_shared_info in __xdp_build_skb_from_frame().

> > 

> > OK, but it adds an extra for loop and the related overhead. Can

> > we avoid this overhead and just set it from where we first

> > know we have a compound page. Or carry some bit through and

> > do a simpler check,

> > 

> >  if (pfmemalloc_needed) skb->pfmemalloc = true;

> > 

> > I guess in the case here its building the skb so performance is maybe

> > not as critical, but if it gets used in the redirect case then we

> > shouldn't be doing unnecessary for loops.

> 

> doing so every driver will need to take care of it building the xdp_buff.

> Does it work to do it since probably multi-buff is not critical for

> performance?


OK, but I think we need to improve performance in some of the 100Gbps
drivers. Work is in progress so any thing that has potential to slow
things down again I want to call out. I agree this might be OK and
only matters for nr_frags case.

> In order to support xdp_redirect we need to save this info in

> xdp_buff/xdp_frame, maybe in the flag field added in xdp multi-buff series.


Yeah I think that would work better if possible.

> 

> > 

> > > 

> > > > 

> > > > > +	for (i = 0; i < nr_frags; i++) {

> > > > > +		struct page *page = skb_frag_page(&sinfo->frags[i]);

> > > > > +

> > > > > +		page = compound_head(page);

> > > > > +		if (page_is_pfmemalloc(page)) {

> > > > > +			skb->pfmemalloc = true;

> > > > > +			break;

> > > > > +		}

> > > > > +	}

> > > > > +}

> > > > 

> > > > ...

> > > > 

> > > > > diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c

> > > > > index 361bc4fbe20b..abf2e50880e0 100644

> > > > > --- a/drivers/net/ethernet/marvell/mvneta.c

> > > > > +++ b/drivers/net/ethernet/marvell/mvneta.c

> > > > > @@ -2294,18 +2294,29 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,

> > > > >  	rx_desc->buf_phys_addr = 0;

> > > > >  

> > > > >  	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {

> > > > > -		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];

> > > > > +		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags];

> > > > >  

> > > > >  		skb_frag_off_set(frag, pp->rx_offset_correction);

> > > > >  		skb_frag_size_set(frag, data_len);

> > > > >  		__skb_frag_set_page(frag, page);

> > > > > +		/* We don't need to reset pp_recycle here. It's already set, so

> > > > > +		 * just mark fragments for recycling.

> > > > > +		 */

> > > > > +		page_pool_store_mem_info(page, rxq->page_pool);

> > > > > +

> > > > > +		/* first fragment */

> > > > > +		if (!xdp_sinfo->nr_frags)

> > > > > +			xdp_sinfo->gso_type = *size;

> > > > 

> > > > Would be nice to also change 'int size' -> 'unsigned int size' so the

> > > > types matched. Presumably you really can't have a negative size.

> > > > 

> > > 

> > > ack

> > > 

> > > > Also how about giving gso_type a better name. xdp_sinfo->size maybe?

> > > 

> > > I did it in this way in order to avoid adding a union in skb_shared_info.

> > > What about adding an inline helper to set/get it? e.g.

> > 

> > What was wrong with the union?

> 

> Alex requested to use gso_* fields already there (the union was in the previous

> version I sent).


@Alex, I think you were just saying union the gso_size field not the
tskey field.  Anyways its a fairly small nit on my side I don't care
much either way.

> 

> Regards,

> Lorenzo
diff mbox series

Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 361bc4fbe20b..abf2e50880e0 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2294,18 +2294,29 @@  mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
 	rx_desc->buf_phys_addr = 0;
 
 	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
-		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
+		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags];
 
 		skb_frag_off_set(frag, pp->rx_offset_correction);
 		skb_frag_size_set(frag, data_len);
 		__skb_frag_set_page(frag, page);
+		/* We don't need to reset pp_recycle here. It's already set, so
+		 * just mark fragments for recycling.
+		 */
+		page_pool_store_mem_info(page, rxq->page_pool);
+
+		/* first fragment */
+		if (!xdp_sinfo->nr_frags)
+			xdp_sinfo->gso_type = *size;
+		xdp_sinfo->nr_frags++;
 
 		/* last fragment */
 		if (len == *size) {
 			struct skb_shared_info *sinfo;
 
 			sinfo = xdp_get_shared_info_from_buff(xdp);
+			sinfo->xdp_frags_tsize = xdp_sinfo->nr_frags * PAGE_SIZE;
 			sinfo->nr_frags = xdp_sinfo->nr_frags;
+			sinfo->gso_type = xdp_sinfo->gso_type;
 			memcpy(sinfo->frags, xdp_sinfo->frags,
 			       sinfo->nr_frags * sizeof(skb_frag_t));
 		}
@@ -2320,7 +2331,7 @@  mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 		      struct xdp_buff *xdp, u32 desc_status)
 {
 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-	int i, num_frags = sinfo->nr_frags;
+	int num_frags = sinfo->nr_frags, size = sinfo->gso_type;
 	struct sk_buff *skb;
 
 	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
@@ -2333,17 +2344,9 @@  mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 	skb_put(skb, xdp->data_end - xdp->data);
 	skb->ip_summed = mvneta_rx_csum(pp, desc_status);
 
-	for (i = 0; i < num_frags; i++) {
-		skb_frag_t *frag = &sinfo->frags[i];
-
-		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-				skb_frag_page(frag), skb_frag_off(frag),
-				skb_frag_size(frag), PAGE_SIZE);
-		/* We don't need to reset pp_recycle here. It's already set, so
-		 * just mark fragments for recycling.
-		 */
-		page_pool_store_mem_info(skb_frag_page(frag), pool);
-	}
+	if (num_frags)
+		xdp_update_skb_shared_info(skb, num_frags, size,
+					   sinfo->xdp_frags_tsize, sinfo);
 
 	return skb;
 }
diff --git a/include/net/xdp.h b/include/net/xdp.h
index ad5b02dcb6f4..08d151ea8400 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -164,6 +164,9 @@  void xdp_warn(const char *msg, const char *func, const int line);
 #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)
 
 struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
+void xdp_update_skb_shared_info(struct sk_buff *skb, int nr_frags,
+				int size, int truesize,
+				struct skb_shared_info *sinfo);
 struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
 					   struct sk_buff *skb,
 					   struct net_device *dev);
diff --git a/net/core/xdp.c b/net/core/xdp.c
index cc92ccb38432..3f44c69e1f56 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -527,6 +527,33 @@  int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
 }
 EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
 
+void xdp_update_skb_shared_info(struct sk_buff *skb, int nr_frags,
+				int size, int truesize,
+				struct skb_shared_info *sinfo)
+{
+	int i;
+
+	skb_shinfo(skb)->nr_frags = nr_frags;
+
+	skb->len += size;
+	skb->data_len += size;
+	skb->truesize += truesize;
+
+	if (skb->pfmemalloc)
+		return;
+
+	for (i = 0; i < nr_frags; i++) {
+		struct page *page = skb_frag_page(&sinfo->frags[i]);
+
+		page = compound_head(page);
+		if (page_is_pfmemalloc(page)) {
+			skb->pfmemalloc = true;
+			break;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(xdp_update_skb_shared_info);
+
 struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
 					   struct sk_buff *skb,
 					   struct net_device *dev)