Split ndo_xdp_xmit and ndo_start_xmit use cases in veth_xdp_rcv routine
in order to alloc skbs in bulk for XDP_PASS verdict.
Introduce xdp_alloc_skb_bulk utility routine to alloc skb bulk list.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
 drivers/net/veth.c | 100 ++++++++++++++++++++++++++++++++------------
 include/net/xdp.h  |   1 +
 net/core/xdp.c     |  15 +++++++
 3 files changed, 87 insertions(+), 29 deletions(-)

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -563,14 +563,13 @@ static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
return 0;
}
-static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
- struct xdp_frame *frame,
- struct veth_xdp_tx_bq *bq,
- struct veth_stats *stats)
+static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
+ struct xdp_frame *frame,
+ struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
{
struct xdp_frame orig_frame;
struct bpf_prog *xdp_prog;
- struct sk_buff *skb;
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
@@ -624,13 +623,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
}
rcu_read_unlock();
- skb = xdp_build_skb_from_frame(frame, rq->dev);
- if (!skb) {
- xdp_return_frame(frame);
- stats->rx_drops++;
- }
-
- return skb;
+ return frame;
err_xdp:
rcu_read_unlock();
xdp_return_frame(frame);
@@ -638,6 +631,52 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
return NULL;
}
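+/* frames[] holds at most VETH_XDP_BATCH entries; see veth_xdp_rcv() */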
+static void veth_xdp_rcv_batch(struct veth_rq *rq, void **frames,
+ int n_xdpf, struct veth_xdp_tx_bq *bq,
+ struct veth_stats *stats)
+{
+	void *skbs[VETH_XDP_BATCH];
+ int i, n_skb = 0;
+
+ for (i = 0; i < n_xdpf; i++) {
+ struct xdp_frame *frame = frames[i];
+
+ stats->xdp_bytes += frame->len;
+ frame = veth_xdp_rcv_one(rq, frame, bq, stats);
+ if (frame)
+ frames[n_skb++] = frame;
+ }
+
+ if (!n_skb)
+ return;
+
+ if (xdp_alloc_skb_bulk(skbs, n_skb, GFP_ATOMIC) < 0) {
+ for (i = 0; i < n_skb; i++) {
+ xdp_return_frame(frames[i]);
+ stats->rx_drops++;
+ }
+ return;
+ }
+
+ for (i = 0; i < n_skb; i++) {
+ struct sk_buff *skb = skbs[i];
+
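+		/* skb heads from the bulk alloc are not zeroed; clear up
+		 * to &skb->tail here, as build_skb() does internally
+		 */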
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb = __xdp_build_skb_from_frame(frames[i], skb,
+ rq->dev);
+ if (!skb) {
+ xdp_return_frame(frames[i]);
+ stats->rx_drops++;
+ continue;
+ }
+ napi_gro_receive(&rq->xdp_napi, skb);
+ }
+}
+
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
struct sk_buff *skb,
struct veth_xdp_tx_bq *bq,
@@ -788,9 +823,10 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
int i, done = 0;
for (i = 0; i < budget; i++) {
+ int i, n_frame, n_xdpf = 0, n_skb = 0;
void *frames[VETH_XDP_BATCH];
void *skbs[VETH_XDP_BATCH];
- int i, n_frame, n_skb = 0;
+ void *xdpf[VETH_XDP_BATCH];
n_frame = __ptr_ring_consume_batched(&rq->xdp_ring, frames,
					    VETH_XDP_BATCH);
@@ -798,24 +834,26 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
break;
for (i = 0; i < n_frame; i++) {
- void *f = frames[i];
- struct sk_buff *skb;
-
- if (veth_is_xdp_frame(f)) {
- struct xdp_frame *frame = veth_ptr_to_xdp(f);
-
- stats->xdp_bytes += frame->len;
- skb = veth_xdp_rcv_one(rq, frame, bq, stats);
- } else {
- skb = f;
- stats->xdp_bytes += skb->len;
- skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
- }
+ if (veth_is_xdp_frame(frames[i]))
+ xdpf[n_xdpf++] = veth_ptr_to_xdp(frames[i]);
+ else
+ skbs[n_skb++] = frames[i];
+ }
+
+ /* ndo_xdp_xmit */
+ if (n_xdpf)
+ veth_xdp_rcv_batch(rq, xdpf, n_xdpf, bq, stats);
+
+ /* ndo_start_xmit */
+ for (i = 0; i < n_skb; i++) {
+ struct sk_buff *skb = skbs[i];
+
+ stats->xdp_bytes += skb->len;
+ skb = veth_xdp_rcv_skb(rq, skb, bq, stats);
if (skb)
- skbs[n_skb++] = skb;
+ napi_gro_receive(&rq->xdp_napi, skb);
}
- for (i = 0; i < n_skb; i++)
- napi_gro_receive(&rq->xdp_napi, skbs[i]);
+
done += n_frame;
}
diff --git a/include/net/xdp.h b/include/net/xdp.h
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -170,6 +170,7 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct net_device *dev);
+int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
diff --git a/net/core/xdp.c b/net/core/xdp.c
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -514,6 +514,21 @@ void xdp_warn(const char *msg, const char *func, const int line)
};
EXPORT_SYMBOL_GPL(xdp_warn);
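+/* Bulk-allocate skb heads from skbuff_head_cache. Returns 0 on success
+ * or -ENOMEM; kmem_cache_alloc_bulk() releases any partial allocation
+ * itself, so on failure no skbs[] entry is valid. Heads are not zeroed.
+ */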
+int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
+{
+ n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
+ n_skb, skbs);
+ if (unlikely(!n_skb))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
+
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
struct sk_buff *skb,
struct net_device *dev)
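A minimal usage sketch of the new helper for a driver other than veth;
my_drv_rx_bulk(), MY_RX_BATCH and the caller-supplied napi pointer are
illustrative only, and the caller is assumed to guarantee
n <= MY_RX_BATCH:

#define MY_RX_BATCH	16	/* illustrative; mirrors VETH_XDP_BATCH */

static void my_drv_rx_bulk(struct napi_struct *napi, struct net_device *dev,
			   struct xdp_frame **frames, int n)
{
	void *skbs[MY_RX_BATCH];
	int i;

	/* one slab call for the whole burst instead of n separate allocs */
	if (xdp_alloc_skb_bulk(skbs, n, GFP_ATOMIC) < 0) {
		/* all-or-nothing: nothing in skbs[] to free on failure */
		for (i = 0; i < n; i++)
			xdp_return_frame(frames[i]);
		return;
	}

	for (i = 0; i < n; i++) {
		struct sk_buff *skb = skbs[i];

		/* bulk-allocated heads are not zeroed */
		memset(skb, 0, offsetof(struct sk_buff, tail));
		skb = __xdp_build_skb_from_frame(frames[i], skb, dev);
		if (!skb) {
			xdp_return_frame(frames[i]);
			continue;
		}
		napi_gro_receive(napi, skb);
	}
}

Bulk-allocating the heads amortizes one kmem_cache_alloc_bulk() call
over the whole burst; the frame data pages are untouched, since
__xdp_build_skb_from_frame() builds each skb around the frame's
existing buffer.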