@@ -805,6 +805,8 @@ typedef unsigned char *sk_buff_data_t;
* @csum_level: indicates the number of consecutive checksums found in
* the packet minus one that have been verified as
* CHECKSUM_UNNECESSARY (max 3)
+ * @devmem: indicates that all the fragments in this skb are backed by
+ * device memory and are therefore not readable by the host.
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @slow_gro: state present at GRO time, slower prepare step required
@@ -991,7 +993,7 @@ struct sk_buff {
#if IS_ENABLED(CONFIG_IP_SCTP)
__u8 csum_not_inet:1;
#endif
-
+ __u8 devmem:1;
#if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS)
__u16 tc_index; /* traffic control index */
#endif
@@ -1766,6 +1768,12 @@ static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb)
__skb_zcopy_downgrade_managed(skb);
}
+/* Return true if frags in this skb are not readable by the host. */
+static inline bool skb_frags_not_readable(const struct sk_buff *skb)
+{
+ return skb->devmem;
+}
+
static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
skb->next = NULL;
@@ -2468,6 +2476,10 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
struct page *page, int off, int size)
{
__skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
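+ /* page_pool_iov frags are backed by device memory and are not
+ * readable by the host; mark the skb as devmem.
+ */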
+ if (page_is_page_pool_iov(page)) {
+ skb->devmem = true;
+ return;
+ }
/* Propagate page pfmemalloc to the skb if we can. The problem is
* that not all callers have unique ownership of the page but rely
@@ -1012,7 +1012,7 @@ static inline int tcp_skb_mss(const struct sk_buff *skb)
static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
- return likely(!TCP_SKB_CB(skb)->eor);
+ return likely(!TCP_SKB_CB(skb)->eor && !skb_frags_not_readable(skb));
}
static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
@@ -1020,7 +1020,8 @@ static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
{
return likely(tcp_skb_can_collapse_to(to) &&
mptcp_skb_can_collapse(to, from) &&
- skb_pure_zcopy_same(to, from));
+ skb_pure_zcopy_same(to, from) &&
+ skb_frags_not_readable(to) == skb_frags_not_readable(from));
}
/* Events passed to congestion control interface */
@@ -425,6 +425,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
return 0;
}
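+ /* Devmem frag payload lives in device memory and cannot be copied
+ * to the iterator; only the linear header above is host-readable.
+ */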
+ if (skb_frags_not_readable(skb))
+ goto short_copy;
+
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
@@ -616,6 +619,9 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
{
int frag;
+ if (skb_frags_not_readable(skb))
+ return -EFAULT;
+
if (msg && msg->msg_ubuf && msg->sg_from_iter)
return msg->sg_from_iter(sk, skb, from, length);
@@ -390,6 +390,9 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
struct skb_shared_info *pinfo = skb_shinfo(skb);
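+ /* Devmem frags are not host-readable, so frag0 cannot be memcpy'd
+ * into the linear area; callers skip this path for devmem skbs.
+ */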
+ if (WARN_ON_ONCE(skb_frags_not_readable(skb)))
+ return;
+
BUG_ON(skb->end - skb->tail < grow);
memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
@@ -411,7 +414,7 @@ static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
int grow = skb_gro_offset(skb) - skb_headlen(skb);
- if (grow > 0)
+ if (grow > 0 && !skb_frags_not_readable(skb))
gro_pull_from_frag0(skb, grow);
}
@@ -1230,6 +1230,14 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
struct page *p;
u8 *vaddr;
+ if (skb_frag_is_page_pool_iov(frag)) {
+ printk("%sskb frag %d: not readable\n", level, i);
+ len -= frag->bv_len;
+ if (!len)
+ break;
+ continue;
+ }
+
skb_frag_foreach_page(frag, skb_frag_off(frag),
skb_frag_size(frag), p, p_off, p_len,
copied) {
@@ -1807,6 +1815,9 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
return -EINVAL;
+ if (skb_frags_not_readable(skb))
+ return -EFAULT;
+
if (!num_frags)
goto release;
@@ -1977,8 +1988,12 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
int headerlen = skb_headroom(skb);
unsigned int size = skb_end_offset(skb) + skb->data_len;
- struct sk_buff *n = __alloc_skb(size, gfp_mask,
- skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+ struct sk_buff *n;
+
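+ /* skb_copy() pulls all frag data into host memory, which devmem
+ * frags do not allow.
+ */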
+ if (skb_frags_not_readable(skb))
+ return NULL;
+
+ n = __alloc_skb(size, gfp_mask, skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
@@ -2304,14 +2319,16 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom,
gfp_t gfp_mask)
{
- /*
- * Allocate the copy buffer
- */
- struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
- gfp_mask, skb_alloc_rx_flag(skb),
- NUMA_NO_NODE);
int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
+ struct sk_buff *n;
+
+ if (skb_frags_not_readable(skb))
+ return NULL;
+
+ /* Allocate the copy buffer */
+ n = __alloc_skb(newheadroom + skb->len + newtailroom, gfp_mask,
+ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
@@ -2650,6 +2667,9 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
*/
int i, k, eat = (skb->tail + delta) - skb->end;
+ if (skb_frags_not_readable(skb))
+ return NULL;
+
if (eat > 0 || skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
GFP_ATOMIC))
@@ -2803,6 +2823,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
to += copy;
}
+ if (skb_frags_not_readable(skb))
+ goto fault;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
@@ -2991,6 +3014,9 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
/*
* then map the fragments
*/
+ if (skb_frags_not_readable(skb))
+ return false;
+
for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
@@ -3214,6 +3240,9 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
from += copy;
}
+ if (skb_frags_not_readable(skb))
+ goto fault;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int end;
@@ -3293,6 +3322,9 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
pos = copy;
}
+ if (skb_frags_not_readable(skb))
+ return 0;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -3393,6 +3425,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
pos = copy;
}
+ if (skb_frags_not_readable(skb))
+ return 0;
+
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
@@ -3883,7 +3918,9 @@ static inline void skb_split_inside_header(struct sk_buff *skb,
skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
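+ /* All frags move to skb1, so the devmem flag follows them. */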
+ skb1->devmem = skb->devmem;
skb_shinfo(skb)->nr_frags = 0;
+ skb->devmem = 0;
skb1->data_len = skb->data_len;
skb1->len += skb1->data_len;
skb->data_len = 0;
@@ -3897,6 +3934,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
{
int i, k = 0;
const int nfrags = skb_shinfo(skb)->nr_frags;
+ const int devmem = skb->devmem;
skb_shinfo(skb)->nr_frags = 0;
skb1->len = skb1->data_len = skb->len - len;
@@ -3930,6 +3968,16 @@ static inline void skb_split_no_header(struct sk_buff *skb,
pos += size;
}
skb_shinfo(skb1)->nr_frags = k;
+
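+ /* Each half keeps the devmem flag only if it still holds frags. */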
+ if (skb_shinfo(skb)->nr_frags)
+ skb->devmem = devmem;
+ else
+ skb->devmem = 0;
+
+ if (skb_shinfo(skb1)->nr_frags)
+ skb1->devmem = devmem;
+ else
+ skb1->devmem = 0;
}
/**
@@ -4165,6 +4213,9 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
return block_limit - abs_offset;
}
+ if (skb_frags_not_readable(st->cur_skb))
+ return 0;
+
if (st->frag_idx == 0 && !st->frag_data)
st->stepped_offset += skb_headlen(st->cur_skb);
@@ -5779,7 +5830,10 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
(from->pp_recycle && skb_cloned(from)))
return false;
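+ /* Host-readable and devmem frags cannot be mixed in one skb. */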
- if (len <= skb_tailroom(to)) {
+ if (skb_frags_not_readable(from) != skb_frags_not_readable(to))
+ return false;
+
+ if (len <= skb_tailroom(to) && !skb_frags_not_readable(from)) {
if (len)
BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
*delta_truesize = 0;
@@ -5954,6 +6008,9 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
if (!pskb_may_pull(skb, write_len))
return -ENOMEM;
+ if (skb_frags_not_readable(skb))
+ return -EFAULT;
+
if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
return 0;
@@ -6608,7 +6665,7 @@ void skb_condense(struct sk_buff *skb)
{
if (skb->data_len) {
if (skb->data_len > skb->end - skb->tail ||
- skb_cloned(skb))
+ skb_cloned(skb) || skb_frags_not_readable(skb))
return;
/* Nice, we can free page frag(s) right now */
@@ -2138,6 +2138,9 @@ static int tcp_zerocopy_receive(struct sock *sk,
skb = tcp_recv_skb(sk, seq, &offset);
}
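+ /* Devmem frags have no host pages that could be mapped to userspace. */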
+ if (skb_frags_not_readable(skb))
+ break;
+
if (TCP_SKB_CB(skb)->has_rxtstamp) {
tcp_update_recv_tstamps(skb, tss);
zc->msg_flags |= TCP_CMSG_TS;
@@ -4411,6 +4414,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
if (crypto_ahash_update(req))
return 1;
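+ /* The MD5 hash must read the frag payload, which devmem frags
+ * do not allow.
+ */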
+ if (skb_frags_not_readable(skb))
+ return 1;
+
for (i = 0; i < shi->nr_frags; ++i) {
const skb_frag_t *f = &shi->frags[i];
unsigned int offset = skb_frag_off(f);
@@ -5264,6 +5264,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
n = tcp_skb_next(skb, list);
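+ /* Unreadable skbs can't be collapsed; skip them but keep advancing start. */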
+ if (skb_frags_not_readable(skb))
+ goto skip_this;
+
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb = tcp_collapse_one(sk, skb, list, root);
@@ -5284,17 +5287,20 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
break;
}
- if (n && n != tail && mptcp_skb_can_collapse(skb, n) &&
+ if (n && n != tail && !skb_frags_not_readable(n) &&
+ mptcp_skb_can_collapse(skb, n) &&
TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
end_of_skbs = false;
break;
}
+skip_this:
/* Decided to skip this, advance start seq. */
start = TCP_SKB_CB(skb)->end_seq;
}
if (end_of_skbs ||
- (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
+ skb_frags_not_readable(skb))
return;
__skb_queue_head_init(&tmp);
@@ -5338,7 +5344,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
if (!skb ||
skb == tail ||
!mptcp_skb_can_collapse(nskb, skb) ||
- (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
+ skb_frags_not_readable(skb))
goto end;
#ifdef CONFIG_TLS_DEVICE
if (skb->decrypted != nskb->decrypted)
@@ -2309,7 +2309,8 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
if (unlikely(TCP_SKB_CB(skb)->eor) ||
tcp_has_tx_tstamp(skb) ||
- !skb_pure_zcopy_same(skb, next))
+ !skb_pure_zcopy_same(skb, next) ||
+ skb_frags_not_readable(skb) != skb_frags_not_readable(next))
return false;
len -= skb->len;
@@ -3193,6 +3194,8 @@ static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
return false;
if (skb_cloned(skb))
return false;
+ if (skb_frags_not_readable(skb))
+ return false;
/* Some heuristics for collapsing over SACK'd could be invented */
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
return false;
@@ -2156,7 +2156,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
}
}
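+ /* Only the linear part of a devmem skb is host-readable. */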
- snaplen = skb->len;
+ snaplen = skb_frags_not_readable(skb) ? skb_headlen(skb) : skb->len;
res = run_filter(skb, sk, snaplen);
if (!res)
@@ -2279,7 +2279,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
}
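+ /* Only the linear part of a devmem skb is host-readable. */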
- snaplen = skb->len;
+ snaplen = skb_frags_not_readable(skb) ? skb_headlen(skb) : skb->len;
res = run_filter(skb, sk, snaplen);
if (!res)