@@ -397,8 +397,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 {
 	struct kmem_cache *cache;
 	struct sk_buff *skb;
-	u8 *data;
+	unsigned int osize;
 	bool pfmemalloc;
+	u8 *data;
 
 	cache = (flags & SKB_ALLOC_FCLONE)
 		? skbuff_fclone_cache : skbuff_head_cache;
@@ -430,7 +431,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * Put skb_shared_info exactly at the end of allocated zone,
 	 * to allow max possible filling before reallocation.
 	 */
-	size = SKB_WITH_OVERHEAD(ksize(data));
+	osize = ksize(data);
+	size = SKB_WITH_OVERHEAD(osize);
 	prefetchw(data + size);
 
 	/*
@@ -439,7 +441,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	__build_skb_around(skb, data, 0);
+	__build_skb_around(skb, data, osize);
 	skb->pfmemalloc = pfmemalloc;
 
 	if (flags & SKB_ALLOC_FCLONE) {
Avoid calling ksize() a second time in __build_skb_around() by passing the result of ksize(data) down to it.

An nginx stress test shows this change can reduce the CPU usage of ksize() and give a small performance boost.

Signed-off-by: Li RongQing <lirongqing@baidu.com>
---
 net/core/skbuff.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
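For context (not part of the patch), below is a condensed sketch of the size handling in __build_skb_around(), paraphrased from net/core/skbuff.c rather than quoted verbatim, with everything unrelated to this change elided. When frag_size is 0 the helper falls back to ksize(data), so passing the osize already computed in __alloc_skb() avoids that second lookup:

/* Condensed, paraphrased sketch -- not the complete function; only the
 * part relevant to this patch is shown.
 */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	/* frag_size == 0 means "ask the slab allocator", i.e. another
	 * ksize(data) call; a non-zero frag_size (the osize passed by
	 * __alloc_skb() after this patch) is used as-is.
	 */
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes the caller already cleared the skb head */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;

	/* ... skb_shared_info initialization elided ... */
}

Callers that already pass a non-zero frag_size, such as build_skb() with a page-fragment size, never hit the ksize() fallback and are unaffected by this change.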