mirror of https://github.com/torvalds/linux.git
net: mirror skb frag ref/unref helpers
Refactor some of the skb frag ref/unref helpers for improved clarity.

Implement napi_pp_get_page() to be the mirror counterpart of
napi_pp_put_page(). Implement skb_page_ref() to be the mirror of
skb_page_unref(). Improve __skb_frag_ref() to become a mirror
counterpart of __skb_frag_unref(). Previously unref could handle both
pp & non-pp pages, while ref could only handle non-pp pages; now both
the ref & unref helpers correctly handle both pp & non-pp pages.

Now that __skb_frag_ref() can handle both pp & non-pp pages, remove
skb_pp_frag_ref() and use __skb_frag_ref() instead. This lets us remove
the pp-specific handling from skb_try_coalesce().

Additionally, since __skb_frag_ref() can now handle both pp & non-pp
pages, a latent issue in skb_shift() should now be fixed: previously
this function would take a non-pp ref but do a pp unref on potential pp
frags (fragfrom). After this patch, skb_shift() correctly does a pp
ref/unref on pp frags.

Signed-off-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://lore.kernel.org/r/20240410190505.1225848-3-almasrymina@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
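To illustrate the now-mirrored API, here is a minimal caller-side sketch (hypothetical code, not part of this commit; it assumes only the __skb_frag_ref()/__skb_frag_unref() pair discussed above). A reference taken with the parent skb's pp_recycle flag is balanced by an unref with the same flag, whether the fragment is backed by a page pool page or a regular page:

	/* Hypothetical example: copy a fragment into nskb and take a
	 * matching reference. Passing nskb->pp_recycle lets the helper
	 * take a page_pool reference (pp_ref_count) for pp pages and a
	 * plain get_page() reference otherwise.
	 */
	skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
	__skb_frag_ref(&skb_shinfo(nskb)->frags[i], nskb->pp_recycle);

	/* ... and on release, the mirrored unref drops the same kind of
	 * reference that the ref above took.
	 */
	__skb_frag_unref(&skb_shinfo(nskb)->frags[i], nskb->pp_recycle);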
This commit is contained in:
parent f6d827b180
commit a580ea994f
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c

@@ -1659,7 +1659,7 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
 	for (i = 0; i < record->num_frags; i++) {
 		skb_shinfo(nskb)->frags[i] = record->frags[i];
 		/* increase the frag ref count */
-		__skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+		__skb_frag_ref(&skb_shinfo(nskb)->frags[i], nskb->pp_recycle);
 	}
 
 	skb_shinfo(nskb)->nr_frags = record->num_frags;
drivers/net/ethernet/sun/cassini.c

@@ -2000,7 +2000,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		skb->len += hlen - swivel;
 
 		skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
-		__skb_frag_ref(frag);
+		__skb_frag_ref(frag, skb->pp_recycle);
 
 		/* any more data? */
 		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
@@ -2024,7 +2024,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 			frag++;
 
 			skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
-			__skb_frag_ref(frag);
+			__skb_frag_ref(frag, skb->pp_recycle);
 			RX_USED_ADD(page, hlen + cp->crc_size);
 		}
 
drivers/net/veth.c

@@ -717,7 +717,7 @@ static void veth_xdp_get(struct xdp_buff *xdp)
 		return;
 
 	for (i = 0; i < sinfo->nr_frags; i++)
-		__skb_frag_ref(&sinfo->frags[i]);
+		__skb_frag_ref(&sinfo->frags[i], false);
 }
 
 static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
include/linux/skbuff_ref.h

@@ -8,16 +8,47 @@
 #define _LINUX_SKBUFF_REF_H
 
 #include <linux/skbuff.h>
+#include <net/page_pool/helpers.h>
+
+#ifdef CONFIG_PAGE_POOL
+static inline bool is_pp_page(struct page *page)
+{
+	return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
+}
+
+static inline bool napi_pp_get_page(struct page *page)
+{
+	page = compound_head(page);
+
+	if (!is_pp_page(page))
+		return false;
+
+	page_pool_ref_page(page);
+	return true;
+}
+#endif
+
+static inline void skb_page_ref(struct page *page, bool recycle)
+{
+#ifdef CONFIG_PAGE_POOL
+	if (recycle && napi_pp_get_page(page))
+		return;
+#endif
+	get_page(page);
+}
 
 /**
  * __skb_frag_ref - take an addition reference on a paged fragment.
  * @frag: the paged fragment
+ * @recycle: skb->pp_recycle param of the parent skb. False if no parent skb.
  *
- * Takes an additional reference on the paged fragment @frag.
+ * Takes an additional reference on the paged fragment @frag. Obtains the
+ * correct reference count depending on whether skb->pp_recycle is set and
+ * whether the frag is a page pool frag.
  */
-static inline void __skb_frag_ref(skb_frag_t *frag)
+static inline void __skb_frag_ref(skb_frag_t *frag, bool recycle)
 {
-	get_page(skb_frag_page(frag));
+	skb_page_ref(skb_frag_page(frag), recycle);
 }
 
 /**
@@ -29,7 +60,7 @@ static inline void __skb_frag_ref(skb_frag_t *frag)
  */
 static inline void skb_frag_ref(struct sk_buff *skb, int f)
 {
-	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+	__skb_frag_ref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
 }
 
 bool napi_pp_put_page(struct page *page);
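The recycle argument follows a simple rule, visible in the call sites this commit converts: callers holding a parent skb forward skb->pp_recycle, while callers without one (veth's xdp_buff frags above, the TLS fallback's record frags below) pass false and keep the old plain get_page() behavior. A minimal sketch of the two cases (hypothetical caller, using only the helpers above):

	/* With a parent skb: honor its pp_recycle flag. */
	__skb_frag_ref(frag, skb->pp_recycle);

	/* Without a parent skb: take a plain page reference. */
	__skb_frag_ref(frag, false);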
net/core/skbuff.c

@@ -907,11 +907,6 @@ static void skb_clone_fraglist(struct sk_buff *skb)
 		skb_get(list);
 }
 
-static bool is_pp_page(struct page *page)
-{
-	return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
-}
-
 int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
 		    unsigned int headroom)
 {
@@ -1033,37 +1028,6 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data)
 	return napi_pp_put_page(virt_to_page(data));
 }
 
-/**
- * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
- * @skb: page pool aware skb
- *
- * Increase the fragment reference count (pp_ref_count) of a skb. This is
- * intended to gain fragment references only for page pool aware skbs,
- * i.e. when skb->pp_recycle is true, and not for fragments in a
- * non-pp-recycling skb. It has a fallback to increase references on normal
- * pages, as page pool aware skbs may also have normal page fragments.
- */
-static int skb_pp_frag_ref(struct sk_buff *skb)
-{
-	struct skb_shared_info *shinfo;
-	struct page *head_page;
-	int i;
-
-	if (!skb->pp_recycle)
-		return -EINVAL;
-
-	shinfo = skb_shinfo(skb);
-
-	for (i = 0; i < shinfo->nr_frags; i++) {
-		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
-		if (likely(is_pp_page(head_page)))
-			page_pool_ref_page(head_page);
-		else
-			page_ref_inc(head_page);
-	}
-	return 0;
-}
-
 static void skb_kfree_head(void *head, unsigned int end_offset)
 {
 	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
@@ -4176,7 +4140,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 			to++;
 
 		} else {
-			__skb_frag_ref(fragfrom);
+			__skb_frag_ref(fragfrom, skb->pp_recycle);
 			skb_frag_page_copy(fragto, fragfrom);
 			skb_frag_off_copy(fragto, fragfrom);
 			skb_frag_size_set(fragto, todo);
@@ -4826,7 +4790,7 @@ normal:
 		}
 
 		*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
-		__skb_frag_ref(nskb_frag);
+		__skb_frag_ref(nskb_frag, nskb->pp_recycle);
 		size = skb_frag_size(nskb_frag);
 
 		if (pos < offset) {
@@ -5957,10 +5921,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 	/* if the skb is not cloned this does nothing
 	 * since we set nr_frags to 0.
 	 */
-	if (skb_pp_frag_ref(from)) {
-		for (i = 0; i < from_shinfo->nr_frags; i++)
-			__skb_frag_ref(&from_shinfo->frags[i]);
-	}
+	for (i = 0; i < from_shinfo->nr_frags; i++)
+		__skb_frag_ref(&from_shinfo->frags[i], from->pp_recycle);
 
 	to->truesize += delta;
 	to->len += len;
net/tls/tls_device_fallback.c

@@ -278,7 +278,7 @@ static int fill_sg_in(struct scatterlist *sg_in,
 	for (i = 0; remaining > 0; i++) {
 		skb_frag_t *frag = &record->frags[i];
 
-		__skb_frag_ref(frag);
+		__skb_frag_ref(frag, false);
 		sg_set_page(sg_in + i, skb_frag_page(frag),
 			    skb_frag_size(frag), skb_frag_off(frag));
 