page_pool: keep pp info as long as page pool owns the page
Currently, page->pp is cleared and set every time the page is recycled, which is unnecessary.

So only set page->pp when the page is added to the page pool, and only clear it when the page is released from the page pool. This is also a preparation to support allocating frag pages in page pool.

Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 2a2b6e3640
commit 57f05bc2ab
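The idea is easiest to see end to end: pp info is set once when the pool takes ownership of a page and cleared once when the page is finally released, instead of being cleared and re-stored on every recycle. Below is a minimal standalone sketch (userspace C, not kernel code) of that lifecycle; the struct layouts and the PP_SIGNATURE value are simplified stand-ins for the kernel's.

/* Standalone sketch: models the pp-info lifecycle this patch moves to.
 * Names mirror the kernel helpers; everything else is simplified.
 */
#include <stdio.h>

#define PP_SIGNATURE 0x40UL	/* illustrative value only */

struct page_pool { int id; };

struct page {
	struct page_pool *pp;	/* owning pool, valid while the pool owns the page */
	unsigned long pp_magic;	/* signature marking pool-owned pages */
};

/* Set once, when the pool takes ownership of a freshly allocated page. */
static void page_pool_set_pp_info(struct page_pool *pool, struct page *page)
{
	page->pp = pool;
	page->pp_magic |= PP_SIGNATURE;
}

/* Cleared once, when the page is finally released from the pool. */
static void page_pool_clear_pp_info(struct page *page)
{
	page->pp_magic = 0;
	page->pp = NULL;
}

int main(void)
{
	struct page_pool pool = { .id = 1 };
	struct page page = { 0 };

	page_pool_set_pp_info(&pool, &page);	/* on allocation into the pool */

	/* Before this patch, every recycle cleared and re-stored page->pp.
	 * After it, recycling leaves the pp info untouched.
	 */

	page_pool_clear_pp_info(&page);		/* on release from the pool */

	printf("pp=%p magic=%#lx\n", (void *)page.pp, page.pp_magic);
	return 0;
}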
drivers/net/ethernet/marvell/mvneta.c

@@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
+	skb_mark_for_recycle(skb);
 
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	skb_put(skb, xdp->data_end - xdp->data);
@@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(frag), skb_frag_off(frag),
 				skb_frag_size(frag), PAGE_SIZE);
-		/* We don't need to reset pp_recycle here. It's already set, so
-		 * just mark fragments for recycling.
-		 */
-		page_pool_store_mem_info(skb_frag_page(frag), pool);
 	}
 
 	return skb;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c

@@ -3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
 	}
 
 	if (pp)
-		skb_mark_for_recycle(skb, page, pp);
+		skb_mark_for_recycle(skb);
 	else
 		dma_unmap_single_attrs(dev->dev.parent, dma_addr,
 				       bm_pool->buf_size, DMA_FROM_DEVICE,
drivers/net/ethernet/ti/cpsw.c

@@ -431,7 +431,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	skb->protocol = eth_type_trans(skb, ndev);
 
 	/* mark skb for recycling */
-	skb_mark_for_recycle(skb, page, pool);
+	skb_mark_for_recycle(skb);
 	netif_receive_skb(skb);
 
 	ndev->stats.rx_bytes += len;
drivers/net/ethernet/ti/cpsw_new.c

@@ -375,7 +375,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	skb->protocol = eth_type_trans(skb, ndev);
 
 	/* mark skb for recycling */
-	skb_mark_for_recycle(skb, page, pool);
+	skb_mark_for_recycle(skb);
 	netif_receive_skb(skb);
 
 	ndev->stats.rx_bytes += len;
include/linux/skbuff.h

@@ -4712,11 +4712,9 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
 }
 
 #ifdef CONFIG_PAGE_POOL
-static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
-					struct page_pool *pp)
+static inline void skb_mark_for_recycle(struct sk_buff *skb)
 {
 	skb->pp_recycle = 1;
-	page_pool_store_mem_info(page, pp);
 }
 #endif
 
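Note the simplified signature: callers no longer pass the page and the pool. Setting skb->pp_recycle is enough, because the pp info now lives on the page for as long as the pool owns it, and the recycling path can look the owning pool up from there (see the sketch after the last hunk).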
include/net/page_pool.h

@@ -253,11 +253,4 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
 	spin_unlock_bh(&pool->ring.producer_lock);
 }
 
-/* Store mem_info on struct page and use it while recycling skb frags */
-static inline
-void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
-{
-	page->pp = pp;
-}
-
 #endif /* _NET_PAGE_POOL_H */
net/core/page_pool.c

@@ -206,6 +206,19 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	return true;
 }
 
+static void page_pool_set_pp_info(struct page_pool *pool,
+				  struct page *page)
+{
+	page->pp = pool;
+	page->pp_magic |= PP_SIGNATURE;
+}
+
+static void page_pool_clear_pp_info(struct page *page)
+{
+	page->pp_magic = 0;
+	page->pp = NULL;
+}
+
 static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 						 gfp_t gfp)
 {
@@ -222,7 +235,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
 		return NULL;
 	}
 
-	page->pp_magic |= PP_SIGNATURE;
+	page_pool_set_pp_info(pool, page);
 
 	/* Track how many pages are held 'in-flight' */
 	pool->pages_state_hold_cnt++;
@@ -266,7 +279,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 			put_page(page);
 			continue;
 		}
-		page->pp_magic |= PP_SIGNATURE;
+
+		page_pool_set_pp_info(pool, page);
 		pool->alloc.cache[pool->alloc.count++] = page;
 		/* Track how many pages are held 'in-flight' */
 		pool->pages_state_hold_cnt++;
@@ -345,7 +359,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 			     DMA_ATTR_SKIP_CPU_SYNC);
 	page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
-	page->pp_magic = 0;
+	page_pool_clear_pp_info(page);
 
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.
@@ -644,7 +658,6 @@ bool page_pool_return_skb_page(struct page *page)
 	 * The page will be returned to the pool here regardless of the
 	 * 'flipped' fragment being in use or not.
 	 */
-	page->pp = NULL;
 	page_pool_put_full_page(pp, page, false);
 
 	return true;
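For context on the consumer side (not part of this diff): the skb recycling path recovers the owning pool from the page itself, which is what lets skb_mark_for_recycle() drop its page/pool arguments. A simplified sketch of page_pool_return_skb_page() as it reads after this series, assuming the kernel's compound_head() and page_pool_put_full_page():

/* Simplified from net/core/page_pool.c after this series;
 * the frag-tracking comments above are trimmed.
 */
bool page_pool_return_skb_page(struct page *page)
{
	struct page_pool *pp;

	page = compound_head(page);

	/* Only pages carrying the pool signature may be recycled;
	 * everything else takes the regular page free path.
	 */
	if (page->pp_magic != PP_SIGNATURE)
		return false;

	/* page->pp is valid for as long as the pool owns the page --
	 * exactly the invariant this patch establishes.
	 */
	pp = page->pp;
	page_pool_put_full_page(pp, page, false);

	return true;
}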