diff --git a/Documentation/networking/page_pool.rst b/Documentation/networking/page_pool.rst
index 873efd97f82280a90637063ed0614644fb4cc923..0aa850cf4447c3ea9431f2d3991c39e3bd6108ec 100644
--- a/Documentation/networking/page_pool.rst
+++ b/Documentation/networking/page_pool.rst
@@ -13,9 +13,9 @@ replacing dev_alloc_pages().
 
 API keeps track of in-flight pages, in order to let API user know
 when it is safe to free a page_pool object. Thus, API users
-must run page_pool_release_page() when a page is leaving the page_pool or
-call page_pool_put_page() where appropriate in order to maintain correct
-accounting.
+must call page_pool_put_page() to free the page, or attach
+the page to page_pool-aware objects like skbs marked with
+skb_mark_for_recycle().
 
 API user must call page_pool_put_page() once on a page, as it
 will either recycle the page, or in case of refcnt > 1, it will
@@ -87,9 +87,6 @@ a page will cause no race conditions is enough.
   must guarantee safe context (e.g NAPI), since it will recycle the page
   directly into the pool fast cache.
 
-* page_pool_release_page(): Unmap the page (if mapped) and account for it on
-  in-flight counters.
-
 * page_pool_dev_alloc_pages(): Get a page from the page allocator or
   page_pool caches.
 
@@ -194,7 +191,7 @@ NAPI poller
             if XDP_DROP:
                 page_pool_recycle_direct(page_pool, page);
         } else (packet_is_skb) {
-            page_pool_release_page(page_pool, page);
+            skb_mark_for_recycle(skb);
             new_page = page_pool_dev_alloc_pages(page_pool);
         }
     }
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 84751bb303a684928401c621ae2367d5f30a5c0b..079f9f6ae21aebe9689d40a90a6cef57d38c4ed5 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1333,7 +1333,7 @@ static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
 
 	skb = tsnep_build_skb(rx, page, length);
 	if (skb) {
-		page_pool_release_page(rx->page_pool, page);
+		skb_mark_for_recycle(skb);
 
 		rx->packets++;
 		rx->bytes += length;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e7ca52f0d2f2df1239964df1035cb7fb48e9e414..e1f1c034d325977345d97c2ec952b0f6933282df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5441,7 +5441,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 					priv->dma_conf.dma_buf_sz);
 
 			/* Data payload appended into SKB */
-			page_pool_release_page(rx_q->page_pool, buf->page);
+			skb_mark_for_recycle(skb);
 			buf->page = NULL;
 		}
 
@@ -5453,7 +5453,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 					priv->dma_conf.dma_buf_sz);
 
 			/* Data payload appended into SKB */
-			page_pool_release_page(rx_q->page_pool, buf->sec_page);
+			skb_mark_for_recycle(skb);
 			buf->sec_page = NULL;
 		}
 
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 126f9e294389ae1100b2e760e962cfb7445580f5..f1d5cc1fa13b155b0cc2e39e8a00c958594b38a5 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -18,9 +18,8 @@
  *
  * API keeps track of in-flight pages, in-order to let API user know
  * when it is safe to dealloactor page_pool object. Thus, API users
- * must make sure to call page_pool_release_page() when a page is
- * "leaving" the page_pool. Or call page_pool_put_page() where
- * appropiate. For maintaining correct accounting.
+ * must call page_pool_put_page() where appropriate and only attach
+ * the page to page_pool-aware objects like skbs marked for recycling.
  *
  * API user must only call page_pool_put_page() once on a page, as it
  * will either recycle the page, or in case of elevated refcnt, it
@@ -251,7 +250,6 @@ void page_pool_unlink_napi(struct page_pool *pool);
 void page_pool_destroy(struct page_pool *pool);
 void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
 			   struct xdp_mem_info *mem);
-void page_pool_release_page(struct page_pool *pool, struct page *page);
 void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 			     int count);
 #else
@@ -268,10 +266,6 @@ static inline void page_pool_use_xdp_mem(struct page_pool *pool,
 					 struct xdp_mem_info *mem)
 {
 }
-static inline void page_pool_release_page(struct page_pool *pool,
-					  struct page *page)
-{
-}
 
 static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
 					   int count)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index a3e12a61d456c4f5ded8b6cb4506e64dda46198c..7ca456bfab71c96cc57fce3b9db1f322087a51d2 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -492,7 +492,7 @@ static s32 page_pool_inflight(struct page_pool *pool)
  * a regular page (that will eventually be returned to the normal
  * page-allocator via put_page).
  */
-void page_pool_release_page(struct page_pool *pool, struct page *page)
+static void page_pool_return_page(struct page_pool *pool, struct page *page)
 {
 	dma_addr_t dma;
 	int count;
@@ -518,13 +518,6 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 	 */
 	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
 	trace_page_pool_state_release(pool, page, count);
-}
-EXPORT_SYMBOL(page_pool_release_page);
-
-/* Return a page to the page allocator, cleaning up our state */
-static void page_pool_return_page(struct page_pool *pool, struct page *page)
-{
-	page_pool_release_page(pool, page);
 	put_page(page);
 
 	/* An optimization would be to call __free_pages(page, pool->p.order)
@@ -616,9 +609,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	 * will be invoking put_page.
 	 */
 	recycle_stat_inc(pool, released_refcnt);
-	/* Do not replace this with page_pool_return_page() */
-	page_pool_release_page(pool, page);
-	put_page(page);
+	page_pool_return_page(pool, page);
 
 	return NULL;
 }
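
For reference, a minimal sketch of the RX-path conversion the driver hunks
above perform. The per-queue structure and the names prefixed my_ are
hypothetical stand-ins, not real driver code; only build_skb(),
skb_mark_for_recycle(), page_pool_put_full_page() and napi_gro_receive()
are existing kernel API. This illustrates the pattern, it is not a
drop-in implementation.

#include <linux/skbuff.h>
#include <net/page_pool.h>

/* Hypothetical per-queue state; real drivers keep this in their own
 * structures (e.g. tsnep_rx, stmmac_rx_queue).
 */
struct my_rx_ring {
	struct page_pool *page_pool;
	struct napi_struct napi;
};

static int my_rx_packet(struct my_rx_ring *ring, struct page *page,
			unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(page_address(page), PAGE_SIZE);
	if (!skb) {
		/* The page never left the pool's custody: return it
		 * through the put API, never via a bare put_page().
		 */
		page_pool_put_full_page(ring->page_pool, page, false);
		return -ENOMEM;
	}
	skb_reserve(skb, NET_SKB_PAD);	/* headroom left by HW (assumed) */
	skb_put(skb, len);

	/* Before this patch a driver would call
	 * page_pool_release_page(ring->page_pool, page) here, unmapping
	 * the page and dropping it from the pool's accounting.  Marking
	 * the skb instead keeps the page attached to the pool, so
	 * freeing the skb recycles the page.
	 */
	skb_mark_for_recycle(skb);

	napi_gro_receive(&ring->napi, skb);
	return 0;
}

The error path is the part that changes in spirit: with
page_pool_release_page() gone from the public API, any page that does not
end up in a recycling-marked skb must flow back through
page_pool_put_page() or one of its variants, so the pool's in-flight
accounting stays balanced and page_pool_destroy() can complete.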