mirror of
https://github.com/torvalds/linux.git
synced 2024-11-17 17:41:44 +00:00
bc83674870
Add page_pool_update_nid() to be called by page pool consumers when they detect NUMA node changes. It will update the page pool nid value to start allocating from the new effective NUMA node. This mitigates the page pool allocating pages from the wrong NUMA node — the node where the pool was originally allocated — and holding on to pages that belong to a different NUMA node, which causes performance degradation. For pages that are already being consumed and could be returned to the pool by the consumer, the next patch will add a per-page check to avoid recycling them back to the pool, returning them to the page allocator instead. Signed-off-by: Saeed Mahameed <saeedm@mellanox.com> Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com> Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org> Acked-by: Jesper Dangaard Brouer <brouer@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
118 lines
2.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Tracepoint definitions for the page_pool page allocator/recycler.
 *
 * TRACE_SYSTEM names the event group these tracepoints appear under
 * (e.g. /sys/kernel/debug/tracing/events/page_pool/).
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM page_pool

/*
 * Unlike a normal include guard, this header must be re-readable when
 * TRACE_HEADER_MULTI_READ is set: trace/define_trace.h re-includes it
 * with different macro definitions to generate the event code.
 */
#if !defined(_TRACE_PAGE_POOL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PAGE_POOL_H

#include <linux/types.h>
#include <linux/tracepoint.h>

#include <trace/events/mmflags.h>
#include <net/page_pool.h>
/*
 * page_pool_release - pool-wide page accounting snapshot.
 *
 * Records the pool pointer, the caller-supplied inflight/hold/release
 * counters, and the pool's destroy_cnt. NOTE(review): presumably emitted
 * while checking for outstanding (inflight) pages during pool shutdown —
 * confirm against the caller in net/core/page_pool.c.
 */
TRACE_EVENT(page_pool_release,

	TP_PROTO(const struct page_pool *pool,
		 s32 inflight, u32 hold, u32 release),

	TP_ARGS(pool, inflight, hold, release),

	TP_STRUCT__entry(
		__field(const struct page_pool *, pool)
		__field(s32, inflight)
		__field(u32, hold)
		__field(u32, release)
		__field(u64, cnt)	/* pool->destroy_cnt at event time */
	),

	TP_fast_assign(
		__entry->pool = pool;
		__entry->inflight = inflight;
		__entry->hold = hold;
		__entry->release = release;
		__entry->cnt = pool->destroy_cnt;
	),

	TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
		  __entry->pool, __entry->inflight, __entry->hold,
		  __entry->release, __entry->cnt)
);
/*
 * page_pool_state_release - per-page release event.
 *
 * Records the pool, the page pointer and its PFN, and the caller-supplied
 * release counter value. The PFN is derived here via page_to_pfn() so the
 * trace entry identifies the physical frame even after the struct page
 * pointer is recycled.
 */
TRACE_EVENT(page_pool_state_release,

	TP_PROTO(const struct page_pool *pool,
		 const struct page *page, u32 release),

	TP_ARGS(pool, page, release),

	TP_STRUCT__entry(
		__field(const struct page_pool *, pool)
		__field(const struct page *, page)
		__field(u32, release)
		__field(unsigned long, pfn)
	),

	TP_fast_assign(
		__entry->pool = pool;
		__entry->page = page;
		__entry->release = release;
		__entry->pfn = page_to_pfn(page);
	),

	TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
		  __entry->pool, __entry->page, __entry->pfn, __entry->release)
);
/*
 * page_pool_state_hold - per-page hold (acquire) event.
 *
 * Mirror of page_pool_state_release: records the pool, the page pointer
 * and its PFN, and the caller-supplied hold counter value. PFN is
 * computed via page_to_pfn() at assign time.
 */
TRACE_EVENT(page_pool_state_hold,

	TP_PROTO(const struct page_pool *pool,
		 const struct page *page, u32 hold),

	TP_ARGS(pool, page, hold),

	TP_STRUCT__entry(
		__field(const struct page_pool *, pool)
		__field(const struct page *, page)
		__field(u32, hold)
		__field(unsigned long, pfn)
	),

	TP_fast_assign(
		__entry->pool = pool;
		__entry->page = page;
		__entry->hold = hold;
		__entry->pfn = page_to_pfn(page);
	),

	TP_printk("page_pool=%p page=%p pfn=%lu hold=%u",
		  __entry->pool, __entry->page, __entry->pfn, __entry->hold)
);
/*
 * page_pool_update_nid - pool NUMA node id update event.
 *
 * Emitted when page_pool_update_nid() changes the pool's preferred NUMA
 * node: records the pool, the current pool node (pool->p.nid) and the
 * new effective node requested by the consumer, so node migrations of
 * the pool can be observed from tracing.
 */
TRACE_EVENT(page_pool_update_nid,

	TP_PROTO(const struct page_pool *pool, int new_nid),

	TP_ARGS(pool, new_nid),

	TP_STRUCT__entry(
		__field(const struct page_pool *, pool)
		__field(int, pool_nid)	/* pool->p.nid before/at update */
		__field(int, new_nid)
	),

	TP_fast_assign(
		__entry->pool = pool;
		__entry->pool_nid = pool->p.nid;
		__entry->new_nid = new_nid;
	),

	TP_printk("page_pool=%p pool_nid=%d new_nid=%d",
		  __entry->pool, __entry->pool_nid, __entry->new_nid)
);
#endif /* _TRACE_PAGE_POOL_H */

/*
 * This part must be outside protection: define_trace.h re-includes this
 * header (via TRACE_HEADER_MULTI_READ) to expand the TRACE_EVENT()
 * invocations above into the actual tracepoint implementations.
 */
#include <trace/define_trace.h>