Files
linux/include/trace/events/page_pool.h
Mina Almasry 4dec64c52e page_pool: convert to use netmem
Abstract the memory type from the page_pool so we can later add support
for new memory types. Convert the page_pool to use the new netmem type
abstraction, rather than use struct page directly.

As of this patch the netmem type is a no-op abstraction: it's always a
struct page underneath. All the page pool internals are converted to
use struct netmem instead of struct page, and the page pool now exports
2 APIs:

1. The existing struct page API.
2. The new struct netmem API.

Keeping the existing API is transitional; we do not want to refactor all
the current drivers using the page pool at once.

The netmem abstraction is currently a no-op. The page_pool uses
page_to_netmem() to convert allocated pages to netmem, and uses
netmem_to_page() to convert the netmem back to pages to pass to mm APIs.

Follow up patches to this series add non-paged netmem support to the
page_pool. This change is factored out on its own to limit the code
churn to this 1 patch, for ease of code review.

Signed-off-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://patch.msgid.link/20240628003253.1694510-6-almasrymina@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2024-07-02 18:59:33 -07:00

120 lines
2.7 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM page_pool
#if !defined(_TRACE_PAGE_POOL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_PAGE_POOL_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/mmflags.h>
#include <net/page_pool/types.h>
/*
 * page_pool_release - emitted when a page_pool is being released/destroyed.
 *
 * Records the pool pointer, the in-flight page count (pages handed out but
 * not yet returned; presumably hold - release — confirm against
 * page_pool_release() in net/core/page_pool.c), the cumulative hold and
 * release counters, and the pool's destroy_cnt so teardown progress and
 * potential page leaks can be observed from the trace buffer.
 */
TRACE_EVENT(page_pool_release,
TP_PROTO(const struct page_pool *pool,
s32 inflight, u32 hold, u32 release),
TP_ARGS(pool, inflight, hold, release),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
__field(s32, inflight)
__field(u32, hold)
__field(u32, release)
__field(u64, cnt)
),
TP_fast_assign(
__entry->pool = pool;
__entry->inflight = inflight;
__entry->hold = hold;
__entry->release = release;
/* snapshot the pool's running destroy counter at trace time */
__entry->cnt = pool->destroy_cnt;
),
TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
__entry->pool, __entry->inflight, __entry->hold,
__entry->release, __entry->cnt)
);
/*
 * page_pool_state_release - emitted when a netmem leaves the pool's control
 * (released back to the page allocator or otherwise dropped).
 *
 * Stores the netmem_ref as an unsigned long (the __force cast strips the
 * sparse __bitwise annotation on netmem_ref) together with the backing
 * page frame number from netmem_to_pfn(), and the pool's release counter
 * at the time of the event.
 */
TRACE_EVENT(page_pool_state_release,
TP_PROTO(const struct page_pool *pool,
netmem_ref netmem, u32 release),
TP_ARGS(pool, netmem, release),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
__field(unsigned long, netmem)
__field(u32, release)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
__entry->netmem = (__force unsigned long)netmem;
__entry->release = release;
__entry->pfn = netmem_to_pfn(netmem);
),
TP_printk("page_pool=%p netmem=%p pfn=0x%lx release=%u",
__entry->pool, (void *)__entry->netmem,
__entry->pfn, __entry->release)
);
/*
 * page_pool_state_hold - emitted when the pool takes a hold on a netmem
 * (i.e. hands it out / accounts it as in-flight).
 *
 * Mirrors page_pool_state_release: records the netmem_ref (cast via
 * __force to drop the sparse __bitwise annotation), the backing pfn from
 * netmem_to_pfn(), and the pool's hold counter at the time of the event.
 */
TRACE_EVENT(page_pool_state_hold,
TP_PROTO(const struct page_pool *pool,
netmem_ref netmem, u32 hold),
TP_ARGS(pool, netmem, hold),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
__field(unsigned long, netmem)
__field(u32, hold)
__field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
__entry->netmem = (__force unsigned long)netmem;
__entry->hold = hold;
__entry->pfn = netmem_to_pfn(netmem);
),
TP_printk("page_pool=%p netmem=%p pfn=0x%lx hold=%u",
__entry->pool, (void *)__entry->netmem,
__entry->pfn, __entry->hold)
);
/*
 * page_pool_update_nid - emitted when the pool's preferred NUMA node is
 * changed.
 *
 * Captures both the pool's current node (pool->p.nid at trace time) and
 * the newly requested node so NUMA rebalancing of the pool can be traced.
 */
TRACE_EVENT(page_pool_update_nid,
TP_PROTO(const struct page_pool *pool, int new_nid),
TP_ARGS(pool, new_nid),
TP_STRUCT__entry(
__field(const struct page_pool *, pool)
__field(int, pool_nid)
__field(int, new_nid)
),
TP_fast_assign(
__entry->pool = pool;
__entry->pool_nid = pool->p.nid;
__entry->new_nid = new_nid;
),
TP_printk("page_pool=%p pool_nid=%d new_nid=%d",
__entry->pool, __entry->pool_nid, __entry->new_nid)
);
#endif /* _TRACE_PAGE_POOL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>