linux/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
Andrii Nakryiko e626a13f6f selftests/bpf: drop unnecessary bpf_iter.h type duplication
Drop the bpf_iter.h header, which uses vmlinux.h but re-defines a bunch of
iterator structures and some of the BPF constants for use in BPF iterator
selftests.

None of that is necessary when a fresh vmlinux.h header is generated for a
vmlinux image that matches the latest selftests. So drop the ugly hacks and
use plain vmlinux.h everywhere.
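
For illustration only, the duplication being dropped looked roughly like this
(a trimmed sketch, not the verbatim header): the vmlinux.h definitions were
#define-renamed out of the way and the iterator context structs re-declared
by hand with preserve_access_index.

/* sketch of the kind of duplication the dropped bpf_iter.h carried */
#define bpf_iter_meta bpf_iter_meta___not_used
#define bpf_iter__task bpf_iter__task___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__task

struct bpf_iter_meta {
	struct seq_file *seq;
	__u64 session_id;
	__u64 seq_num;
} __attribute__((preserve_access_index));

struct bpf_iter__task {
	struct bpf_iter_meta *meta;
	struct task_struct *task;
} __attribute__((preserve_access_index));

With a vmlinux.h generated from the kernel's BTF (e.g. bpftool btf dump file
/sys/kernel/btf/vmlinux format c > vmlinux.h), the real definitions are
already present and the renaming dance goes away.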

We could do the same with all the kfunc __ksym redefinitions, but that has a
dependency on a very fresh pahole, so I'm not addressing that here.
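
For reference, those kfunc redefinitions are hand-written extern declarations
along these lines (illustrative examples; with a new enough pahole the
declarations would come from BTF/vmlinux.h instead):

/* hand-written kfunc declarations of the kind still kept in selftests */
extern struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
extern void bpf_task_release(struct task_struct *p) __ksym;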

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20241029203919.1948941-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2024-10-29 17:43:29 -07:00


// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

#define MAX_STACK_TRACE_DEPTH	64
unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};
#define SIZE_OF_ULONG (sizeof(unsigned long))

SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	long i, retlen;

	if (task == (void *)0)
		return 0;

	retlen = bpf_get_task_stack(task, entries,
				    MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0);
	if (retlen < 0)
		return 0;

	BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,
		       retlen / SIZE_OF_ULONG);

	for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) {
		if (retlen > i * SIZE_OF_ULONG)
			BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]);
	}

	BPF_SEQ_PRINTF(seq, "\n");

	return 0;
}

int num_user_stacks = 0;

SEC("iter/task")
int get_task_user_stacks(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	uint64_t buf_sz = 0;
	int64_t res;

	if (task == (void *)0)
		return 0;

	res = bpf_get_task_stack(task, entries,
				 MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG,
				 BPF_F_USER_STACK);
	if (res <= 0)
		return 0;

	/* Only one task, the current one, should succeed */
	++num_user_stacks;

	buf_sz += res;

	/* If the verifier doesn't refine bpf_get_task_stack res, and instead
	 * assumes res is entirely unknown, this program will fail to load as
	 * the verifier will believe that max buf_sz value allows reading
	 * past the end of entries in bpf_seq_write call
	 */
	bpf_seq_write(seq, &entries, buf_sz);

	return 0;
}
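
A userspace consumer of the dump_task_stack iterator above could look roughly
like the sketch below. It uses generic libbpf calls rather than the
selftest's generated skeleton, and the object file name
"bpf_iter_task_stack.bpf.o" is an assumption, not taken from the selftest
build.

/* Hypothetical userspace consumer: open/load the BPF object, attach the
 * iterator program, create a seq_file-backed iterator fd, and read() the
 * text produced by BPF_SEQ_PRINTF.
 */
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	char buf[4096];
	int iter_fd;
	ssize_t n;

	obj = bpf_object__open_file("bpf_iter_task_stack.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "dump_task_stack");
	if (!prog)
		return 1;

	/* Attach the iterator and get an fd we can read() from */
	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return 1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		return 1;

	/* Each read() drives the iteration: the BPF program runs once per
	 * task and its seq_file output lands in this buffer.
	 */
	while ((n = read(iter_fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	close(iter_fd);
	bpf_link__destroy(link);
	bpf_object__close(obj);
	return 0;
}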