selftests/bpf: Add stacktrace ips test for kprobe_multi/kretprobe_multi

Adding a test that attaches kprobe_multi/kretprobe_multi and verifies
that the ORC stacktrace matches the expected functions.

Adding a bpf_testmod_stacktrace_test function to the bpf_testmod kernel
module, called through several nested functions so we get a reliable
call path for the stacktrace.
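
The resulting call path exercised by the test (all functions added to
bpf_testmod in this patch) is:

  bpf_testmod_test_read
    -> bpf_testmod_stacktrace_test_1
       -> bpf_testmod_stacktrace_test_2
          -> bpf_testmod_stacktrace_test_3
             -> bpf_testmod_stacktrace_test   (kprobe_multi attach point)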

The test runs only with the ORC unwinder to keep it simple.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20251104215405.168643-4-jolsa@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>

@@ -0,0 +1,104 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "stacktrace_ips.skel.h"

#ifdef __x86_64__
static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
{
	__u64 ips[PERF_MAX_STACK_DEPTH];
	struct ksyms *ksyms = NULL;
	int i, err = 0;
	va_list args;

	/* sorted by addr */
	ksyms = load_kallsyms_local();
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
		return -1;

	/* unlikely, but... */
	if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max"))
		return -1;

	err = bpf_map_lookup_elem(fd, &key, ips);
	if (err)
		goto out;

	/*
	 * Compare all symbols provided via arguments with stacktrace ips,
	 * and their related symbol addresses.
	 */
	va_start(args, cnt);

	for (i = 0; i < cnt; i++) {
		unsigned long val;
		struct ksym *ksym;

		val = va_arg(args, unsigned long);
		ksym = ksym_search_local(ksyms, ips[i]);
		if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
			break;
		ASSERT_EQ(ksym->addr, val, "stack_cmp");
	}

	va_end(args);

out:
	free_kallsyms_local(ksyms);
	return err;
}

static void test_stacktrace_ips_kprobe_multi(bool retprobe)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.retprobe = retprobe
	);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct stacktrace_ips *skel;

	skel = stacktrace_ips__open_and_load();
	if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
		return;

	if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
		test__skip();
		goto cleanup;
	}

	skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
					skel->progs.kprobe_multi_test,
					"bpf_testmod_stacktrace_test", &opts);
	if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	trigger_module_test_read(1);
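	/* load kernel symbols so ksym_get_addr() below can resolve the expected addresses */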
	load_kallsyms();

	check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
			     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
			     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
			     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
			     ksym_get_addr("bpf_testmod_test_read"));

cleanup:
	stacktrace_ips__destroy(skel);
}

static void __test_stacktrace_ips(void)
{
	if (test__start_subtest("kprobe_multi"))
		test_stacktrace_ips_kprobe_multi(false);
	if (test__start_subtest("kretprobe_multi"))
		test_stacktrace_ips_kprobe_multi(true);
}
#else
static void __test_stacktrace_ips(void)
{
	test__skip();
}
#endif

void test_stacktrace_ips(void)
{
	__test_stacktrace_ips();
}

@@ -0,0 +1,41 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif

typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 16384);
	__type(key, __u32);
	__type(value, stack_trace_t);
} stackmap SEC(".maps");

extern bool CONFIG_UNWINDER_ORC __kconfig __weak;

/*
 * This function is here to have CONFIG_UNWINDER_ORC
 * used and added to object BTF.
 */
int unused(void)
{
	return CONFIG_UNWINDER_ORC ? 0 : 1;
}

__u32 stack_key;

SEC("kprobe.multi")
int kprobe_multi_test(struct pt_regs *ctx)
{
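	/* store the kernel stacktrace in stackmap and publish its id for the userspace check */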
	stack_key = bpf_get_stackid(ctx, &stackmap, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";

@@ -417,6 +417,30 @@ noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

noinline void bpf_testmod_stacktrace_test(void)
{
	/* used for stacktrace test as attach function */
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_3(void)
{
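	/* the asm barrier after the call prevents tail-call optimization, keeping this frame in the stacktrace */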
	bpf_testmod_stacktrace_test();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_2(void)
{
	bpf_testmod_stacktrace_test_3();
	asm volatile ("");
}

noinline void bpf_testmod_stacktrace_test_1(void)
{
	bpf_testmod_stacktrace_test_2();
	asm volatile ("");
}

int bpf_testmod_fentry_ok;

noinline ssize_t
@@ -497,6 +521,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
			21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_stacktrace_test_1();

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */