This test adds a new struct_ops "bpf_testmod_st_ops" in bpf_testmod.
The ops of bpf_testmod_st_ops are triggered by new kfunc calls
"bpf_kfunc_st_ops_test_*logue". These new kfunc calls are primarily
used by the SEC("syscall") programs. The test triggering sequence is
as follows (a run-side sketch follows the chain):
    SEC("syscall")
    syscall_prologue(struct st_ops_args *args)
        bpf_kfunc_st_ops_test_prologue(args)
            st_ops->test_prologue(args)
                .gen_prologue adds 1000 to args->a
                .gen_epilogue adds 10000 to args->a
                .gen_epilogue also sets r0 to 2 * args->a
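
Outside of the test_loader infra, the same chain can be driven
manually with libbpf. A minimal sketch, assuming the usual bpftool
skeleton naming for pro_epilogue.c (pro_epilogue.skel.h,
skel->maps.pro_epilogue, skel->progs.syscall_epilogue); the actual
test does not need this because the __retval checks handle the run:

/* Illustrative only: the equivalent manual sequence of what the
 * test_loader __retval machinery does for this test.
 */
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "pro_epilogue.skel.h"

static int run_epilogue_test(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct pro_epilogue *skel;
	struct bpf_link *link;
	int err;

	skel = pro_epilogue__open_and_load();
	if (!skel)
		return -1;

	/* Register the st_ops so the bpf_kfunc_st_ops_test_*logue kfuncs
	 * have something to call into.
	 */
	link = bpf_map__attach_struct_ops(skel->maps.pro_epilogue);
	if (!link) {
		err = -1;
		goto out;
	}

	/* Run the SEC("syscall") prog; topts.retval is expected to be
	 * 20022 for syscall_epilogue (see the __retval() annotations).
	 */
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.syscall_epilogue),
				     &topts);
	bpf_link__destroy(link);
out:
	pro_epilogue__destroy(skel);
	return err ?: (int)topts.retval;
}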
The .gen_prologue and .gen_epilogue of bpf_testmod_st_ops check
prog->aux->attach_func_name to decide whether they need to generate
code.
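
The callbacks themselves live in bpf_testmod.c and are not part of
this file. A minimal sketch of such a gen_prologue, assuming the
bpf_verifier_ops-style signature and a hypothetical
st_ops_gen_prologue name; gen_epilogue works analogously (it reloads
the ctx pointer spilled to the stack, adds 10000 to args->a, and sets
r0 to 2 * args->a):

/* Sketch only, not the actual bpf_testmod.c implementation. It emits
 * the instruction sequence matched by the __xlated() lines below and
 * follows the usual gen_prologue convention of also emitting a copy
 * of the program's original first instruction, since the verifier
 * patches the buffer in over insn 0.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/string.h>

static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	/* Only the progs that expect a prologue get patched. */
	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
		return 0;

	/* r6 = ctx[0]	(the struct st_ops_args *args)
	 * r7 = args->a
	 * r7 += 1000
	 * args->a = r7
	 */
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, 0);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
	*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, 0);
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}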
The main programs in pro_epilogue.c call a new kfunc,
bpf_kfunc_st_ops_inc10, which does "args->a += 10". They also call a
subprog() which does "args->a += 1".
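
The module side of that kfunc is not in this file either; it
presumably boils down to the following sketch (the exact return type
is an assumption here):

#include <linux/btf.h>	/* __bpf_kfunc; struct st_ops_args comes from the testmod header */

/* Sketch of the module-side helper described above; not this file's code. */
__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
	args->a += 10;
	return args->a;
}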
This patch uses the test_loader infra to check the __xlated
instructions patched after gen_prologue and/or gen_epilogue.
The __xlated check is based on Eduard's example (Thanks!) in v1.
args->a is returned by the struct_ops prog (either the main prog or
the epilogue). Thus, the __retval of the SEC("syscall") prog is
checked. For example, when triggering the ops in
'SEC("struct_ops/test_epilogue") int test_epilogue', the expected
args->a is 1 (subprog call) + 10 (kfunc call) + 10000 (.gen_epilogue)
= 10011, and the expected return value is 2 * 10011 = 20022 (set by
.gen_epilogue).
Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240829210833.388152-7-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"

char _license[] SEC("license") = "GPL";

void __kfunc_btf_root(void)
{
	bpf_kfunc_st_ops_inc10(NULL);
}

static __noinline __used int subprog(struct st_ops_args *args)
{
	args->a += 1;
	return args->a;
}

__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* main prog */
__xlated("4: r1 = *(u64 *)(r1 +0)")
__xlated("5: r6 = r1")
__xlated("6: call kernel-function")
__xlated("7: r1 = r6")
__xlated("8: call pc+1")
__xlated("9: exit")
SEC("struct_ops/test_prologue")
__naked int test_prologue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}

__success
/* save __u64 *ctx to stack */
__xlated("0: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("1: r1 = *(u64 *)(r1 +0)")
__xlated("2: r6 = r1")
__xlated("3: call kernel-function")
__xlated("4: r1 = r6")
__xlated("5: call pc+")
/* epilogue */
__xlated("6: r1 = *(u64 *)(r10 -8)")
__xlated("7: r1 = *(u64 *)(r1 +0)")
__xlated("8: r6 = *(u64 *)(r1 +0)")
__xlated("9: r6 += 10000")
__xlated("10: *(u64 *)(r1 +0) = r6")
__xlated("11: r0 = r6")
__xlated("12: r0 *= 2")
__xlated("13: exit")
SEC("struct_ops/test_epilogue")
__naked int test_epilogue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}

__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* save __u64 *ctx to stack */
__xlated("4: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("5: r1 = *(u64 *)(r1 +0)")
__xlated("6: r6 = r1")
__xlated("7: call kernel-function")
__xlated("8: r1 = r6")
__xlated("9: call pc+")
/* epilogue */
__xlated("10: r1 = *(u64 *)(r10 -8)")
__xlated("11: r1 = *(u64 *)(r1 +0)")
__xlated("12: r6 = *(u64 *)(r1 +0)")
__xlated("13: r6 += 10000")
__xlated("14: *(u64 *)(r1 +0) = r6")
__xlated("15: r0 = r6")
__xlated("16: r0 *= 2")
__xlated("17: exit")
SEC("struct_ops/test_pro_epilogue")
__naked int test_pro_epilogue(void)
{
	asm volatile (
	"r1 = *(u64 *)(r1 +0);"
	"r6 = r1;"
	"call %[bpf_kfunc_st_ops_inc10];"
	"r1 = r6;"
	"call subprog;"
	"exit;"
	:
	: __imm(bpf_kfunc_st_ops_inc10)
	: __clobber_all);
}

SEC("syscall")
__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */
int syscall_prologue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_prologue(&args);
}

SEC("syscall")
__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
int syscall_epilogue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_epilogue(&args);
}

SEC("syscall")
__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
int syscall_pro_epilogue(void *ctx)
{
	struct st_ops_args args = {};

	return bpf_kfunc_st_ops_test_pro_epilogue(&args);
}

SEC(".struct_ops.link")
struct bpf_testmod_st_ops pro_epilogue = {
	.test_prologue = (void *)test_prologue,
	.test_epilogue = (void *)test_epilogue,
	.test_pro_epilogue = (void *)test_pro_epilogue,
};