From 75011ad69bc54396703867b5434f1622343a848e Mon Sep 17 00:00:00 2001 From: Carlos Llamas Date: Sun, 30 Mar 2025 21:13:23 +0000 Subject: [PATCH 001/135] libbpf: Fix implicit memfd_create() for bionic Since memfd_create() is not consistently available across different bionic libc implementations, using memfd_create() directly can break some Android builds: tools/lib/bpf/linker.c:576:7: error: implicit declaration of function 'memfd_create' [-Werror,-Wimplicit-function-declaration] 576 | fd = memfd_create(filename, 0); | ^ To fix this, relocate and inline the sys_memfd_create() helper so that it can be used in "linker.c". Similar issues were previously fixed by commit 9fa5e1a180aa ("libbpf: Call memfd_create() syscall directly"). Fixes: 6d5e5e5d7ce1 ("libbpf: Extend linker API to support in-memory ELF files") Signed-off-by: Carlos Llamas Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250330211325.530677-1-cmllamas@google.com --- tools/lib/bpf/libbpf.c | 9 --------- tools/lib/bpf/libbpf_internal.h | 9 +++++++++ tools/lib/bpf/linker.c | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6b85060f07b3..37d563e14051 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1725,15 +1725,6 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam return ERR_PTR(-ENOENT); } -/* Some versions of Android don't provide memfd_create() in their libc - * implementation, so avoid complications and just go straight to Linux - * syscall. - */ -static int sys_memfd_create(const char *name, unsigned flags) -{ - return syscall(__NR_memfd_create, name, flags); -} - #ifndef MFD_CLOEXEC #define MFD_CLOEXEC 0x0001U #endif diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 76669c73dcd1..477a3b3389a0 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -667,6 +667,15 @@ static inline int sys_dup3(int oldfd, int newfd, int flags) return syscall(__NR_dup3, oldfd, newfd, flags); } +/* Some versions of Android don't provide memfd_create() in their libc + * implementation, so avoid complications and just go straight to Linux + * syscall. + */ +static inline int sys_memfd_create(const char *name, unsigned flags) +{ + return syscall(__NR_memfd_create, name, flags); +} + /* Point *fixed_fd* to the same file that *tmp_fd* points to. * Regardless of success, *tmp_fd* is closed. * Whatever *fixed_fd* pointed to is closed silently. diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 800e0ef09c37..56f5068e2eba 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -573,7 +573,7 @@ int bpf_linker__add_buf(struct bpf_linker *linker, void *buf, size_t buf_sz, snprintf(filename, sizeof(filename), "mem:%p+%zu", buf, buf_sz); - fd = memfd_create(filename, 0); + fd = sys_memfd_create(filename, 0); if (fd < 0) { ret = -errno; pr_warn("failed to create memfd '%s': %s\n", filename, errstr(ret)); From 62aa5790cec89108a23052cc2f00fdd31f9adbac Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 31 Mar 2025 20:36:17 +0000 Subject: [PATCH 002/135] bpf: Fix a comment describing bpf_attr The map_fd field of the bpf_attr union is used in the BPF_MAP_FREEZE syscall. Explicitly mention this in the comments. 
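For reference, a minimal userspace sketch (not part of this patch) of how BPF_MAP_FREEZE consumes the field this comment documents; the helper name freeze_map() is hypothetical:

```c
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Hypothetical helper: issue BPF_MAP_FREEZE on a map fd. The command
 * only reads attr.map_fd from the anonymous struct re-documented by
 * this patch; once frozen, the map can no longer be written from the
 * syscall side.
 */
static int freeze_map(int map_fd)
{
	union bpf_attr attr = {};

	attr.map_fd = map_fd;
	return syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, sizeof(attr));
}
```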
Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250331203618.1973691-2-a.s.protopopov@gmail.com --- include/uapi/linux/bpf.h | 2 +- tools/include/uapi/linux/bpf.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 28705ae67784..07ee73cdf97b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1506,7 +1506,7 @@ union bpf_attr { __s32 map_token_fd; }; - struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ + struct { /* anonymous struct used by BPF_MAP_*_ELEM and BPF_MAP_FREEZE commands */ __u32 map_fd; __aligned_u64 key; union { diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 28705ae67784..07ee73cdf97b 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1506,7 +1506,7 @@ union bpf_attr { __s32 map_token_fd; }; - struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ + struct { /* anonymous struct used by BPF_MAP_*_ELEM and BPF_MAP_FREEZE commands */ __u32 map_fd; __aligned_u64 key; union { From dafae1ae2ad32d124ce4efb2105c00ccadc7bd32 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Mon, 31 Mar 2025 20:36:18 +0000 Subject: [PATCH 003/135] libbpf: Add likely/unlikely macros and use them in selftests A few selftests and, more importantly, subsequent changes to the bpf_helpers.h file use likely/unlikely macros, so define them here and remove duplicate definitions from existing selftests. Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250331203618.1973691-3-a.s.protopopov@gmail.com --- tools/lib/bpf/bpf_helpers.h | 8 ++++++++ tools/testing/selftests/bpf/bpf_arena_spin_lock.h | 3 --- tools/testing/selftests/bpf/progs/iters.c | 2 -- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 686824b8b413..a50773d4616e 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -15,6 +15,14 @@ #define __array(name, val) typeof(val) *name[] #define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name +#ifndef likely +#define likely(x) (__builtin_expect(!!(x), 1)) +#endif + +#ifndef unlikely +#define unlikely(x) (__builtin_expect(!!(x), 0)) +#endif + /* * Helper macro to place programs, maps, license in * different sections in elf_bpf file.
Section names diff --git a/tools/testing/selftests/bpf/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/bpf_arena_spin_lock.h index fb8dc0768999..4e29c31c4ef8 100644 --- a/tools/testing/selftests/bpf/bpf_arena_spin_lock.h +++ b/tools/testing/selftests/bpf/bpf_arena_spin_lock.h @@ -95,9 +95,6 @@ struct arena_qnode { #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET) #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET) -#define likely(x) __builtin_expect(!!(x), 1) -#define unlikely(x) __builtin_expect(!!(x), 0) - struct arena_qnode __arena qnodes[_Q_MAX_CPUS][_Q_MAX_NODES]; static inline u32 encode_tail(int cpu, int idx) diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 427b72954b87..76adf4a8f2da 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -7,8 +7,6 @@ #include "bpf_misc.h" #include "bpf_compiler.h" -#define unlikely(x) __builtin_expect(!!(x), 0) - static volatile int zero = 0; int my_pid; From c9661394850d1003984e580aeed7b0e51decee91 Mon Sep 17 00:00:00 2001 From: Chen Ni Date: Tue, 1 Apr 2025 14:15:46 +0800 Subject: [PATCH 004/135] selftests/bpf: Convert comma to semicolon Replace commas between expressions with semicolons. Using a ',' in place of a ';' can have unintended side effects. Although that is not the case here, it seems best to use ';' unless ',' is intended. Found by inspection. No functional change intended. Compile tested only. Signed-off-by: Chen Ni Signed-off-by: Andrii Nakryiko Reviewed-by: Amery Hung Link: https://lore.kernel.org/bpf/20250401061546.1990156-1-nichen@iscas.ac.cn --- tools/testing/selftests/bpf/test_kmods/bpf_testmod.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c index 3220f1d28697..f38eaf0d35ef 100644 --- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c @@ -1340,7 +1340,7 @@ static int st_ops_gen_prologue_with_kfunc(struct bpf_insn *insn_buf, bool direct *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a)); *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2); *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0); - *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id), + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id); *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_8); *insn++ = prog->insnsi[0]; @@ -1379,7 +1379,7 @@ static int st_ops_gen_epilogue_with_kfunc(struct bpf_insn *insn_buf, const struc *insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a)); *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 2); *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0); - *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id), + *insn++ = BPF_CALL_KFUNC(0, bpf_cgroup_release_id); *insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6); *insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2); *insn++ = BPF_EXIT_INSN(); From 37b1b3ed20c39f8df78d00d1912e67efd3de4f93 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Tue, 8 Apr 2025 11:45:44 +0100 Subject: [PATCH 005/135] selftests/bpf: Support struct/union presets in veristat Extend commit e3c9abd0d14b ("selftests/bpf: Implement setting global variables in veristat") to support applying presets to members of global structs or unions in veristat.
For example: ``` ./veristat set_global_vars.bpf.o -G "union1.struct3.var_u8_h = 0xBB" ``` Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250408104544.140317-1-mykyta.yatsenko5@gmail.com --- .../selftests/bpf/prog_tests/test_veristat.c | 5 + tools/testing/selftests/bpf/progs/prepare.c | 1 - .../selftests/bpf/progs/set_global_vars.c | 41 +++++++ tools/testing/selftests/bpf/veristat.c | 101 ++++++++++++++++-- 4 files changed, 140 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/test_veristat.c b/tools/testing/selftests/bpf/prog_tests/test_veristat.c index a95b42bf744a..47b56c258f3f 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_veristat.c +++ b/tools/testing/selftests/bpf/prog_tests/test_veristat.c @@ -63,6 +63,9 @@ static void test_set_global_vars_succeeds(void) " -G \"var_eb = EB2\" "\ " -G \"var_ec = EC2\" "\ " -G \"var_b = 1\" "\ + " -G \"struct1.struct2.u.var_u8 = 170\" "\ + " -G \"union1.struct3.var_u8_l = 0xaa\" "\ + " -G \"union1.struct3.var_u8_h = 0xaa\" "\ "-vl2 > %s", fix->veristat, fix->tmpfile); read(fix->fd, fix->output, fix->sz); @@ -78,6 +81,8 @@ static void test_set_global_vars_succeeds(void) __CHECK_STR("_w=12 ", "var_eb = EB2"); __CHECK_STR("_w=13 ", "var_ec = EC2"); __CHECK_STR("_w=1 ", "var_b = 1"); + __CHECK_STR("_w=170 ", "struct1.struct2.u.var_u8 = 170"); + __CHECK_STR("_w=0xaaaa ", "union1.var_u16 = 0xaaaa"); out: teardown_fixture(fix); diff --git a/tools/testing/selftests/bpf/progs/prepare.c b/tools/testing/selftests/bpf/progs/prepare.c index 1f1dd547e4ee..cfc1f48e0d28 100644 --- a/tools/testing/selftests/bpf/progs/prepare.c +++ b/tools/testing/selftests/bpf/progs/prepare.c @@ -2,7 +2,6 @@ /* Copyright (c) 2025 Meta */ #include #include -//#include char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/set_global_vars.c b/tools/testing/selftests/bpf/progs/set_global_vars.c index 9adb5ba4cd4d..90f5656c3991 100644 --- a/tools/testing/selftests/bpf/progs/set_global_vars.c +++ b/tools/testing/selftests/bpf/progs/set_global_vars.c @@ -24,6 +24,44 @@ const volatile enum Enumu64 var_eb = EB1; const volatile enum Enums64 var_ec = EC1; const volatile bool var_b = false; +struct Struct { + int:16; + __u16 filler; + struct { + const __u16 filler2; + }; + struct Struct2 { + __u16 filler; + volatile struct { + const int:1; + union { + const volatile __u8 var_u8; + const volatile __s16 filler3; + const int:1; + } u; + }; + } struct2; +}; + +const volatile __u32 stru = 0; /* same prefix as below */ +const volatile struct Struct struct1 = {.struct2 = {.u = {.var_u8 = 1}}}; + +union Union { + __u16 var_u16; + struct Struct3 { + struct { + __u8 var_u8_l; + }; + struct { + struct { + __u8 var_u8_h; + }; + }; + } struct3; +}; + +const volatile union Union union1 = {.var_u16 = -1}; + char arr[4] = {0}; SEC("socket") @@ -43,5 +81,8 @@ int test_set_globals(void *ctx) a = var_eb; a = var_ec; a = var_b; + a = struct1.struct2.u.var_u8; + a = union1.var_u16; + return a; } diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index a18972ffdeb6..b2bb20b00952 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -1486,7 +1486,84 @@ static bool is_preset_supported(const struct btf_type *t) return btf_is_int(t) || btf_is_enum(t) || btf_is_enum64(t); } -static int set_global_var(struct bpf_object *obj, struct btf *btf, const struct btf_type *t, +const int btf_find_member(const struct btf 
*btf, + const struct btf_type *parent_type, + __u32 parent_offset, + const char *member_name, + int *member_tid, + __u32 *member_offset) +{ + int i; + + if (!btf_is_composite(parent_type)) + return -EINVAL; + + for (i = 0; i < btf_vlen(parent_type); ++i) { + const struct btf_member *member; + const struct btf_type *member_type; + int tid; + + member = btf_members(parent_type) + i; + tid = btf__resolve_type(btf, member->type); + if (tid < 0) + return -EINVAL; + + member_type = btf__type_by_id(btf, tid); + if (member->name_off) { + const char *name = btf__name_by_offset(btf, member->name_off); + + if (strcmp(member_name, name) == 0) { + if (btf_member_bitfield_size(parent_type, i) != 0) { + fprintf(stderr, "Bitfield presets are not supported %s\n", + name); + return -EINVAL; + } + *member_offset = parent_offset + member->offset; + *member_tid = tid; + return 0; + } + } else if (btf_is_composite(member_type)) { + int err; + + err = btf_find_member(btf, member_type, parent_offset + member->offset, + member_name, member_tid, member_offset); + if (!err) + return 0; + } + } + + return -EINVAL; +} + +static int adjust_var_secinfo(struct btf *btf, const struct btf_type *t, + struct btf_var_secinfo *sinfo, const char *var) +{ + char expr[256], *saveptr; + const struct btf_type *base_type, *member_type; + int err, member_tid; + char *name; + __u32 member_offset = 0; + + base_type = btf__type_by_id(btf, btf__resolve_type(btf, t->type)); + snprintf(expr, sizeof(expr), "%s", var); + strtok_r(expr, ".", &saveptr); + + while ((name = strtok_r(NULL, ".", &saveptr))) { + err = btf_find_member(btf, base_type, 0, name, &member_tid, &member_offset); + if (err) { + fprintf(stderr, "Could not find member %s for variable %s\n", name, var); + return err; + } + member_type = btf__type_by_id(btf, member_tid); + sinfo->offset += member_offset / 8; + sinfo->size = member_type->size; + sinfo->type = member_tid; + base_type = member_type; + } + return 0; +} + +static int set_global_var(struct bpf_object *obj, struct btf *btf, struct bpf_map *map, struct btf_var_secinfo *sinfo, struct var_preset *preset) { @@ -1495,9 +1572,9 @@ static int set_global_var(struct bpf_object *obj, struct btf *btf, const struct long long value = preset->ivalue; size_t size; - base_type = btf__type_by_id(btf, btf__resolve_type(btf, t->type)); + base_type = btf__type_by_id(btf, btf__resolve_type(btf, sinfo->type)); if (!base_type) { - fprintf(stderr, "Failed to resolve type %d\n", t->type); + fprintf(stderr, "Failed to resolve type %d\n", sinfo->type); return -EINVAL; } if (!is_preset_supported(base_type)) { @@ -1530,7 +1607,7 @@ static int set_global_var(struct bpf_object *obj, struct btf *btf, const struct if (value >= max_val || value < -max_val) { fprintf(stderr, "Variable %s value %lld is out of range [%lld; %lld]\n", - btf__name_by_offset(btf, t->name_off), value, + btf__name_by_offset(btf, base_type->name_off), value, is_signed ? 
-max_val : 0, max_val - 1); return -EINVAL; } @@ -1583,14 +1660,20 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i for (j = 0; j < n; ++j, ++sinfo) { const struct btf_type *var_type = btf__type_by_id(btf, sinfo->type); const char *var_name; + int var_len; if (!btf_is_var(var_type)) continue; var_name = btf__name_by_offset(btf, var_type->name_off); + var_len = strlen(var_name); for (k = 0; k < npresets; ++k) { - if (strcmp(var_name, presets[k].name) != 0) + struct btf_var_secinfo tmp_sinfo; + + if (strncmp(var_name, presets[k].name, var_len) != 0 || + (presets[k].name[var_len] != '\0' && + presets[k].name[var_len] != '.')) continue; if (presets[k].applied) { @@ -1598,13 +1681,17 @@ static int set_global_vars(struct bpf_object *obj, struct var_preset *presets, i var_name); return -EINVAL; } + tmp_sinfo = *sinfo; + err = adjust_var_secinfo(btf, var_type, + &tmp_sinfo, presets[k].name); + if (err) + return err; - err = set_global_var(obj, btf, var_type, map, sinfo, presets + k); + err = set_global_var(obj, btf, map, &tmp_sinfo, presets + k); if (err) return err; presets[k].applied = true; - break; } } } From 243d720e2e5320f7ed09034361aa20b74d8eeb03 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Wed, 9 Apr 2025 00:44:16 +0100 Subject: [PATCH 006/135] libbpf: Add getters for BTF.ext func and line info Introducing new libbpf API getters for BTF.ext func and line info, namely: bpf_program__func_info bpf_program__func_info_cnt bpf_program__line_info bpf_program__line_info_cnt This change enables scenarios, when user needs to load bpf_program directly using `bpf_prog_load`, instead of higher-level `bpf_object__load`. Line and func info are required for checking BTF info in verifier; verification may fail without these fields if, for example, program calls `bpf_obj_new`. Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250408234417.452565-2-mykyta.yatsenko5@gmail.com --- tools/lib/bpf/libbpf.c | 24 ++++++++++++++++++++++++ tools/lib/bpf/libbpf.h | 6 ++++++ tools/lib/bpf/libbpf.map | 4 ++++ 3 files changed, 34 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 37d563e14051..b2591f5cab65 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -9446,6 +9446,30 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log return 0; } +struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog) +{ + if (prog->func_info_rec_size != sizeof(struct bpf_func_info)) + return libbpf_err_ptr(-EOPNOTSUPP); + return prog->func_info; +} + +__u32 bpf_program__func_info_cnt(const struct bpf_program *prog) +{ + return prog->func_info_cnt; +} + +struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog) +{ + if (prog->line_info_rec_size != sizeof(struct bpf_line_info)) + return libbpf_err_ptr(-EOPNOTSUPP); + return prog->line_info; +} + +__u32 bpf_program__line_info_cnt(const struct bpf_program *prog) +{ + return prog->line_info_cnt; +} + #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) 
{ \ .sec = (char *)sec_pfx, \ .prog_type = BPF_PROG_TYPE_##ptype, \ diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index e0605403f977..d39f19c8396d 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -940,6 +940,12 @@ LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_le LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size); LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size); +LIBBPF_API struct bpf_func_info *bpf_program__func_info(const struct bpf_program *prog); +LIBBPF_API __u32 bpf_program__func_info_cnt(const struct bpf_program *prog); + +LIBBPF_API struct bpf_line_info *bpf_program__line_info(const struct bpf_program *prog); +LIBBPF_API __u32 bpf_program__line_info_cnt(const struct bpf_program *prog); + /** * @brief **bpf_program__set_attach_target()** sets BTF-based attach target * for supported BPF program types: diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index d8b71f22f197..1205f9a4fe04 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -437,6 +437,10 @@ LIBBPF_1.6.0 { bpf_linker__add_fd; bpf_linker__new_fd; bpf_object__prepare; + bpf_program__func_info; + bpf_program__func_info_cnt; + bpf_program__line_info; + bpf_program__line_info_cnt; btf__add_decl_attr; btf__add_type_attr; } LIBBPF_1.5.0; From b8390dd1e09e1abf73299209e9e7bcc2585b88e6 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Wed, 9 Apr 2025 00:44:17 +0100 Subject: [PATCH 007/135] selftests/bpf: Add BTF.ext line/func info getter tests Add selftests checking that line and func info retrieved by newly added libbpf APIs are the same as returned by kernel via bpf_prog_get_info_by_fd. Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250408234417.452565-3-mykyta.yatsenko5@gmail.com --- .../selftests/bpf/prog_tests/test_btf_ext.c | 64 +++++++++++++++++++ .../selftests/bpf/progs/test_btf_ext.c | 22 +++++++ 2 files changed, 86 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/test_btf_ext.c create mode 100644 tools/testing/selftests/bpf/progs/test_btf_ext.c diff --git a/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c b/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c new file mode 100644 index 000000000000..7d1b478c99a0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_btf_ext.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms Inc. 
*/ +#include +#include "test_btf_ext.skel.h" +#include "btf_helpers.h" + +static void subtest_line_func_info(void) +{ + struct test_btf_ext *skel; + struct bpf_prog_info info; + struct bpf_line_info line_info[128], *libbpf_line_info; + struct bpf_func_info func_info[128], *libbpf_func_info; + __u32 info_len = sizeof(info), libbbpf_line_info_cnt, libbbpf_func_info_cnt; + int err, fd; + + skel = test_btf_ext__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + fd = bpf_program__fd(skel->progs.global_func); + + memset(&info, 0, sizeof(info)); + info.line_info = ptr_to_u64(&line_info); + info.nr_line_info = sizeof(line_info); + info.line_info_rec_size = sizeof(*line_info); + err = bpf_prog_get_info_by_fd(fd, &info, &info_len); + if (!ASSERT_OK(err, "prog_line_info")) + goto out; + + libbpf_line_info = bpf_program__line_info(skel->progs.global_func); + libbbpf_line_info_cnt = bpf_program__line_info_cnt(skel->progs.global_func); + + memset(&info, 0, sizeof(info)); + info.func_info = ptr_to_u64(&func_info); + info.nr_func_info = sizeof(func_info); + info.func_info_rec_size = sizeof(*func_info); + err = bpf_prog_get_info_by_fd(fd, &info, &info_len); + if (!ASSERT_OK(err, "prog_func_info")) + goto out; + + libbpf_func_info = bpf_program__func_info(skel->progs.global_func); + libbbpf_func_info_cnt = bpf_program__func_info_cnt(skel->progs.global_func); + + if (!ASSERT_OK_PTR(libbpf_line_info, "bpf_program__line_info")) + goto out; + if (!ASSERT_EQ(libbbpf_line_info_cnt, info.nr_line_info, "line_info_cnt")) + goto out; + if (!ASSERT_OK_PTR(libbpf_func_info, "bpf_program__func_info")) + goto out; + if (!ASSERT_EQ(libbbpf_func_info_cnt, info.nr_func_info, "func_info_cnt")) + goto out; + ASSERT_MEMEQ(libbpf_line_info, line_info, libbbpf_line_info_cnt * sizeof(*line_info), + "line_info"); + ASSERT_MEMEQ(libbpf_func_info, func_info, libbbpf_func_info_cnt * sizeof(*func_info), + "func_info"); +out: + test_btf_ext__destroy(skel); +} + +void test_btf_ext(void) +{ + if (test__start_subtest("line_func_info")) + subtest_line_func_info(); +} diff --git a/tools/testing/selftests/bpf/progs/test_btf_ext.c b/tools/testing/selftests/bpf/progs/test_btf_ext.c new file mode 100644 index 000000000000..cdf20331db04 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_btf_ext.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2025 Meta Platforms Inc. */ + +#include +#include +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +__noinline static void f0(void) +{ + __u64 a = 1; + + __sink(a); +} + +SEC("xdp") +__u64 global_func(struct xdp_md *xdp) +{ + f0(); + return XDP_DROP; +} From 243911982aa9faf4361aa952f879331ad66933fe Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Mon, 7 Apr 2025 11:57:51 +0800 Subject: [PATCH 008/135] bpf: Check link_create.flags parameter for multi_kprobe The link_create.flags are currently not used for multi-kprobes, so return -EINVAL if it is set, same as for other attach APIs. We allow target_fd, on the other hand, to have an arbitrary value for multi-kprobe, as there are existing users (libbpf) relying on this. 
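To illustrate the user-visible effect, a sketch (not part of this patch) of a raw kprobe.multi attach via libbpf's low-level API; syms and cnt are placeholder inputs:

```c
/* Low-level kprobe.multi attach. The generic opts.flags field is
 * copied into attr->link_create.flags, so after this patch it must
 * stay 0 or the kernel returns -EINVAL, while target_fd may still
 * carry an arbitrary value for compatibility.
 */
LIBBPF_OPTS(bpf_link_create_opts, opts,
	.kprobe_multi.syms = syms,	/* placeholder symbol array */
	.kprobe_multi.cnt = cnt,	/* placeholder symbol count */
);

link_fd = bpf_link_create(bpf_program__fd(prog), 0 /* target_fd */,
			  BPF_TRACE_KPROBE_MULTI, &opts);
```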
Fixes: 0dcac2725406 ("bpf: Add multi kprobe link") Signed-off-by: Tao Chen Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250407035752.1108927-1-chen.dylane@linux.dev --- kernel/trace/bpf_trace.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 187dc37d61d4..ec19942321e6 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2987,6 +2987,9 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr if (sizeof(u64) != sizeof(void *)) return -EOPNOTSUPP; + if (attr->link_create.flags) + return -EINVAL; + if (!is_kprobe_multi(prog)) return -EINVAL; From a76116f422c442ab691b4dcabb25613486d34360 Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Mon, 7 Apr 2025 11:57:52 +0800 Subject: [PATCH 009/135] bpf: Check link_create.flags parameter for multi_uprobe The link_create.flags are currently not used for multi-uprobes, so return -EINVAL if it is set, same as for other attach APIs. We allow target_fd to have an arbitrary value for multi-uprobe, though, as there are existing users (libbpf) relying on this. Fixes: 89ae89f53d20 ("bpf: Add multi uprobe link") Signed-off-by: Tao Chen Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250407035752.1108927-2-chen.dylane@linux.dev --- kernel/trace/bpf_trace.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index ec19942321e6..0f5906f43d7c 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3379,6 +3379,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr if (sizeof(u64) != sizeof(void *)) return -EOPNOTSUPP; + if (attr->link_create.flags) + return -EINVAL; + if (!is_uprobe_multi(prog)) return -EINVAL; From 967e8def1100cb4b08c28a54d27ce69563fdf281 Mon Sep 17 00:00:00 2001 From: Saket Kumar Bhaskar Date: Wed, 9 Apr 2025 15:26:33 +0530 Subject: [PATCH 010/135] selftests/bpf: Fix bpf_nf selftest failure For systems with missing iptables-legacy tool this selftest fails. Add check to find if iptables-legacy tool is available and skip the test if the tool is missing. Fixes: de9c8d848d90 ("selftests/bpf: S/iptables/iptables-legacy/ in the bpf_nf and xdp_synproxy test") Signed-off-by: Saket Kumar Bhaskar Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250409095633.33653-1-skb99@linux.ibm.com --- tools/testing/selftests/bpf/prog_tests/bpf_nf.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c index dbd13f8e42a7..dd6512fa652b 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -63,6 +63,12 @@ static void test_bpf_nf_ct(int mode) .repeat = 1, ); + if (SYS_NOFAIL("iptables-legacy --version")) { + fprintf(stdout, "Missing required iptables-legacy tool\n"); + test__skip(); + return; + } + skel = test_bpf_nf__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load")) return; From 54a3ecaeeeae8176da8badbd7d72af1017032c39 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Wed, 19 Feb 2025 13:20:14 +0800 Subject: [PATCH 011/135] bpf: fix ktls panic with sockmap [ 2172.936997] ------------[ cut here ]------------ [ 2172.936999] kernel BUG at lib/iov_iter.c:629! ...... [ 2172.944996] PKRU: 55555554 [ 2172.945155] Call Trace: [ 2172.945299] [ 2172.945428] ? die+0x36/0x90 [ 2172.945601] ? do_trap+0xdd/0x100 [ 2172.945795] ? 
iov_iter_revert+0x178/0x180 [ 2172.946031] ? iov_iter_revert+0x178/0x180 [ 2172.946267] ? do_error_trap+0x7d/0x110 [ 2172.946499] ? iov_iter_revert+0x178/0x180 [ 2172.946736] ? exc_invalid_op+0x50/0x70 [ 2172.946961] ? iov_iter_revert+0x178/0x180 [ 2172.947197] ? asm_exc_invalid_op+0x1a/0x20 [ 2172.947446] ? iov_iter_revert+0x178/0x180 [ 2172.947683] ? iov_iter_revert+0x5c/0x180 [ 2172.947913] tls_sw_sendmsg_locked.isra.0+0x794/0x840 [ 2172.948206] tls_sw_sendmsg+0x52/0x80 [ 2172.948420] ? inet_sendmsg+0x1f/0x70 [ 2172.948634] __sys_sendto+0x1cd/0x200 [ 2172.948848] ? find_held_lock+0x2b/0x80 [ 2172.949072] ? syscall_trace_enter+0x140/0x270 [ 2172.949330] ? __lock_release.isra.0+0x5e/0x170 [ 2172.949595] ? find_held_lock+0x2b/0x80 [ 2172.949817] ? syscall_trace_enter+0x140/0x270 [ 2172.950211] ? lockdep_hardirqs_on_prepare+0xda/0x190 [ 2172.950632] ? ktime_get_coarse_real_ts64+0xc2/0xd0 [ 2172.951036] __x64_sys_sendto+0x24/0x30 [ 2172.951382] do_syscall_64+0x90/0x170 ...... After calling bpf_exec_tx_verdict(), the size of msg_pl->sg may increase, e.g., when the BPF program executes bpf_msg_push_data(). If the BPF program sets cork_bytes and sg.size is smaller than cork_bytes, it will return -ENOSPC and attempt to roll back to the non-zero copy logic. However, during rollback, msg->msg_iter is reset, but since msg_pl->sg.size has been increased, subsequent executions will exceed the actual size of msg_iter. ''' iov_iter_revert(&msg->msg_iter, msg_pl->sg.size - orig_size); ''' The changes in this commit are based on the following considerations: 1. When cork_bytes is set, rolling back to non-zero copy logic is pointless and can directly go to zero-copy logic. 2. We can not calculate the correct number of bytes to revert msg_iter. Assume the original data is "abcdefgh" (8 bytes), and after 3 pushes by the BPF program, it becomes 11-byte data: "abc?de?fgh?". Then, we set cork_bytes to 6, which means the first 6 bytes have been processed, and the remaining 5 bytes "?fgh?" will be cached until the length meets the cork_bytes requirement. However, some data in "?fgh?" is not within 'sg->msg_iter' (but in msg_pl instead), especially the data "?" we pushed. So it doesn't seem as simple as just reverting through an offset of msg_iter. 3. For non-TLS sockets in tcp_bpf_sendmsg, when a "cork" situation occurs, the user-space send() doesn't return an error, and the returned length is the same as the input length parameter, even if some data is cached. Additionally, I saw that the current non-zero-copy logic for handling corking is written as: ''' line 1177 else if (ret != -EAGAIN) { if (ret == -ENOSPC) ret = 0; goto send_end; ''' So it's ok to just return 'copied' without error when a "cork" situation occurs. 
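For context, a minimal sk_msg program of the shape that drives the sendmsg path into the corking branch described above (a sketch, not the exact reproducer):

```c
// SPDX-License-Identifier: GPL-2.0
/* Sketch: a BPF_SK_MSG_VERDICT program that first grows the message
 * with bpf_msg_push_data() and then corks, so msg_pl->sg.size ends up
 * larger than the bytes drawn from msg->msg_iter and the -ENOSPC
 * rollback described above is reached.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("sk_msg")
int cork_with_push(struct sk_msg_md *msg)
{
	bpf_msg_push_data(msg, 3, 1, 0);	/* insert 1 byte at offset 3 */
	bpf_msg_cork_bytes(msg, 6);		/* cork until 6 bytes are queued */
	return SK_PASS;
}
```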
Fixes: fcb14cb1bdac ("new iov_iter flavour - ITER_UBUF") Fixes: d3b18ad31f93 ("tls: add bpf support to sk_msg handling") Signed-off-by: Jiayuan Chen Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250219052015.274405-2-jiayuan.chen@linux.dev Signed-off-by: Alexei Starovoitov --- net/tls/tls_sw.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 914d4e1516a3..f3d7d19482da 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1120,9 +1120,13 @@ alloc_encrypted: num_async++; else if (ret == -ENOMEM) goto wait_for_memory; - else if (ctx->open_rec && ret == -ENOSPC) + else if (ctx->open_rec && ret == -ENOSPC) { + if (msg_pl->cork_bytes) { + ret = 0; + goto send_end; + } goto rollback_iter; - else if (ret != -EAGAIN) + } else if (ret != -EAGAIN) goto send_end; } continue; From 05ebde1bcb50a71cd56d8edd3008f53a781146e9 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Wed, 19 Feb 2025 13:20:15 +0800 Subject: [PATCH 012/135] selftests/bpf: add ktls selftest add ktls selftest for sockmap Test results: sockmap_ktls/sockmap_ktls disconnect_after_delete IPv4 SOCKMAP:OK sockmap_ktls/sockmap_ktls update_fails_when_sock_has_ulp IPv4 SOCKMAP:OK sockmap_ktls/sockmap_ktls disconnect_after_delete IPv4 SOCKMAP:OK sockmap_ktls/sockmap_ktls update_fails_when_sock_has_ulp IPv4 SOCKMAP:OK sockmap_ktls/sockmap_ktls disconnect_after_delete IPv4 SOCKMAP:OK sockmap_ktls/sockmap_ktls update_fails_when_sock_has_ulp IPv4 SOCKMAP:OK sockmap_ktls/sockmap_ktls disconnect_after_delete IPv4 SOCKMAP:OK sockmap_ktls/sockmap_ktls update_fails_when_sock_has_ulp IPv4 SOCKMAP:OK sockmap_ktls/tls simple offload:OK sockmap_ktls/tls tx cork:OK sockmap_ktls/tls tx cork with push:OK sockmap_ktls/tls simple offload:OK sockmap_ktls/tls tx cork:OK sockmap_ktls/tls tx cork with push:OK sockmap_ktls:OK Signed-off-by: Jiayuan Chen Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250219052015.274405-3-jiayuan.chen@linux.dev Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/sockmap_ktls.c | 174 +++++++++++++++++- .../selftests/bpf/progs/test_sockmap_ktls.c | 26 +++ 2 files changed, 199 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/progs/test_sockmap_ktls.c diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index 2d0796314862..49b85c1c7552 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -3,13 +3,64 @@ /* * Tests for sockmap/sockhash holding kTLS sockets. 
*/ - +#include #include +#include #include "test_progs.h" +#include "sockmap_helpers.h" +#include "test_skmsg_load_helpers.skel.h" +#include "test_sockmap_ktls.skel.h" #define MAX_TEST_NAME 80 #define TCP_ULP 31 +static int init_ktls_pairs(int c, int p) +{ + int err; + struct tls12_crypto_info_aes_gcm_128 crypto_rx; + struct tls12_crypto_info_aes_gcm_128 crypto_tx; + + err = setsockopt(c, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")); + if (!ASSERT_OK(err, "setsockopt(TCP_ULP)")) + goto out; + + err = setsockopt(p, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")); + if (!ASSERT_OK(err, "setsockopt(TCP_ULP)")) + goto out; + + memset(&crypto_rx, 0, sizeof(crypto_rx)); + memset(&crypto_tx, 0, sizeof(crypto_tx)); + crypto_rx.info.version = TLS_1_2_VERSION; + crypto_tx.info.version = TLS_1_2_VERSION; + crypto_rx.info.cipher_type = TLS_CIPHER_AES_GCM_128; + crypto_tx.info.cipher_type = TLS_CIPHER_AES_GCM_128; + + err = setsockopt(c, SOL_TLS, TLS_TX, &crypto_tx, sizeof(crypto_tx)); + if (!ASSERT_OK(err, "setsockopt(TLS_TX)")) + goto out; + + err = setsockopt(p, SOL_TLS, TLS_RX, &crypto_rx, sizeof(crypto_rx)); + if (!ASSERT_OK(err, "setsockopt(TLS_RX)")) + goto out; + return 0; +out: + return -1; +} + +static int create_ktls_pairs(int family, int sotype, int *c, int *p) +{ + int err; + + err = create_pair(family, sotype, c, p); + if (!ASSERT_OK(err, "create_pair()")) + return -1; + + err = init_ktls_pairs(*c, *p); + if (!ASSERT_OK(err, "init_ktls_pairs(c, p)")) + return -1; + return 0; +} + static int tcp_server(int family) { int err, s; @@ -146,6 +197,115 @@ static const char *fmt_test_name(const char *subtest_name, int family, return test_name; } +static void test_sockmap_ktls_offload(int family, int sotype) +{ + int err; + int c = 0, p = 0, sent, recvd; + char msg[12] = "hello world\0"; + char rcv[13]; + + err = create_ktls_pairs(family, sotype, &c, &p); + if (!ASSERT_OK(err, "create_ktls_pairs()")) + goto out; + + sent = send(c, msg, sizeof(msg), 0); + if (!ASSERT_OK(err, "send(msg)")) + goto out; + + recvd = recv(p, rcv, sizeof(rcv), 0); + if (!ASSERT_OK(err, "recv(msg)") || + !ASSERT_EQ(recvd, sent, "length mismatch")) + goto out; + + ASSERT_OK(memcmp(msg, rcv, sizeof(msg)), "data mismatch"); + +out: + if (c) + close(c); + if (p) + close(p); +} + +static void test_sockmap_ktls_tx_cork(int family, int sotype, bool push) +{ + int err, off; + int i, j; + int start_push = 0, push_len = 0; + int c = 0, p = 0, one = 1, sent, recvd; + int prog_fd, map_fd; + char msg[12] = "hello world\0"; + char rcv[20] = {0}; + struct test_sockmap_ktls *skel; + + skel = test_sockmap_ktls__open_and_load(); + if (!ASSERT_TRUE(skel, "open ktls skel")) + return; + + err = create_pair(family, sotype, &c, &p); + if (!ASSERT_OK(err, "create_pair()")) + goto out; + + prog_fd = bpf_program__fd(skel->progs.prog_sk_policy); + map_fd = bpf_map__fd(skel->maps.sock_map); + + err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach sk msg")) + goto out; + + err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c)")) + goto out; + + err = init_ktls_pairs(c, p); + if (!ASSERT_OK(err, "init_ktls_pairs(c, p)")) + goto out; + + skel->bss->cork_byte = sizeof(msg); + if (push) { + start_push = 1; + push_len = 2; + } + skel->bss->push_start = start_push; + skel->bss->push_end = push_len; + + off = sizeof(msg) / 2; + sent = send(c, msg, off, 0); + if (!ASSERT_EQ(sent, off, "send(msg)")) + goto out; + + recvd = recv_timeout(p, rcv, sizeof(rcv), 
MSG_DONTWAIT, 1); + if (!ASSERT_EQ(-1, recvd, "expected no data")) + goto out; + + /* send remaining msg */ + sent = send(c, msg + off, sizeof(msg) - off, 0); + if (!ASSERT_EQ(sent, sizeof(msg) - off, "send remaining data")) + goto out; + + recvd = recv_timeout(p, rcv, sizeof(rcv), MSG_DONTWAIT, 1); + if (!ASSERT_OK(err, "recv(msg)") || + !ASSERT_EQ(recvd, sizeof(msg) + push_len, "check length mismatch")) + goto out; + + for (i = 0, j = 0; i < recvd;) { + /* skip checking the data that has been pushed in */ + if (i >= start_push && i <= start_push + push_len - 1) { + i++; + continue; + } + if (!ASSERT_EQ(rcv[i], msg[j], "data mismatch")) + goto out; + i++; + j++; + } +out: + if (c) + close(c); + if (p) + close(p); + test_sockmap_ktls__destroy(skel); +} + static void run_tests(int family, enum bpf_map_type map_type) { int map; @@ -162,10 +322,22 @@ static void run_tests(int family, enum bpf_map_type map_type) close(map); } +static void run_ktls_test(int family, int sotype) +{ + if (test__start_subtest("tls simple offload")) + test_sockmap_ktls_offload(family, sotype); + if (test__start_subtest("tls tx cork")) + test_sockmap_ktls_tx_cork(family, sotype, false); + if (test__start_subtest("tls tx cork with push")) + test_sockmap_ktls_tx_cork(family, sotype, true); +} + void test_sockmap_ktls(void) { run_tests(AF_INET, BPF_MAP_TYPE_SOCKMAP); run_tests(AF_INET, BPF_MAP_TYPE_SOCKHASH); run_tests(AF_INET6, BPF_MAP_TYPE_SOCKMAP); run_tests(AF_INET6, BPF_MAP_TYPE_SOCKHASH); + run_ktls_test(AF_INET, SOCK_STREAM); + run_ktls_test(AF_INET6, SOCK_STREAM); } diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c new file mode 100644 index 000000000000..e0f757929ef4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +int cork_byte; +int push_start; +int push_end; + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map SEC(".maps"); + +SEC("sk_msg") +int prog_sk_policy(struct sk_msg_md *msg) +{ + if (cork_byte > 0) + bpf_msg_cork_bytes(msg, cork_byte); + if (push_start > 0 && push_end > 0) + bpf_msg_push_data(msg, push_start, push_end, 0); + + return SK_PASS; +} From 7683167196bd727ad5f3c3fc6a9ca70f54520a81 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Mon, 7 Apr 2025 22:21:20 +0800 Subject: [PATCH 013/135] bpf, sockmap: Fix data lost during EAGAIN retries We call skb_bpf_redirect_clear() to clear _sk_redir before handling the skb in the backlog, but when sk_psock_handle_skb() returns -EAGAIN due to the sk_rcvbuf limit, the redirect info in _sk_redir is not recovered. Fix the skb redirect loss during EAGAIN retries by restoring the _sk_redir information using skb_bpf_set_redir(). Before this patch: ''' ./bench sockmap -c 2 -p 1 -a --rx-verdict-ingress Setting up benchmark 'sockmap'... create socket fd c1:13 p1:14 c2:15 p2:16 Benchmark 'sockmap' started. Send Speed 1343.172 MB/s, BPF Speed 1343.238 MB/s, Rcv Speed 65.271 MB/s Send Speed 1352.022 MB/s, BPF Speed 1352.088 MB/s, Rcv Speed 0 MB/s Send Speed 1354.105 MB/s, BPF Speed 1354.105 MB/s, Rcv Speed 0 MB/s Send Speed 1355.018 MB/s, BPF Speed 1354.887 MB/s, Rcv Speed 0 MB/s ''' Due to the high send rate, the RX processing path may frequently hit the sk_rcvbuf limit. Once triggered, the incorrect _sk_redir will cause the flow to mistakenly enter the "!ingress" path, leading to send failures. (The Rcv speed depends on tcp_rmem).
After this patch: ''' ./bench sockmap -c 2 -p 1 -a --rx-verdict-ingress Setting up benchmark 'sockmap'... create socket fd c1:13 p1:14 c2:15 p2:16 Benchmark 'sockmap' started. Send Speed 1347.236 MB/s, BPF Speed 1347.367 MB/s, Rcv Speed 65.402 MB/s Send Speed 1353.320 MB/s, BPF Speed 1353.320 MB/s, Rcv Speed 65.536 MB/s Send Speed 1353.186 MB/s, BPF Speed 1353.121 MB/s, Rcv Speed 65.536 MB/s ''' Signed-off-by: Jiayuan Chen Link: https://lore.kernel.org/r/20250407142234.47591-2-jiayuan.chen@linux.dev Signed-off-by: Alexei Starovoitov --- net/core/skmsg.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 0ddc4c718833..29cb5ffd56c0 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -680,7 +680,8 @@ static void sk_psock_backlog(struct work_struct *work) if (ret <= 0) { if (ret == -EAGAIN) { sk_psock_skb_state(psock, state, len, off); - + /* Restore redir info we cleared before */ + skb_bpf_set_redir(skb, psock->sk, ingress); /* Delay slightly to prioritize any * other work that might be here. */ From 3b4f14b794287be137ea2c6158765d1ea1e018a4 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Mon, 7 Apr 2025 22:21:21 +0800 Subject: [PATCH 014/135] bpf, sockmap: fix duplicated data transmission In the !ingress path under sk_psock_handle_skb(), when sending data to the remote under snd_buf limitations, partial skb data might be transmitted. Although we preserved the partial transmission state (offset/length), the state wasn't properly consumed during retries. This caused the retry path to resend the entire skb data instead of continuing from the previous offset, resulting in data overlap at the receiver side. Fixes: 405df89dd52c ("bpf, sockmap: Improved check for empty queue") Signed-off-by: Jiayuan Chen Link: https://lore.kernel.org/r/20250407142234.47591-3-jiayuan.chen@linux.dev Signed-off-by: Alexei Starovoitov --- net/core/skmsg.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 29cb5ffd56c0..9533b3e40ad7 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -656,11 +656,6 @@ static void sk_psock_backlog(struct work_struct *work) int ret; mutex_lock(&psock->work_mutex); - if (unlikely(state->len)) { - len = state->len; - off = state->off; - } - while ((skb = skb_peek(&psock->ingress_skb))) { len = skb->len; off = 0; @@ -670,6 +665,13 @@ static void sk_psock_backlog(struct work_struct *work) off = stm->offset; len = stm->full_len; } + + /* Resume processing from previous partial state */ + if (unlikely(state->len)) { + len = state->len; + off = state->off; + } + ingress = skb_bpf_ingress(skb); skb_bpf_redirect_clear(skb); do { @@ -698,6 +700,8 @@ static void sk_psock_backlog(struct work_struct *work) len -= ret; } while (len); + /* The entire skb sent, clear state */ + sk_psock_skb_state(psock, state, 0, 0); skb = skb_dequeue(&psock->ingress_skb); kfree_skb(skb); } From 5ca2e29f6834c64c0e5a9ccf1278c21fb49b827e Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Mon, 7 Apr 2025 22:21:22 +0800 Subject: [PATCH 015/135] bpf, sockmap: Fix panic when calling skb_linearize MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The panic can be reproduced by executing the command: ./bench sockmap -c 2 -p 1 -a --rx-verdict-ingress --rx-strp 100000 Then a kernel panic was captured: ''' [ 657.460555] kernel BUG at net/core/skbuff.c:2178! [ 657.462680] Tainted: [W]=WARN [ 657.463287] Workqueue: events sk_psock_backlog ... 
[ 657.469738] ? die+0x36/0x90 [ 657.469916] ? do_trap+0x1d0/0x270 [ 657.470118] ? pskb_expand_head+0x612/0xf40 [ 657.470376] ? pskb_expand_head+0x612/0xf40 [ 657.470620] ? do_error_trap+0xa3/0x170 [ 657.470846] ? pskb_expand_head+0x612/0xf40 [ 657.471092] ? handle_invalid_op+0x2c/0x40 [ 657.471335] ? pskb_expand_head+0x612/0xf40 [ 657.471579] ? exc_invalid_op+0x2d/0x40 [ 657.471805] ? asm_exc_invalid_op+0x1a/0x20 [ 657.472052] ? pskb_expand_head+0xd1/0xf40 [ 657.472292] ? pskb_expand_head+0x612/0xf40 [ 657.472540] ? lock_acquire+0x18f/0x4e0 [ 657.472766] ? find_held_lock+0x2d/0x110 [ 657.472999] ? __pfx_pskb_expand_head+0x10/0x10 [ 657.473263] ? __kmalloc_cache_noprof+0x5b/0x470 [ 657.473537] ? __pfx___lock_release.isra.0+0x10/0x10 [ 657.473826] __pskb_pull_tail+0xfd/0x1d20 [ 657.474062] ? __kasan_slab_alloc+0x4e/0x90 [ 657.474707] sk_psock_skb_ingress_enqueue+0x3bf/0x510 [ 657.475392] ? __kasan_kmalloc+0xaa/0xb0 [ 657.476010] sk_psock_backlog+0x5cf/0xd70 [ 657.476637] process_one_work+0x858/0x1a20 ''' The panic originates from the assertion BUG_ON(skb_shared(skb)) in skb_linearize(). A previous commit (see the Fixes tag) introduced skb_get() to avoid race conditions between skb operations in the backlog and skb release in the recvmsg path. However, this caused the panic to always occur when skb_linearize is executed. The "--rx-strp 100000" parameter forces the RX path to use the strparser module, which aggregates data until it reaches 100KB before calling the sockmap logic. The 100KB payload exceeds MAX_MSG_FRAGS, triggering skb_linearize. To fix this issue, just move skb_get into sk_psock_skb_ingress_enqueue. '''
sk_psock_backlog:
    sk_psock_handle_skb
        skb_get(skb) <== we move it into 'sk_psock_skb_ingress_enqueue'
        sk_psock_skb_ingress____________
                               ↓        |
                                        | → sk_psock_skb_ingress_self
                                        |   sk_psock_skb_ingress_enqueue
    sk_psock_verdict_apply_____________↑
        skb_linearize
''' Note that for the verdict_apply path, the skb_get operation is unnecessary, so we add a 'take_ref' param to control its behavior. Fixes: a454d84ee20b ("bpf, sockmap: Fix skb refcnt race after locking changes") Signed-off-by: Jiayuan Chen Link: https://lore.kernel.org/r/20250407142234.47591-4-jiayuan.chen@linux.dev Signed-off-by: Alexei Starovoitov --- net/core/skmsg.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 9533b3e40ad7..276934673066 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -530,16 +530,22 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, u32 off, u32 len, struct sk_psock *psock, struct sock *sk, - struct sk_msg *msg) + struct sk_msg *msg, + bool take_ref) { int num_sge, copied; + /* skb_to_sgvec will fail when the total number of fragments in + * frag_list and frags exceeds MAX_MSG_FRAGS. For example, the + * caller may aggregate multiple skbs. + */ num_sge = skb_to_sgvec(skb, msg->sg.data, off, len); if (num_sge < 0) { /* skb linearize may fail with ENOMEM, but lets simply try again * later if this happens. Under memory pressure we don't want to * drop the skb. We need to linearize the skb so that the mapping * in skb_to_sgvec can not error. + * Note that skb_linearize requires the skb not to be shared. */ if (skb_linearize(skb)) return -EAGAIN; @@ -556,7 +562,7 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, msg->sg.start = 0; msg->sg.size = copied; msg->sg.end = num_sge; - msg->skb = skb; + msg->skb = take_ref ?
skb_get(skb) : skb; sk_psock_queue_msg(psock, msg); sk_psock_data_ready(sk, psock); @@ -564,7 +570,7 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, } static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, - u32 off, u32 len); + u32 off, u32 len, bool take_ref); static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len) @@ -578,7 +584,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * correctly. */ if (unlikely(skb->sk == sk)) - return sk_psock_skb_ingress_self(psock, skb, off, len); + return sk_psock_skb_ingress_self(psock, skb, off, len, true); msg = sk_psock_create_ingress_msg(sk, skb); if (!msg) return -EAGAIN; @@ -590,7 +596,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * into user buffers. */ skb_set_owner_r(skb, sk); - err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true); if (err < 0) kfree(msg); return err; @@ -601,7 +607,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, * because the skb is already accounted for here. */ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, - u32 off, u32 len) + u32 off, u32 len, bool take_ref) { struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC); struct sock *sk = psock->sk; @@ -610,7 +616,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb if (unlikely(!msg)) return -EAGAIN; skb_set_owner_r(skb, sk); - err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg); + err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref); if (err < 0) kfree(msg); return err; @@ -619,18 +625,13 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, u32 off, u32 len, bool ingress) { - int err = 0; - if (!ingress) { if (!sock_writeable(psock->sk)) return -EAGAIN; return skb_send_sock(psock->sk, skb, off, len); } - skb_get(skb); - err = sk_psock_skb_ingress(psock, skb, off, len); - if (err < 0) - kfree_skb(skb); - return err; + + return sk_psock_skb_ingress(psock, skb, off, len); } static void sk_psock_skb_state(struct sk_psock *psock, @@ -1019,7 +1020,7 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, off = stm->offset; len = stm->full_len; } - err = sk_psock_skb_ingress_self(psock, skb, off, len); + err = sk_psock_skb_ingress_self(psock, skb, off, len, false); } if (err < 0) { spin_lock_bh(&psock->ingress_lock); From 7b2fa44de5e718a3053dea37e4a3d893b0f40e42 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Mon, 7 Apr 2025 22:21:23 +0800 Subject: [PATCH 016/135] selftest/bpf/benchs: Add benchmark for sockmap usage Add TCP+sockmap-based benchmark. Since sockmap's own update and delete operations are generally less critical, the performance of the fast forwarding framework built upon it is the key aspect. Also with cgset/cgexec, we can observe the behavior of sockmap under memory pressure. The benchmark can be run with: ''' ./bench sockmap -c 2 -p 1 -a --rx-verdict-ingress ''' In the future, we plan to move socket_helpers.h out of the prog_tests directory to make it accessible for the benchmark. This will enable better support for various socket types. 
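For the memory-pressure case mentioned above, one possible invocation (a sketch assuming the libcgroup tools and a cgroup v2 memory controller; the group name is illustrative):
'''
cgcreate -g memory:sockmap_bench
cgset -r memory.max=64M sockmap_bench
cgexec -g memory:sockmap_bench ./bench sockmap -c 2 -p 1 -a --rx-verdict-ingress
'''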
Signed-off-by: Jiayuan Chen Link: https://lore.kernel.org/r/20250407142234.47591-5-jiayuan.chen@linux.dev Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 2 + tools/testing/selftests/bpf/bench.c | 4 + .../selftests/bpf/benchs/bench_sockmap.c | 599 ++++++++++++++++++ .../selftests/bpf/progs/bench_sockmap_prog.c | 65 ++ 4 files changed, 670 insertions(+) create mode 100644 tools/testing/selftests/bpf/benchs/bench_sockmap.c create mode 100644 tools/testing/selftests/bpf/progs/bench_sockmap_prog.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 66bb50356be0..03663222a0a5 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -811,6 +811,7 @@ $(OUTPUT)/bench_local_storage_create.o: $(OUTPUT)/bench_local_storage_create.ske $(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h $(OUTPUT)/bench_htab_mem.o: $(OUTPUT)/htab_mem_bench.skel.h $(OUTPUT)/bench_bpf_crypto.o: $(OUTPUT)/crypto_bench.skel.h +$(OUTPUT)/bench_sockmap.o: $(OUTPUT)/bench_sockmap_prog.skel.h $(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) $(OUTPUT)/bench: LDLIBS += -lm $(OUTPUT)/bench: $(OUTPUT)/bench.o \ @@ -831,6 +832,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \ $(OUTPUT)/bench_local_storage_create.o \ $(OUTPUT)/bench_htab_mem.o \ $(OUTPUT)/bench_bpf_crypto.o \ + $(OUTPUT)/bench_sockmap.o \ # $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 1bd403a5ef7b..c80df9b75094 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -283,6 +283,7 @@ extern struct argp bench_local_storage_create_argp; extern struct argp bench_htab_mem_argp; extern struct argp bench_trigger_batch_argp; extern struct argp bench_crypto_argp; +extern struct argp bench_sockmap_argp; static const struct argp_child bench_parsers[] = { { &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 }, @@ -297,6 +298,7 @@ static const struct argp_child bench_parsers[] = { { &bench_htab_mem_argp, 0, "hash map memory benchmark", 0 }, { &bench_trigger_batch_argp, 0, "BPF triggering benchmark", 0 }, { &bench_crypto_argp, 0, "bpf crypto benchmark", 0 }, + { &bench_sockmap_argp, 0, "bpf sockmap benchmark", 0 }, {}, }; @@ -549,6 +551,7 @@ extern const struct bench bench_local_storage_create; extern const struct bench bench_htab_mem; extern const struct bench bench_crypto_encrypt; extern const struct bench bench_crypto_decrypt; +extern const struct bench bench_sockmap; static const struct bench *benchs[] = { &bench_count_global, @@ -609,6 +612,7 @@ static const struct bench *benchs[] = { &bench_htab_mem, &bench_crypto_encrypt, &bench_crypto_decrypt, + &bench_sockmap, }; static void find_benchmark(void) diff --git a/tools/testing/selftests/bpf/benchs/bench_sockmap.c b/tools/testing/selftests/bpf/benchs/bench_sockmap.c new file mode 100644 index 000000000000..54f4e7c03cd2 --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/bench_sockmap.c @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bench.h" +#include "bench_sockmap_prog.skel.h" + +#define FILE_SIZE (128 * 1024) +#define DATA_REPEAT_SIZE 10 + +static const char snd_data[DATA_REPEAT_SIZE] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + +/* c1 <-> [p1, p2] <-> c2 + * RX bench(BPF_SK_SKB_STREAM_VERDICT): + * ARG_FW_RX_PASS: + * 
send(p2) -> recv(c2) -> bpf skb passthrough -> recv(c2) + * ARG_FW_RX_VERDICT_EGRESS: + * send(c1) -> verdict skb to tx queuec of p2 -> recv(c2) + * ARG_FW_RX_VERDICT_INGRESS: + * send(c1) -> verdict skb to rx queuec of c2 -> recv(c2) + * + * TX bench(BPF_SK_MSG_VERDIC): + * ARG_FW_TX_PASS: + * send(p2) -> bpf msg passthrough -> send(p2) -> recv(c2) + * ARG_FW_TX_VERDICT_INGRESS: + * send(p2) -> verdict msg to rx queue of c2 -> recv(c2) + * ARG_FW_TX_VERDICT_EGRESS: + * send(p1) -> verdict msg to tx queue of p2 -> recv(c2) + */ +enum SOCKMAP_ARG_FLAG { + ARG_FW_RX_NORMAL = 11000, + ARG_FW_RX_PASS, + ARG_FW_RX_VERDICT_EGRESS, + ARG_FW_RX_VERDICT_INGRESS, + ARG_FW_TX_NORMAL, + ARG_FW_TX_PASS, + ARG_FW_TX_VERDICT_INGRESS, + ARG_FW_TX_VERDICT_EGRESS, + ARG_CTL_RX_STRP, + ARG_CONSUMER_DELAY_TIME, + ARG_PRODUCER_DURATION, +}; + +#define TXMODE_NORMAL() \ + ((ctx.mode) == ARG_FW_TX_NORMAL) + +#define TXMODE_BPF_INGRESS() \ + ((ctx.mode) == ARG_FW_TX_VERDICT_INGRESS) + +#define TXMODE_BPF_EGRESS() \ + ((ctx.mode) == ARG_FW_TX_VERDICT_EGRESS) + +#define TXMODE_BPF_PASS() \ + ((ctx.mode) == ARG_FW_TX_PASS) + +#define TXMODE_BPF() ( \ + TXMODE_BPF_PASS() || \ + TXMODE_BPF_INGRESS() || \ + TXMODE_BPF_EGRESS()) + +#define TXMODE() ( \ + TXMODE_NORMAL() || \ + TXMODE_BPF()) + +#define RXMODE_NORMAL() \ + ((ctx.mode) == ARG_FW_RX_NORMAL) + +#define RXMODE_BPF_PASS() \ + ((ctx.mode) == ARG_FW_RX_PASS) + +#define RXMODE_BPF_VERDICT_EGRESS() \ + ((ctx.mode) == ARG_FW_RX_VERDICT_EGRESS) + +#define RXMODE_BPF_VERDICT_INGRESS() \ + ((ctx.mode) == ARG_FW_RX_VERDICT_INGRESS) + +#define RXMODE_BPF_VERDICT() ( \ + RXMODE_BPF_VERDICT_INGRESS() || \ + RXMODE_BPF_VERDICT_EGRESS()) + +#define RXMODE_BPF() ( \ + RXMODE_BPF_PASS() || \ + RXMODE_BPF_VERDICT()) + +#define RXMODE() ( \ + RXMODE_NORMAL() || \ + RXMODE_BPF()) + +static struct socmap_ctx { + struct bench_sockmap_prog *skel; + enum SOCKMAP_ARG_FLAG mode; + #define c1 fds[0] + #define p1 fds[1] + #define c2 fds[2] + #define p2 fds[3] + #define sfd fds[4] + int fds[5]; + long send_calls; + long read_calls; + long prod_send; + long user_read; + int file_size; + int delay_consumer; + int prod_run_time; + int strp_size; +} ctx = { + .prod_send = 0, + .user_read = 0, + .file_size = FILE_SIZE, + .mode = ARG_FW_RX_VERDICT_EGRESS, + .fds = {0}, + .delay_consumer = 0, + .prod_run_time = 0, + .strp_size = 0, +}; + +static void bench_sockmap_prog_destroy(void) +{ + int i; + + for (i = 0; i < sizeof(ctx.fds); i++) { + if (ctx.fds[0] > 0) + close(ctx.fds[i]); + } + + bench_sockmap_prog__destroy(ctx.skel); +} + +static void init_addr(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss)); + + addr4->sin_family = AF_INET; + addr4->sin_port = 0; + addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK); + *len = sizeof(*addr4); +} + +static bool set_non_block(int fd, bool blocking) +{ + int flags = fcntl(fd, F_GETFL, 0); + + if (flags == -1) + return false; + flags = blocking ? 
(flags | O_NONBLOCK) : (flags & ~O_NONBLOCK); + return (fcntl(fd, F_SETFL, flags) == 0); +} + +static int create_pair(int *c, int *p, int type) +{ + struct sockaddr_storage addr; + int err, cfd, pfd; + socklen_t addr_len = sizeof(struct sockaddr_storage); + + err = getsockname(ctx.sfd, (struct sockaddr *)&addr, &addr_len); + if (err) { + fprintf(stderr, "getsockname error %d\n", errno); + return err; + } + cfd = socket(AF_INET, type, 0); + if (cfd < 0) { + fprintf(stderr, "socket error %d\n", errno); + return err; + } + + err = connect(cfd, (struct sockaddr *)&addr, addr_len); + if (err && errno != EINPROGRESS) { + fprintf(stderr, "connect error %d\n", errno); + return err; + } + + pfd = accept(ctx.sfd, NULL, NULL); + if (pfd < 0) { + fprintf(stderr, "accept error %d\n", errno); + return err; + } + *c = cfd; + *p = pfd; + return 0; +} + +static int create_sockets(void) +{ + struct sockaddr_storage addr; + int err, one = 1; + socklen_t addr_len; + + init_addr(&addr, &addr_len); + ctx.sfd = socket(AF_INET, SOCK_STREAM, 0); + if (ctx.sfd < 0) { + fprintf(stderr, "socket error:%d\n", errno); + return ctx.sfd; + } + err = setsockopt(ctx.sfd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); + if (err) { + fprintf(stderr, "setsockopt error:%d\n", errno); + return err; + } + + err = bind(ctx.sfd, (struct sockaddr *)&addr, addr_len); + if (err) { + fprintf(stderr, "bind error:%d\n", errno); + return err; + } + + err = listen(ctx.sfd, SOMAXCONN); + if (err) { + fprintf(stderr, "listen error:%d\n", errno); + return err; + } + + err = create_pair(&ctx.c1, &ctx.p1, SOCK_STREAM); + if (err) { + fprintf(stderr, "create_pair 1 error\n"); + return err; + } + + err = create_pair(&ctx.c2, &ctx.p2, SOCK_STREAM); + if (err) { + fprintf(stderr, "create_pair 2 error\n"); + return err; + } + printf("create socket fd c1:%d p1:%d c2:%d p2:%d\n", + ctx.c1, ctx.p1, ctx.c2, ctx.p2); + return 0; +} + +static void validate(void) +{ + if (env.consumer_cnt != 2 || env.producer_cnt != 1 || + !env.affinity) + goto err; + return; +err: + fprintf(stderr, "argument '-c 2 -p 1 -a' is necessary"); + exit(1); +} + +static int setup_rx_sockmap(void) +{ + int verdict, pass, parser, map; + int zero = 0, one = 1; + int err; + + parser = bpf_program__fd(ctx.skel->progs.prog_skb_parser); + verdict = bpf_program__fd(ctx.skel->progs.prog_skb_verdict); + pass = bpf_program__fd(ctx.skel->progs.prog_skb_pass); + map = bpf_map__fd(ctx.skel->maps.sock_map_rx); + + if (ctx.strp_size != 0) { + ctx.skel->bss->pkt_size = ctx.strp_size; + err = bpf_prog_attach(parser, map, BPF_SK_SKB_STREAM_PARSER, 0); + if (err) + return err; + } + + if (RXMODE_BPF_VERDICT()) + err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0); + else if (RXMODE_BPF_PASS()) + err = bpf_prog_attach(pass, map, BPF_SK_SKB_STREAM_VERDICT, 0); + if (err) + return err; + + if (RXMODE_BPF_PASS()) + return bpf_map_update_elem(map, &zero, &ctx.c2, BPF_NOEXIST); + + err = bpf_map_update_elem(map, &zero, &ctx.p1, BPF_NOEXIST); + if (err < 0) + return err; + + if (RXMODE_BPF_VERDICT_INGRESS()) { + ctx.skel->bss->verdict_dir = BPF_F_INGRESS; + err = bpf_map_update_elem(map, &one, &ctx.c2, BPF_NOEXIST); + } else { + err = bpf_map_update_elem(map, &one, &ctx.p2, BPF_NOEXIST); + } + if (err < 0) + return err; + + return 0; +} + +static int setup_tx_sockmap(void) +{ + int zero = 0, one = 1; + int prog, map; + int err; + + map = bpf_map__fd(ctx.skel->maps.sock_map_tx); + prog = TXMODE_BPF_PASS() ? 
+ bpf_program__fd(ctx.skel->progs.prog_skmsg_pass) : + bpf_program__fd(ctx.skel->progs.prog_skmsg_verdict); + + err = bpf_prog_attach(prog, map, BPF_SK_MSG_VERDICT, 0); + if (err) + return err; + + if (TXMODE_BPF_EGRESS()) { + err = bpf_map_update_elem(map, &zero, &ctx.p1, BPF_NOEXIST); + err |= bpf_map_update_elem(map, &one, &ctx.p2, BPF_NOEXIST); + } else { + ctx.skel->bss->verdict_dir = BPF_F_INGRESS; + err = bpf_map_update_elem(map, &zero, &ctx.p2, BPF_NOEXIST); + err |= bpf_map_update_elem(map, &one, &ctx.c2, BPF_NOEXIST); + } + + if (err < 0) + return err; + + return 0; +} + +static void setup(void) +{ + int err; + + ctx.skel = bench_sockmap_prog__open_and_load(); + if (!ctx.skel) { + fprintf(stderr, "error loading skel\n"); + exit(1); + } + + if (create_sockets()) { + fprintf(stderr, "create_net_mode error\n"); + goto err; + } + + if (RXMODE_BPF()) { + err = setup_rx_sockmap(); + if (err) { + fprintf(stderr, "setup_rx_sockmap error:%d\n", err); + goto err; + } + } else if (TXMODE_BPF()) { + err = setup_tx_sockmap(); + if (err) { + fprintf(stderr, "setup_tx_sockmap error:%d\n", err); + goto err; + } + } else { + fprintf(stderr, "unknown sockmap bench mode: %d\n", ctx.mode); + goto err; + } + + return; + +err: + bench_sockmap_prog_destroy(); + exit(1); +} + +static void measure(struct bench_res *res) +{ + res->drops = atomic_swap(&ctx.prod_send, 0); + res->hits = atomic_swap(&ctx.skel->bss->process_byte, 0); + res->false_hits = atomic_swap(&ctx.user_read, 0); + res->important_hits = atomic_swap(&ctx.send_calls, 0); + res->important_hits |= atomic_swap(&ctx.read_calls, 0) << 32; +} + +static void verify_data(int *check_pos, char *buf, int rcv) +{ + for (int i = 0 ; i < rcv; i++) { + if (buf[i] != snd_data[(*check_pos) % DATA_REPEAT_SIZE]) { + fprintf(stderr, "verify data fail"); + exit(1); + } + (*check_pos)++; + if (*check_pos >= FILE_SIZE) + *check_pos = 0; + } +} + +static void *consumer(void *input) +{ + int rcv, sent; + int check_pos = 0; + int tid = (long)input; + int recv_buf_size = FILE_SIZE; + char *buf = malloc(recv_buf_size); + int delay_read = ctx.delay_consumer; + + if (!buf) { + fprintf(stderr, "fail to init read buffer"); + return NULL; + } + + while (true) { + if (tid == 1) { + /* consumer 1 is unused for tx test and stream verdict test */ + if (RXMODE_BPF() || TXMODE()) + return NULL; + /* it's only for RX_NORMAL which service as reserve-proxy mode */ + rcv = read(ctx.p1, buf, recv_buf_size); + if (rcv < 0) { + fprintf(stderr, "fail to read p1"); + return NULL; + } + + sent = send(ctx.p2, buf, recv_buf_size, 0); + if (sent < 0) { + fprintf(stderr, "fail to send p2"); + return NULL; + } + } else { + if (delay_read != 0) { + if (delay_read < 0) + return NULL; + sleep(delay_read); + delay_read = 0; + } + /* read real endpoint by consumer 0 */ + atomic_inc(&ctx.read_calls); + rcv = read(ctx.c2, buf, recv_buf_size); + if (rcv < 0 && errno != EAGAIN) { + fprintf(stderr, "%s fail to read c2 %d\n", __func__, errno); + return NULL; + } + verify_data(&check_pos, buf, rcv); + atomic_add(&ctx.user_read, rcv); + } + } + + return NULL; +} + +static void *producer(void *input) +{ + int off = 0, fp, need_sent, sent; + int file_size = ctx.file_size; + struct timespec ts1, ts2; + int target; + FILE *file; + + file = tmpfile(); + if (!file) { + fprintf(stderr, "create file for sendfile"); + return NULL; + } + + /* we need simple verify */ + for (int i = 0; i < file_size; i++) { + if (fwrite(&snd_data[off], sizeof(char), 1, file) != 1) { + fprintf(stderr, "init tmpfile error"); + return 
NULL; + } + if (++off >= sizeof(snd_data)) + off = 0; + } + fflush(file); + fseek(file, 0, SEEK_SET); + + fp = fileno(file); + need_sent = file_size; + clock_gettime(CLOCK_MONOTONIC, &ts1); + + if (RXMODE_BPF_VERDICT()) + target = ctx.c1; + else if (TXMODE_BPF_EGRESS()) + target = ctx.p1; + else + target = ctx.p2; + set_non_block(target, true); + while (true) { + if (ctx.prod_run_time) { + clock_gettime(CLOCK_MONOTONIC, &ts2); + if (ts2.tv_sec - ts1.tv_sec > ctx.prod_run_time) + return NULL; + } + + errno = 0; + atomic_inc(&ctx.send_calls); + sent = sendfile(target, fp, NULL, need_sent); + if (sent < 0) { + if (errno != EAGAIN && errno != ENOMEM && errno != ENOBUFS) { + fprintf(stderr, "sendfile return %d, errorno %d:%s\n", + sent, errno, strerror(errno)); + return NULL; + } + continue; + } else if (sent < need_sent) { + need_sent -= sent; + atomic_add(&ctx.prod_send, sent); + continue; + } + atomic_add(&ctx.prod_send, need_sent); + need_sent = file_size; + lseek(fp, 0, SEEK_SET); + } + + return NULL; +} + +static void report_progress(int iter, struct bench_res *res, long delta_ns) +{ + double speed_mbs, prod_mbs, bpf_mbs, send_hz, read_hz; + + prod_mbs = res->drops / 1000000.0 / (delta_ns / 1000000000.0); + speed_mbs = res->false_hits / 1000000.0 / (delta_ns / 1000000000.0); + bpf_mbs = res->hits / 1000000.0 / (delta_ns / 1000000000.0); + send_hz = (res->important_hits & 0xFFFFFFFF) / (delta_ns / 1000000000.0); + read_hz = (res->important_hits >> 32) / (delta_ns / 1000000000.0); + + printf("Iter %3d (%7.3lfus): ", + iter, (delta_ns - 1000000000) / 1000.0); + printf("Send Speed %8.3lf MB/s (%8.3lf calls/s), BPF Speed %8.3lf MB/s, " + "Rcv Speed %8.3lf MB/s (%8.3lf calls/s)\n", + prod_mbs, send_hz, bpf_mbs, speed_mbs, read_hz); +} + +static void report_final(struct bench_res res[], int res_cnt) +{ + double verdict_mbs_mean = 0.0; + long verdict_total = 0; + int i; + + for (i = 0; i < res_cnt; i++) { + verdict_mbs_mean += res[i].hits / 1000000.0 / (0.0 + res_cnt); + verdict_total += res[i].hits / 1000000.0; + } + + printf("Summary: total trans %8.3lu MB \u00B1 %5.3lf MB/s\n", + verdict_total, verdict_mbs_mean); +} + +static const struct argp_option opts[] = { + { "rx-normal", ARG_FW_RX_NORMAL, NULL, 0, + "simple reserve-proxy mode, no bfp enabled"}, + { "rx-pass", ARG_FW_RX_PASS, NULL, 0, + "run bpf prog but no redir applied"}, + { "rx-strp", ARG_CTL_RX_STRP, "Byte", 0, + "enable strparser and set the encapsulation size"}, + { "rx-verdict-egress", ARG_FW_RX_VERDICT_EGRESS, NULL, 0, + "forward data with bpf(stream verdict)"}, + { "rx-verdict-ingress", ARG_FW_RX_VERDICT_INGRESS, NULL, 0, + "forward data with bpf(stream verdict)"}, + { "tx-normal", ARG_FW_TX_NORMAL, NULL, 0, + "simple c-s mode, no bfp enabled"}, + { "tx-pass", ARG_FW_TX_PASS, NULL, 0, + "run bpf prog but no redir applied"}, + { "tx-verdict-ingress", ARG_FW_TX_VERDICT_INGRESS, NULL, 0, + "forward msg to ingress queue of another socket"}, + { "tx-verdict-egress", ARG_FW_TX_VERDICT_EGRESS, NULL, 0, + "forward msg to egress queue of another socket"}, + { "delay-consumer", ARG_CONSUMER_DELAY_TIME, "SEC", 0, + "delay consumer start"}, + { "producer-duration", ARG_PRODUCER_DURATION, "SEC", 0, + "producer duration"}, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case ARG_FW_RX_NORMAL...ARG_FW_TX_VERDICT_EGRESS: + ctx.mode = key; + break; + case ARG_CONSUMER_DELAY_TIME: + ctx.delay_consumer = strtol(arg, NULL, 10); + break; + case ARG_PRODUCER_DURATION: + ctx.prod_run_time = 
strtol(arg, NULL, 10); + break; + case ARG_CTL_RX_STRP: + ctx.strp_size = strtol(arg, NULL, 10); + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +/* exported into benchmark runner */ +const struct argp bench_sockmap_argp = { + .options = opts, + .parser = parse_arg, +}; + +/* Benchmark performance of creating bpf local storage */ +const struct bench bench_sockmap = { + .name = "sockmap", + .argp = &bench_sockmap_argp, + .validate = validate, + .setup = setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = report_progress, + .report_final = report_final, +}; diff --git a/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c new file mode 100644 index 000000000000..079bf3794b3a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bench_sockmap_prog.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +long process_byte = 0; +int verdict_dir = 0; +int dropped = 0; +int pkt_size = 0; +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_rx SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 20); + __type(key, int); + __type(value, int); +} sock_map_tx SEC(".maps"); + +SEC("sk_skb/stream_parser") +int prog_skb_parser(struct __sk_buff *skb) +{ + return pkt_size; +} + +SEC("sk_skb/stream_verdict") +int prog_skb_verdict(struct __sk_buff *skb) +{ + int one = 1; + int ret = bpf_sk_redirect_map(skb, &sock_map_rx, one, verdict_dir); + + if (ret == SK_DROP) + dropped++; + __sync_fetch_and_add(&process_byte, skb->len); + return ret; +} + +SEC("sk_skb/stream_verdict") +int prog_skb_pass(struct __sk_buff *skb) +{ + __sync_fetch_and_add(&process_byte, skb->len); + return SK_PASS; +} + +SEC("sk_msg") +int prog_skmsg_verdict(struct sk_msg_md *msg) +{ + int one = 1; + + __sync_fetch_and_add(&process_byte, msg->size); + return bpf_msg_redirect_map(msg, &sock_map_tx, one, verdict_dir); +} + +SEC("sk_msg") +int prog_skmsg_pass(struct sk_msg_md *msg) +{ + __sync_fetch_and_add(&process_byte, msg->size); + return SK_PASS; +} + +char _license[] SEC("license") = "GPL"; From b412fd6bcc4c1696b0674434f56b198950c0e2bf Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Tue, 8 Apr 2025 11:00:04 +0200 Subject: [PATCH 017/135] bpf: Clarify role of BPF_F_RECOMPUTE_CSUM BPF_F_RECOMPUTE_CSUM doesn't update the actual L3 and L4 checksums in the packet, but simply updates skb->csum (according to skb->ip_summed). This patch clarifies that to avoid confusions. Signed-off-by: Paul Chaignon Link: https://lore.kernel.org/r/ff6895d42936f03dbb82334d8bcfd50e00c79086.1744102490.git.paul.chaignon@gmail.com Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 14 +++++++++----- tools/include/uapi/linux/bpf.h | 14 +++++++++----- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 07ee73cdf97b..14ef3db844fa 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1995,11 +1995,15 @@ union bpf_attr { * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet - * associated to *skb*, at *offset*. 
*flags* are a combination of - * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the - * checksum for the packet after storing the bytes) and - * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ - * **->swhash** and *skb*\ **->l4hash** to 0). + * associated to *skb*, at *offset*. The *flags* are a combination + * of the following values: + * + * **BPF_F_RECOMPUTE_CSUM** + * Automatically update *skb*\ **->csum** after storing the + * bytes. + * **BPF_F_INVALIDATE_HASH** + * Set *skb*\ **->hash**, *skb*\ **->swhash** and *skb*\ + * **->l4hash** to 0. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 07ee73cdf97b..14ef3db844fa 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1995,11 +1995,15 @@ union bpf_attr { * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet - * associated to *skb*, at *offset*. *flags* are a combination of - * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the - * checksum for the packet after storing the bytes) and - * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ - * **->swhash** and *skb*\ **->l4hash** to 0). + * associated to *skb*, at *offset*. The *flags* are a combination + * of the following values: + * + * **BPF_F_RECOMPUTE_CSUM** + * Automatically update *skb*\ **->csum** after storing the + * bytes. + * **BPF_F_INVALIDATE_HASH** + * Set *skb*\ **->hash**, *skb*\ **->swhash** and *skb*\ + * **->l4hash** to 0. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers From 5a15a050df714959f0d5a57ac3201bd1c6594984 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Tue, 8 Apr 2025 11:00:51 +0200 Subject: [PATCH 018/135] bpf: Clarify the meaning of BPF_F_PSEUDO_HDR In the bpf_l4_csum_replace helper, the BPF_F_PSEUDO_HDR flag should only be set if the modified header field is part of the pseudo-header. If you modify for example the UDP ports and pass BPF_F_PSEUDO_HDR, inet_proto_csum_replace4 will update skb->csum even though it shouldn't (the port and the UDP checksum updates null each other). Signed-off-by: Paul Chaignon Link: https://lore.kernel.org/r/5126ef84ba75425b689482cbc98bffe75e5d8ab0.1744102490.git.paul.chaignon@gmail.com Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 2 +- tools/include/uapi/linux/bpf.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 14ef3db844fa..71d5ac83cf5d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2055,7 +2055,7 @@ union bpf_attr { * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates - * the checksum is to be computed against a pseudo-header. + * that the modified header field is part of the pseudo-header. 
* * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 14ef3db844fa..71d5ac83cf5d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2055,7 +2055,7 @@ union bpf_attr { * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and * for updates resulting in a null checksum the value is set to * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates - * the checksum is to be computed against a pseudo-header. + * that the modified header field is part of the pseudo-header. * * This helper works in combination with **bpf_csum_diff**\ (), * which does not update the checksum in-place, but offers more From ba2b31b0f39fca12abbd21c53a92838bbc026023 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Tue, 1 Apr 2025 14:22:45 +0800 Subject: [PATCH 019/135] bpf: Factor out htab_elem_value helper() All hash maps store map key and map value together. The relative offset of the map value compared to the map key is round_up(key_size, 8). Therefore, factor out a common helper htab_elem_value() to calculate the address of the map value instead of duplicating the logic. Acked-by: Andrii Nakryiko Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20250401062250.543403-2-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/hashtab.c | 64 +++++++++++++++++++++----------------------- 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 5a5adc66b8e2..0bebc919bbf7 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -175,20 +175,25 @@ static bool htab_is_percpu(const struct bpf_htab *htab) htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; } +static inline void *htab_elem_value(struct htab_elem *l, u32 key_size) +{ + return l->key + round_up(key_size, 8); +} + static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, void __percpu *pptr) { - *(void __percpu **)(l->key + roundup(key_size, 8)) = pptr; + *(void __percpu **)htab_elem_value(l, key_size) = pptr; } static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size) { - return *(void __percpu **)(l->key + roundup(key_size, 8)); + return *(void __percpu **)htab_elem_value(l, key_size); } static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) { - return *(void **)(l->key + roundup(map->key_size, 8)); + return *(void **)htab_elem_value(l, map->key_size); } static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) @@ -215,10 +220,10 @@ static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab) elem = get_htab_elem(htab, i); if (btf_record_has_field(htab->map.record, BPF_TIMER)) bpf_obj_free_timer(htab->map.record, - elem->key + round_up(htab->map.key_size, 8)); + htab_elem_value(elem, htab->map.key_size)); if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) bpf_obj_free_workqueue(htab->map.record, - elem->key + round_up(htab->map.key_size, 8)); + htab_elem_value(elem, htab->map.key_size)); cond_resched(); } } @@ -245,7 +250,8 @@ static void htab_free_prealloced_fields(struct bpf_htab *htab) cond_resched(); } } else { - bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); + bpf_obj_free_fields(htab->map.record, + htab_elem_value(elem, htab->map.key_size)); cond_resched(); } cond_resched(); @@ -670,7 +676,7 @@ static void *htab_map_lookup_elem(struct bpf_map 
*map, void *key) struct htab_elem *l = __htab_map_lookup_elem(map, key); if (l) - return l->key + round_up(map->key_size, 8); + return htab_elem_value(l, map->key_size); return NULL; } @@ -709,7 +715,7 @@ static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, if (l) { if (mark) bpf_lru_node_set_ref(&l->lru_node); - return l->key + round_up(map->key_size, 8); + return htab_elem_value(l, map->key_size); } return NULL; @@ -763,7 +769,7 @@ static void check_and_free_fields(struct bpf_htab *htab, for_each_possible_cpu(cpu) bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); } else { - void *map_value = elem->key + round_up(htab->map.key_size, 8); + void *map_value = htab_elem_value(elem, htab->map.key_size); bpf_obj_free_fields(htab->map.record, map_value); } @@ -1039,11 +1045,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, htab_elem_set_ptr(l_new, key_size, pptr); } else if (fd_htab_map_needs_adjust(htab)) { size = round_up(size, 8); - memcpy(l_new->key + round_up(key_size, 8), value, size); + memcpy(htab_elem_value(l_new, key_size), value, size); } else { - copy_map_value(&htab->map, - l_new->key + round_up(key_size, 8), - value); + copy_map_value(&htab->map, htab_elem_value(l_new, key_size), value); } l_new->hash = hash; @@ -1106,7 +1110,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value, if (l_old) { /* grab the element lock and update value in place */ copy_map_value_locked(map, - l_old->key + round_up(key_size, 8), + htab_elem_value(l_old, key_size), value, false); return 0; } @@ -1134,7 +1138,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value, * and update element in place */ copy_map_value_locked(map, - l_old->key + round_up(key_size, 8), + htab_elem_value(l_old, key_size), value, false); ret = 0; goto err; @@ -1220,8 +1224,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value l_new = prealloc_lru_pop(htab, key, hash); if (!l_new) return -ENOMEM; - copy_map_value(&htab->map, - l_new->key + round_up(map->key_size, 8), value); + copy_map_value(&htab->map, htab_elem_value(l_new, map->key_size), value); ret = htab_lock_bucket(b, &flags); if (ret) @@ -1500,10 +1503,10 @@ static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab) /* We only free timer on uref dropping to zero */ if (btf_record_has_field(htab->map.record, BPF_TIMER)) bpf_obj_free_timer(htab->map.record, - l->key + round_up(htab->map.key_size, 8)); + htab_elem_value(l, htab->map.key_size)); if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) bpf_obj_free_workqueue(htab->map.record, - l->key + round_up(htab->map.key_size, 8)); + htab_elem_value(l, htab->map.key_size)); } cond_resched_rcu(); } @@ -1615,15 +1618,12 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key, off += roundup_value_size; } } else { - u32 roundup_key_size = round_up(map->key_size, 8); + void *src = htab_elem_value(l, map->key_size); if (flags & BPF_F_LOCK) - copy_map_value_locked(map, value, l->key + - roundup_key_size, - true); + copy_map_value_locked(map, value, src, true); else - copy_map_value(map, value, l->key + - roundup_key_size); + copy_map_value(map, value, src); /* Zeroing special fields in the temp buffer */ check_and_init_map_value(map, value); } @@ -1680,12 +1680,12 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, bool is_percpu) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - u32 bucket_cnt, total, key_size, 
value_size, roundup_key_size; void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val; void __user *uvalues = u64_to_user_ptr(attr->batch.values); void __user *ukeys = u64_to_user_ptr(attr->batch.keys); void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch); u32 batch, max_count, size, bucket_size, map_id; + u32 bucket_cnt, total, key_size, value_size; struct htab_elem *node_to_free = NULL; u64 elem_map_flags, map_flags; struct hlist_nulls_head *head; @@ -1720,7 +1720,6 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map, return -ENOENT; key_size = htab->map.key_size; - roundup_key_size = round_up(htab->map.key_size, 8); value_size = htab->map.value_size; size = round_up(value_size, 8); if (is_percpu) @@ -1812,7 +1811,7 @@ again_nocopy: off += size; } } else { - value = l->key + roundup_key_size; + value = htab_elem_value(l, key_size); if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { struct bpf_map **inner_map = value; @@ -2063,11 +2062,11 @@ static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem) { struct bpf_iter_seq_hash_map_info *info = seq->private; - u32 roundup_key_size, roundup_value_size; struct bpf_iter__bpf_map_elem ctx = {}; struct bpf_map *map = info->map; struct bpf_iter_meta meta; int ret = 0, off = 0, cpu; + u32 roundup_value_size; struct bpf_prog *prog; void __percpu *pptr; @@ -2077,10 +2076,9 @@ static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem) ctx.meta = &meta; ctx.map = info->map; if (elem) { - roundup_key_size = round_up(map->key_size, 8); ctx.key = elem->key; if (!info->percpu_value_buf) { - ctx.value = elem->key + roundup_key_size; + ctx.value = htab_elem_value(elem, map->key_size); } else { roundup_value_size = round_up(map->value_size, 8); pptr = htab_elem_get_ptr(elem, map->key_size); @@ -2165,7 +2163,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_ struct hlist_nulls_head *head; struct hlist_nulls_node *n; struct htab_elem *elem; - u32 roundup_key_size; int i, num_elems = 0; void __percpu *pptr; struct bucket *b; @@ -2180,7 +2177,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_ is_percpu = htab_is_percpu(htab); - roundup_key_size = round_up(map->key_size, 8); /* migration has been disabled, so percpu value prepared here will be * the same as the one seen by the bpf program with * bpf_map_lookup_elem(). @@ -2196,7 +2192,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_ pptr = htab_elem_get_ptr(elem, map->key_size); val = this_cpu_ptr(pptr); } else { - val = elem->key + roundup_key_size; + val = htab_elem_value(elem, map->key_size); } num_elems++; ret = callback_fn((u64)(long)map, (u64)(long)key, From 5771e306b6cd8ce5b9935d006765f887f145e6d5 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Tue, 1 Apr 2025 14:22:46 +0800 Subject: [PATCH 020/135] bpf: Rename __htab_percpu_map_update_elem to htab_map_update_elem_in_place Rename __htab_percpu_map_update_elem to htab_map_update_elem_in_place, and add a new percpu argument for the helper to support in-place update for both per-cpu htab and htab of maps. 
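A minimal usage sketch (not part of the patch; names taken from the diff below): the
new percpu flag selects between the per-cpu copy path and the plain in-place path, so
the call sites reduce to:

	/* per-cpu hash map: copy the value in place for the current CPU */
	htab_map_update_elem_in_place(map, key, value, map_flags,
				      true /* percpu */, false /* onallcpus */);

	/* htab of maps (enabled by the next patch): the value slot holds an
	 * inner map pointer that is swapped in place
	 */
	htab_map_update_elem_in_place(map, key, &ptr, map_flags,
				      false /* percpu */, false /* onallcpus */);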
Acked-by: Andrii Nakryiko
Signed-off-by: Hou Tao
Link: https://lore.kernel.org/r/20250401062250.543403-3-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/hashtab.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 0bebc919bbf7..9778e9871d86 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1258,12 +1258,12 @@ err_lock_bucket:
 	return ret;
 }
 
-static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
+static long htab_map_update_elem_in_place(struct bpf_map *map, void *key,
 					  void *value, u64 map_flags,
-					  bool onallcpus)
+					  bool percpu, bool onallcpus)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct htab_elem *l_new = NULL, *l_old;
+	struct htab_elem *l_new, *l_old;
 	struct hlist_nulls_head *head;
 	unsigned long flags;
 	struct bucket *b;
@@ -1295,19 +1295,18 @@ static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 		goto err;
 
 	if (l_old) {
-		/* per-cpu hash map can update value in-place */
+		/* Update value in-place */
 		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
 				value, onallcpus);
 	} else {
 		l_new = alloc_htab_elem(htab, key, value, key_size,
-					hash, true, onallcpus, NULL);
+					hash, percpu, onallcpus, NULL);
 		if (IS_ERR(l_new)) {
 			ret = PTR_ERR(l_new);
 			goto err;
 		}
 		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 	}
-	ret = 0;
 err:
 	htab_unlock_bucket(b, flags);
 	return ret;
@@ -1386,7 +1385,7 @@ err_lock_bucket:
 static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 					void *value, u64 map_flags)
 {
-	return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
+	return htab_map_update_elem_in_place(map, key, value, map_flags, true, false);
 }
 
 static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
@@ -2407,8 +2406,8 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 		ret = __htab_lru_percpu_map_update_elem(map, key, value,
 							map_flags, true);
 	else
-		ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
-						    true);
+		ret = htab_map_update_elem_in_place(map, key, value, map_flags,
+						    true, true);
 	rcu_read_unlock();
 
 	return ret;

From 2c304172e03193bd02363ee8969444261f7b7a57 Mon Sep 17 00:00:00 2001
From: Hou Tao
Date: Tue, 1 Apr 2025 14:22:47 +0800
Subject: [PATCH 021/135] bpf: Support atomic update for htab of maps

As reported by Cody Haas [1], when there are concurrent map lookup and
map update operations on an existing element in a htab of maps, the map
lookup procedure may return -ENOENT unexpectedly.

The root cause is twofold:

1) the update of an existing element involves two separate list operations
In htab_map_update_elem(), the new element is first inserted at the head
of the list, and the old element is deleted afterwards. Therefore, it is
possible that a lookup operation has already iterated to the middle of
the list when a concurrent update operation begins, and the lookup
operation will fail to find the target element.

2) the immediate reuse of htab elements
This one is more subtle. Even though the lookup operation finds the old
element, it is possible that the target element has been removed by a
concurrent update operation, and the element has been reused immediately
by another update operation which runs on the same CPU as the previous
update operation, and the element is inserted into the same bucket list.
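To make the reuse case concrete, a schematic interleaving (an editor's
illustration, not from the original report; the CPU placement is an
assumption):

	lookup on CPU 0                     update on CPU 1
	---------------                     ---------------
	walk bucket list looking for key K
	                                    delete the element holding K
	                                    element is reused right away by
	                                    another update, now keyed by K2
	                                    reused element re-inserted into
	                                    the same bucket list
	reach the reused element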
After these steps above, when the lookup operation tries to compare the
key in the old element with the expected key, the match will fail
because the key in the old element has been overwritten by another
update operation.

The two-step update process is relatively straightforward to address.
The more challenging aspect is the immediate reuse. As Alexei pointed
out:

  So since 2022 both prealloc and no_prealloc reuse elements. We can
  consider a new flag for the hash map like F_REUSE_AFTER_RCU_GP that
  will use _rcu() flavor of freeing into bpf_ma, but it has to have a
  strong reason.

Given that htab of maps doesn't support special fields in the map value
and directly stores the inner map pointer in htab_elem, just do an
in-place update for htab of maps instead of attempting to address the
immediate reuse issue.

[1]: https://lore.kernel.org/xdp-newbies/CAH7f-ULFTwKdoH_t2SFc5rWCVYLEg-14d1fBYWH2eekudsnTRg@mail.gmail.com/

Acked-by: Andrii Nakryiko
Signed-off-by: Hou Tao
Link: https://lore.kernel.org/r/20250401062250.543403-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/hashtab.c | 44 +++++++++++++++++++++-----------------------
 1 file changed, 21 insertions(+), 23 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 9778e9871d86..4879c79dd677 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1076,10 +1076,9 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 			    u64 map_flags)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	struct htab_elem *l_new = NULL, *l_old;
+	struct htab_elem *l_new, *l_old;
 	struct hlist_nulls_head *head;
 	unsigned long flags;
-	void *old_map_ptr;
 	struct bucket *b;
 	u32 key_size, hash;
 	int ret;
@@ -1160,24 +1159,14 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		hlist_nulls_del_rcu(&l_old->hash_node);
 
 		/* l_old has already been stashed in htab->extra_elems, free
-		 * its special fields before it is available for reuse. Also
-		 * save the old map pointer in htab of maps before unlock
-		 * and release it after unlock.
+		 * its special fields before it is available for reuse.
*/ - old_map_ptr = NULL; - if (htab_is_prealloc(htab)) { - if (map->ops->map_fd_put_ptr) - old_map_ptr = fd_htab_map_get_ptr(map, l_old); + if (htab_is_prealloc(htab)) check_and_free_fields(htab, l_old); - } } htab_unlock_bucket(b, flags); - if (l_old) { - if (old_map_ptr) - map->ops->map_fd_put_ptr(map, old_map_ptr, true); - if (!htab_is_prealloc(htab)) - free_htab_elem(htab, l_old); - } + if (l_old && !htab_is_prealloc(htab)) + free_htab_elem(htab, l_old); return 0; err: htab_unlock_bucket(b, flags); @@ -1265,6 +1254,7 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key, struct bpf_htab *htab = container_of(map, struct bpf_htab, map); struct htab_elem *l_new, *l_old; struct hlist_nulls_head *head; + void *old_map_ptr = NULL; unsigned long flags; struct bucket *b; u32 key_size, hash; @@ -1296,8 +1286,15 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key, if (l_old) { /* Update value in-place */ - pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), - value, onallcpus); + if (percpu) { + pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), + value, onallcpus); + } else { + void **inner_map_pptr = htab_elem_value(l_old, key_size); + + old_map_ptr = *inner_map_pptr; + WRITE_ONCE(*inner_map_pptr, *(void **)value); + } } else { l_new = alloc_htab_elem(htab, key, value, key_size, hash, percpu, onallcpus, NULL); @@ -1309,6 +1306,8 @@ static long htab_map_update_elem_in_place(struct bpf_map *map, void *key, } err: htab_unlock_bucket(b, flags); + if (old_map_ptr) + map->ops->map_fd_put_ptr(map, old_map_ptr, true); return ret; } @@ -2531,24 +2530,23 @@ int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) return ret; } -/* only called from syscall */ +/* Only called from syscall */ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags) { void *ptr; int ret; - u32 ufd = *(u32 *)value; - ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); + ptr = map->ops->map_fd_get_ptr(map, map_file, *(int *)value); if (IS_ERR(ptr)) return PTR_ERR(ptr); /* The htab bucket lock is always held during update operations in fd * htab map, and the following rcu_read_lock() is only used to avoid - * the WARN_ON_ONCE in htab_map_update_elem(). + * the WARN_ON_ONCE in htab_map_update_elem_in_place(). */ rcu_read_lock(); - ret = htab_map_update_elem(map, key, &ptr, map_flags); + ret = htab_map_update_elem_in_place(map, key, &ptr, map_flags, false, false); rcu_read_unlock(); if (ret) map->ops->map_fd_put_ptr(map, ptr, false); From e8a65856c75d518d0bb15f38c90a4fd264ba1d3a Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Tue, 1 Apr 2025 14:22:48 +0800 Subject: [PATCH 022/135] bpf: Add is_fd_htab() helper Add is_fd_htab() helper to check whether the map is htab of maps. 
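For context, a before/after sketch of one of the call sites touched in the
diff below (editor's addition): the predicate simply names the map-type
check, so the open-coded test collapses into a readable one-liner.

	/* before */
	static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
	{
		return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
		       BITS_PER_LONG == 64;
	}

	/* after */
	static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
	{
		return is_fd_htab(htab) && BITS_PER_LONG == 64;
	}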
Acked-by: Andrii Nakryiko Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20250401062250.543403-5-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/hashtab.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 4879c79dd677..097992efef05 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -175,6 +175,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab) htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; } +static inline bool is_fd_htab(const struct bpf_htab *htab) +{ + return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS; +} + static inline void *htab_elem_value(struct htab_elem *l, u32 key_size) { return l->key + round_up(key_size, 8); @@ -974,8 +979,7 @@ static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) { - return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && - BITS_PER_LONG == 64; + return is_fd_htab(htab) && BITS_PER_LONG == 64; } static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, @@ -1810,7 +1814,7 @@ again_nocopy: } } else { value = htab_elem_value(l, key_size); - if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { + if (is_fd_htab(htab)) { struct bpf_map **inner_map = value; /* Actual value is the id of the inner map */ From 6704b1e8cfc5eed264065735fe00a1dd8a0bffef Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Tue, 1 Apr 2025 14:22:49 +0800 Subject: [PATCH 023/135] bpf: Don't allocate per-cpu extra_elems for fd htab The update of element in fd htab is in-place now, therefore, there is no need to allocate per-cpu extra_elems, just remove it. Acked-by: Andrii Nakryiko Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20250401062250.543403-6-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/hashtab.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 097992efef05..2e18d7e50d9b 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -206,9 +206,13 @@ static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); } +/* Both percpu and fd htab support in-place update, so no need for + * extra elem. LRU itself can remove the least used element, so + * there is no need for an extra elem during map_update. + */ static bool htab_has_extra_elems(struct bpf_htab *htab) { - return !htab_is_percpu(htab) && !htab_is_lru(htab); + return !htab_is_percpu(htab) && !htab_is_lru(htab) && !is_fd_htab(htab); } static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab) @@ -464,8 +468,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) { bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); - bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || - attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); /* percpu_lru means each cpu has its own LRU list. * it is different from BPF_MAP_TYPE_PERCPU_HASH where * the map's value itself is percpu. percpu_lru has @@ -560,10 +562,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) if (err) goto free_map_locked; - if (!percpu && !lru) { - /* lru itself can remove the least used element, so - * there is no need for an extra elem during map_update. 
- */ + if (htab_has_extra_elems(htab)) { err = alloc_extra_elems(htab); if (err) goto free_prealloc; From 7c6fb1cf33fb9a7b89a0d9feada957d0fe56de6f Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Tue, 1 Apr 2025 14:22:50 +0800 Subject: [PATCH 024/135] selftests/bpf: Add test case for atomic update of fd htab Add a test case to verify the atomic update of existing elements in the htab of maps. The test proceeds in three steps: 1) fill the outer map with keys in the range [0, 8] For each inner array map, the value of its first element is set as the key used to lookup the inner map. 2) create 16 threads to lookup these keys concurrently Each lookup thread first lookups the inner map, then it checks whether the first value of the inner array map is the same as the key used to lookup the inner map. 3) create 8 threads to overwrite these keys concurrently Each update thread first creates an inner array, it sets the first value of the array to the key used to update the outer map, then it uses the key and the inner map to update the outer map. Without atomic update support, the lookup operation may return -ENOENT during the lookup of outer map, or return -EINVAL during the comparison of the first value in the inner map and the key used for inner map, and the test will fail. After the atomic update change, both the lookup and the comparison will succeed. Given that the update of outer map is slow, the test case sets the loop number for each thread as 5 to reduce the total running time. However, the loop number could also be adjusted through FD_HTAB_LOOP_NR environment variable. Acked-by: Andrii Nakryiko Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20250401062250.543403-7-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/fd_htab_lookup.c | 192 ++++++++++++++++++ .../selftests/bpf/progs/fd_htab_lookup.c | 25 +++ 2 files changed, 217 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c create mode 100644 tools/testing/selftests/bpf/progs/fd_htab_lookup.c diff --git a/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c b/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c new file mode 100644 index 000000000000..ca46fdd6e1ae --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fd_htab_lookup.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. 
Huawei Technologies Co., Ltd */ +#define _GNU_SOURCE +#include +#include +#include "fd_htab_lookup.skel.h" + +struct htab_op_ctx { + int fd; + int loop; + unsigned int entries; + bool stop; +}; + +#define ERR_TO_RETVAL(where, err) ((void *)(long)(((where) << 12) | (-err))) + +static void *htab_lookup_fn(void *arg) +{ + struct htab_op_ctx *ctx = arg; + int i = 0; + + while (i++ < ctx->loop && !ctx->stop) { + unsigned int j; + + for (j = 0; j < ctx->entries; j++) { + unsigned int key = j, zero = 0, value; + int inner_fd, err; + + err = bpf_map_lookup_elem(ctx->fd, &key, &value); + if (err) { + ctx->stop = true; + return ERR_TO_RETVAL(1, err); + } + + inner_fd = bpf_map_get_fd_by_id(value); + if (inner_fd < 0) { + /* The old map has been freed */ + if (inner_fd == -ENOENT) + continue; + ctx->stop = true; + return ERR_TO_RETVAL(2, inner_fd); + } + + err = bpf_map_lookup_elem(inner_fd, &zero, &value); + if (err) { + close(inner_fd); + ctx->stop = true; + return ERR_TO_RETVAL(3, err); + } + close(inner_fd); + + if (value != key) { + ctx->stop = true; + return ERR_TO_RETVAL(4, -EINVAL); + } + } + } + + return NULL; +} + +static void *htab_update_fn(void *arg) +{ + struct htab_op_ctx *ctx = arg; + int i = 0; + + while (i++ < ctx->loop && !ctx->stop) { + unsigned int j; + + for (j = 0; j < ctx->entries; j++) { + unsigned int key = j, zero = 0; + int inner_fd, err; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL); + if (inner_fd < 0) { + ctx->stop = true; + return ERR_TO_RETVAL(1, inner_fd); + } + + err = bpf_map_update_elem(inner_fd, &zero, &key, 0); + if (err) { + close(inner_fd); + ctx->stop = true; + return ERR_TO_RETVAL(2, err); + } + + err = bpf_map_update_elem(ctx->fd, &key, &inner_fd, BPF_EXIST); + if (err) { + close(inner_fd); + ctx->stop = true; + return ERR_TO_RETVAL(3, err); + } + close(inner_fd); + } + } + + return NULL; +} + +static int setup_htab(int fd, unsigned int entries) +{ + unsigned int i; + + for (i = 0; i < entries; i++) { + unsigned int key = i, zero = 0; + int inner_fd, err; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 4, 1, NULL); + if (!ASSERT_OK_FD(inner_fd, "new array")) + return -1; + + err = bpf_map_update_elem(inner_fd, &zero, &key, 0); + if (!ASSERT_OK(err, "init array")) { + close(inner_fd); + return -1; + } + + err = bpf_map_update_elem(fd, &key, &inner_fd, 0); + if (!ASSERT_OK(err, "init outer")) { + close(inner_fd); + return -1; + } + close(inner_fd); + } + + return 0; +} + +static int get_int_from_env(const char *name, int dft) +{ + const char *value; + + value = getenv(name); + if (!value) + return dft; + + return atoi(value); +} + +void test_fd_htab_lookup(void) +{ + unsigned int i, wr_nr = 8, rd_nr = 16; + pthread_t tids[wr_nr + rd_nr]; + struct fd_htab_lookup *skel; + struct htab_op_ctx ctx; + int err; + + skel = fd_htab_lookup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fd_htab_lookup__open_and_load")) + return; + + ctx.fd = bpf_map__fd(skel->maps.outer_map); + ctx.loop = get_int_from_env("FD_HTAB_LOOP_NR", 5); + ctx.stop = false; + ctx.entries = 8; + + err = setup_htab(ctx.fd, ctx.entries); + if (err) + goto destroy; + + memset(tids, 0, sizeof(tids)); + for (i = 0; i < wr_nr; i++) { + err = pthread_create(&tids[i], NULL, htab_update_fn, &ctx); + if (!ASSERT_OK(err, "pthread_create")) { + ctx.stop = true; + goto reap; + } + } + for (i = 0; i < rd_nr; i++) { + err = pthread_create(&tids[i + wr_nr], NULL, htab_lookup_fn, &ctx); + if (!ASSERT_OK(err, "pthread_create")) { + ctx.stop = true; + goto reap; + } + } + +reap: + 
for (i = 0; i < wr_nr + rd_nr; i++) { + void *ret = NULL; + char desc[32]; + + if (!tids[i]) + continue; + + snprintf(desc, sizeof(desc), "thread %u", i + 1); + err = pthread_join(tids[i], &ret); + ASSERT_OK(err, desc); + ASSERT_EQ(ret, NULL, desc); + } +destroy: + fd_htab_lookup__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/fd_htab_lookup.c b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c new file mode 100644 index 000000000000..a4a9e1db626f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fd_htab_lookup.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2025. Huawei Technologies Co., Ltd */ +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct inner_map_type { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(key_size, 4); + __uint(value_size, 4); + __uint(max_entries, 1); +} inner_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, 64); + __type(key, int); + __type(value, int); + __array(values, struct inner_map_type); +} outer_map SEC(".maps") = { + .values = { + [0] = &inner_map, + }, +}; From 7d0b43b68d1cd4256de60b73249397db3c4e16d6 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Tue, 15 Apr 2025 14:14:59 +0800 Subject: [PATCH 025/135] selftest/bpf/benchs: Remove duplicate sys/types.h header ./tools/testing/selftests/bpf/benchs/bench_sockmap.c: sys/types.h is included more than once. Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=20436 Signed-off-by: Jiapeng Chong Signed-off-by: Martin KaFai Lau Link: https://patch.msgid.link/20250415061459.11644-1-jiapeng.chong@linux.alibaba.com --- tools/testing/selftests/bpf/benchs/bench_sockmap.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/bpf/benchs/bench_sockmap.c b/tools/testing/selftests/bpf/benchs/bench_sockmap.c index 54f4e7c03cd2..8ebf563a67a2 100644 --- a/tools/testing/selftests/bpf/benchs/bench_sockmap.c +++ b/tools/testing/selftests/bpf/benchs/bench_sockmap.c @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include From 21cb33c7e06533229df01c05248edbd23cefc89c Mon Sep 17 00:00:00 2001 From: Ihor Solodrai Date: Mon, 14 Apr 2025 11:59:18 -0700 Subject: [PATCH 026/135] kbuild, bpf: Enable --btf_features=attributes pahole v1.30 has a BTF encoding feature for arbitrary attributes, used in particular for tagging bpf_arena_alloc_pages and bpf_arena_free_pages BPF kfuncs [1][2]. Enable it for the kernel build. [1] https://lore.kernel.org/bpf/20250130201239.1429648-1-ihor.solodrai@linux.dev/ [2] https://lore.kernel.org/bpf/20250228194654.1022535-1-ihor.solodrai@linux.dev/ Signed-off-by: Ihor Solodrai Signed-off-by: Martin KaFai Lau Tested-by: Eduard Zingerman Link: https://patch.msgid.link/20250414185918.538195-1-ihor.solodrai@linux.dev --- scripts/Makefile.btf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/Makefile.btf b/scripts/Makefile.btf index fbaaec2187e5..db76335dd917 100644 --- a/scripts/Makefile.btf +++ b/scripts/Makefile.btf @@ -23,6 +23,8 @@ else # Switch to using --btf_features for v1.26 and later. 
 pahole-flags-$(call test-ge, $(pahole-ver), 126)  = -j$(JOBS) --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs
 
+pahole-flags-$(call test-ge, $(pahole-ver), 130) += --btf_features=attributes
+
 ifneq ($(KBUILD_EXTMOD),)
 module-pahole-flags-$(call test-ge, $(pahole-ver), 128) += --btf_features=distilled_base
 endif

From ee684de5c1b0ac01821320826baec7da93f3615b Mon Sep 17 00:00:00 2001
From: Viktor Malik
Date: Tue, 15 Apr 2025 17:50:14 +0200
Subject: [PATCH 027/135] libbpf: Fix buffer overflow in bpf_object__init_prog

As shown in [1], it is possible to corrupt a BPF ELF file such that
arbitrary BPF instructions are loaded by libbpf. This can be done by
setting a symbol (BPF program) section offset to a large (unsigned)
number such that the computed program start address (sec_start +
symbol_offset)
overflows and points before the section data in the memory. Consider the situation below where: - prog_start = sec_start + symbol_offset <-- size_t overflow here - prog_end = prog_start + prog_size prog_start sec_start prog_end sec_end | | | | v v v v .....................|################################|............ The report in [1] also provides a corrupted BPF ELF which can be used as a reproducer: $ readelf -S crash Section Headers: [Nr] Name Type Address Offset Size EntSize Flags Link Info Align ... [ 2] uretprobe.mu[...] PROGBITS 0000000000000000 00000040 0000000000000068 0000000000000000 AX 0 0 8 $ readelf -s crash Symbol table '.symtab' contains 8 entries: Num: Value Size Type Bind Vis Ndx Name ... 6: ffffffffffffffb8 104 FUNC GLOBAL DEFAULT 2 handle_tp Here, the handle_tp prog has section offset ffffffffffffffb8, i.e. will point before the actual memory where section 2 is allocated. This is also reported by AddressSanitizer: ================================================================= ==1232==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x7c7302fe0000 at pc 0x7fc3046e4b77 bp 0x7ffe64677cd0 sp 0x7ffe64677490 READ of size 104 at 0x7c7302fe0000 thread T0 #0 0x7fc3046e4b76 in memcpy (/lib64/libasan.so.8+0xe4b76) #1 0x00000040df3e in bpf_object__init_prog /src/libbpf/src/libbpf.c:856 #2 0x00000040df3e in bpf_object__add_programs /src/libbpf/src/libbpf.c:928 #3 0x00000040df3e in bpf_object__elf_collect /src/libbpf/src/libbpf.c:3930 #4 0x00000040df3e in bpf_object_open /src/libbpf/src/libbpf.c:8067 #5 0x00000040f176 in bpf_object__open_file /src/libbpf/src/libbpf.c:8090 #6 0x000000400c16 in main /poc/poc.c:8 #7 0x7fc3043d25b4 in __libc_start_call_main (/lib64/libc.so.6+0x35b4) #8 0x7fc3043d2667 in __libc_start_main@@GLIBC_2.34 (/lib64/libc.so.6+0x3667) #9 0x000000400b34 in _start (/poc/poc+0x400b34) 0x7c7302fe0000 is located 64 bytes before 104-byte region [0x7c7302fe0040,0x7c7302fe00a8) allocated by thread T0 here: #0 0x7fc3046e716b in malloc (/lib64/libasan.so.8+0xe716b) #1 0x7fc3045ee600 in __libelf_set_rawdata_wrlock (/lib64/libelf.so.1+0xb600) #2 0x7fc3045ef018 in __elf_getdata_rdlock (/lib64/libelf.so.1+0xc018) #3 0x00000040642f in elf_sec_data /src/libbpf/src/libbpf.c:3740 The problem here is that currently, libbpf only checks that the program end is within the section bounds. There used to be a check `while (sec_off < sec_sz)` in bpf_object__add_programs, however, it was removed by commit 6245947c1b3c ("libbpf: Allow gaps in BPF program sections to support overriden weak functions"). Add a check for detecting the overflow of `sec_off + prog_sz` to bpf_object__init_prog to fix this issue. 
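In isolation, the added guard is the standard unsigned wrap-around idiom. A
self-contained sketch (editor's illustration, not libbpf code) using the
offset and size from the report above:

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* true iff [sec_off, sec_off + prog_sz) fits inside a section of
	 * sec_sz bytes; 'sec_off + prog_sz < sec_off' catches size_t overflow
	 */
	static bool prog_in_bounds(size_t sec_off, size_t prog_sz, size_t sec_sz)
	{
		return !(sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off);
	}

	int main(void)
	{
		/* sane layout: 104-byte prog at the start of a 104-byte section */
		assert(prog_in_bounds(0, 104, 104));
		/* the corrupted case: 0xffffffffffffffb8 wraps around to 0x20 */
		assert(!prog_in_bounds(SIZE_MAX - 0x47, 104, 104));
		return 0;
	}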
[1] https://github.com/lmarch2/poc/blob/main/libbpf/libbpf.md Fixes: 6245947c1b3c ("libbpf: Allow gaps in BPF program sections to support overriden weak functions") Reported-by: lmarch2 <2524158037@qq.com> Signed-off-by: Viktor Malik Signed-off-by: Andrii Nakryiko Reviewed-by: Shung-Hsi Yu Link: https://github.com/lmarch2/poc/blob/main/libbpf/libbpf.md Link: https://lore.kernel.org/bpf/20250415155014.397603-1-vmalik@redhat.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b2591f5cab65..56250b5ac5b0 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -896,7 +896,7 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data, return -LIBBPF_ERRNO__FORMAT; } - if (sec_off + prog_sz > sec_sz) { + if (sec_off + prog_sz > sec_sz || sec_off + prog_sz < sec_off) { pr_warn("sec '%s': program at offset %zu crosses section boundary\n", sec_name, sec_off); return -LIBBPF_ERRNO__FORMAT; From 8582d9ab3efdebb88e0cd8beed8e0b9de76443e7 Mon Sep 17 00:00:00 2001 From: Ihor Solodrai Date: Thu, 10 Apr 2025 11:28:23 -0700 Subject: [PATCH 028/135] libbpf: Verify section type in btf_find_elf_sections A valid ELF file may contain a SHT_NOBITS .BTF section. This case is not handled correctly in btf_parse_elf, which leads to a segfault. Before attempting to load BTF section data, check that the section type is SHT_PROGBITS, which is the expected type for BTF data. Fail with an error if the type is different. Bug report: https://github.com/libbpf/libbpf/issues/894 v1: https://lore.kernel.org/bpf/20250408184104.3962949-1-ihor.solodrai@linux.dev/ Signed-off-by: Ihor Solodrai Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250410182823.1591681-1-ihor.solodrai@linux.dev --- tools/lib/bpf/btf.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 38bc6b14b066..24fc71ce5631 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -1148,6 +1148,12 @@ static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs else continue; + if (sh.sh_type != SHT_PROGBITS) { + pr_warn("unexpected section type (%d) of section(%d, %s) from %s\n", + sh.sh_type, idx, name, path); + goto err; + } + data = elf_getdata(scn, 0); if (!data) { pr_warn("failed to get section(%d, %s) data from %s\n", From be2fea9c07d40a0a897580166e3d43c53ef3b75b Mon Sep 17 00:00:00 2001 From: Malaya Kumar Rout Date: Mon, 21 Apr 2025 23:14:05 +0530 Subject: [PATCH 029/135] selftests/bpf: Close the file descriptor to avoid resource leaks Static analysis found an issue in bench_htab_mem.c and sk_assign.c cppcheck output before this patch: tools/testing/selftests/bpf/benchs/bench_htab_mem.c:284:3: error: Resource leak: fd [resourceLeak] tools/testing/selftests/bpf/prog_tests/sk_assign.c:41:3: error: Resource leak: tc [resourceLeak] cppcheck output after this patch: No resource leaks found Fix the issue by closing the file descriptors fd and tc. 
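The underlying pattern is the same in both spots (a generic sketch of the
fix, not the selftest code itself; the function and path names here are made
up): close the descriptor right after its last use, so every early return
below it is leak-free.

	#include <fcntl.h>
	#include <stdlib.h>
	#include <unistd.h>

	static unsigned long read_counter_file(const char *path)
	{
		char buf[32];
		ssize_t got;
		int fd;

		fd = open(path, O_RDONLY);
		if (fd < 0)
			return 0;
		got = read(fd, buf, sizeof(buf) - 1);
		close(fd);		/* closed before any early return below */
		if (got <= 0)
			return 0;	/* no leak: fd is already closed */
		buf[got] = 0;
		return strtoul(buf, NULL, 0);
	}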
Signed-off-by: Malaya Kumar Rout Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250421174405.26080-1-malayarout91@gmail.com --- tools/testing/selftests/bpf/benchs/bench_htab_mem.c | 3 +-- tools/testing/selftests/bpf/prog_tests/sk_assign.c | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/benchs/bench_htab_mem.c b/tools/testing/selftests/bpf/benchs/bench_htab_mem.c index 926ee822143e..297e32390cd1 100644 --- a/tools/testing/selftests/bpf/benchs/bench_htab_mem.c +++ b/tools/testing/selftests/bpf/benchs/bench_htab_mem.c @@ -279,6 +279,7 @@ static void htab_mem_read_mem_cgrp_file(const char *name, unsigned long *value) } got = read(fd, buf, sizeof(buf) - 1); + close(fd); if (got <= 0) { *value = 0; return; @@ -286,8 +287,6 @@ static void htab_mem_read_mem_cgrp_file(const char *name, unsigned long *value) buf[got] = 0; *value = strtoull(buf, NULL, 0); - - close(fd); } static void htab_mem_measure(struct bench_res *res) diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c index 0b9bd1d6f7cc..10a0ab954b8a 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -37,8 +37,10 @@ configure_stack(void) tc = popen("tc -V", "r"); if (CHECK_FAIL(!tc)) return false; - if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) + if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) { + pclose(tc); return false; + } if (strstr(tc_version, ", libbpf ")) prog = "test_sk_assign_libbpf.bpf.o"; else From 4dde20b1aa85d69c4281eaac9a7cfa7d2b62ecf0 Mon Sep 17 00:00:00 2001 From: Feng Yang Date: Thu, 17 Apr 2025 09:48:46 +0800 Subject: [PATCH 030/135] libbpf: Fix event name too long error When the binary path is excessively long, the generated probe_name in libbpf exceeds the kernel's MAX_EVENT_NAME_LEN limit (64 bytes). This causes legacy uprobe event attachment to fail with error code -22. The fix reorders the fields to place the unique ID before the name. This ensures that even if truncation occurs via snprintf, the unique ID remains intact, preserving event name uniqueness. Additionally, explicit checks with MAX_EVENT_NAME_LEN are added to enforce length constraints. Before Fix: ./test_progs -t attach_probe/kprobe-long_name ...... libbpf: failed to add legacy kprobe event for 'bpf_testmod_looooooooooooooooooooooooooooooong_name+0x0': -EINVAL libbpf: prog 'handle_kprobe': failed to create kprobe 'bpf_testmod_looooooooooooooooooooooooooooooong_name+0x0' perf event: -EINVAL test_attach_kprobe_long_event_name:FAIL:attach_kprobe_long_event_name unexpected error: -22 test_attach_probe:PASS:uprobe_ref_ctr_cleanup 0 nsec #13/11 attach_probe/kprobe-long_name:FAIL #13 attach_probe:FAIL ./test_progs -t attach_probe/uprobe-long_name ...... 
libbpf: failed to add legacy uprobe event for /root/linux-bpf/bpf-next/tools/testing/selftests/bpf/test_progs:0x13efd9: -EINVAL libbpf: prog 'handle_uprobe': failed to create uprobe '/root/linux-bpf/bpf-next/tools/testing/selftests/bpf/test_progs:0x13efd9' perf event: -EINVAL test_attach_uprobe_long_event_name:FAIL:attach_uprobe_long_event_name unexpected error: -22 #13/10 attach_probe/uprobe-long_name:FAIL #13 attach_probe:FAIL After Fix: ./test_progs -t attach_probe/uprobe-long_name #13/10 attach_probe/uprobe-long_name:OK #13 attach_probe:OK Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED ./test_progs -t attach_probe/kprobe-long_name #13/11 attach_probe/kprobe-long_name:OK #13 attach_probe:OK Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED Fixes: 46ed5fc33db9 ("libbpf: Refactor and simplify legacy kprobe code") Fixes: cc10623c6810 ("libbpf: Add legacy uprobe attaching support") Signed-off-by: Hengqi Chen Signed-off-by: Feng Yang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250417014848.59321-2-yangfeng59949@163.com --- tools/lib/bpf/libbpf.c | 43 ++++++++++++++++-------------------------- 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 56250b5ac5b0..fbb991c6bf19 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -60,6 +60,8 @@ #define BPF_FS_MAGIC 0xcafe4a11 #endif +#define MAX_EVENT_NAME_LEN 64 + #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf" #define BPF_INSN_SZ (sizeof(struct bpf_insn)) @@ -11136,16 +11138,16 @@ static const char *tracefs_available_filter_functions_addrs(void) : TRACEFS"/available_filter_functions_addrs"; } -static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz, - const char *kfunc_name, size_t offset) +static void gen_probe_legacy_event_name(char *buf, size_t buf_sz, + const char *name, size_t offset) { static int index = 0; int i; - snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset, - __sync_fetch_and_add(&index, 1)); + snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx", getpid(), + __sync_fetch_and_add(&index, 1), name, offset); - /* sanitize binary_path in the probe name */ + /* sanitize name in the probe name */ for (i = 0; buf[i]; i++) { if (!isalnum(buf[i])) buf[i] = '_'; @@ -11270,9 +11272,9 @@ int probe_kern_syscall_wrapper(int token_fd) return pfd >= 0 ? 
1 : 0; } else { /* legacy mode */ - char probe_name[128]; + char probe_name[MAX_EVENT_NAME_LEN]; - gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0); if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0) return 0; @@ -11328,10 +11330,10 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, func_name, offset, -1 /* pid */, 0 /* ref_ctr_off */); } else { - char probe_name[256]; + char probe_name[MAX_EVENT_NAME_LEN]; - gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), - func_name, offset); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), + func_name, offset); legacy_probe = strdup(probe_name); if (!legacy_probe) @@ -11875,20 +11877,6 @@ static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, stru return ret; } -static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, - const char *binary_path, uint64_t offset) -{ - int i; - - snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset); - - /* sanitize binary_path in the probe name */ - for (i = 0; buf[i]; i++) { - if (!isalnum(buf[i])) - buf[i] = '_'; - } -} - static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe, const char *binary_path, size_t offset) { @@ -12312,13 +12300,14 @@ bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path, func_offset, pid, ref_ctr_off); } else { - char probe_name[PATH_MAX + 64]; + char probe_name[MAX_EVENT_NAME_LEN]; if (ref_ctr_off) return libbpf_err_ptr(-EINVAL); - gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name), - binary_path, func_offset); + gen_probe_legacy_event_name(probe_name, sizeof(probe_name), + strrchr(binary_path, '/') ? : binary_path, + func_offset); legacy_probe = strdup(probe_name); if (!legacy_probe) From e1be7c45d24434bc6e04b675ae91c049e50447be Mon Sep 17 00:00:00 2001 From: Feng Yang Date: Thu, 17 Apr 2025 09:48:47 +0800 Subject: [PATCH 031/135] selftests/bpf: Add test for attaching uprobe with long event names This test verifies that attaching uprobe/uretprobe with long event names does not trigger EINVAL errors. 
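The property the fix (and this test) relies on can be seen in a
standalone sketch of the new naming scheme; it mirrors the idea of
gen_probe_legacy_event_name() but is not the libbpf code itself (the
real code bumps the counter atomically). With a 64-byte buffer the long
symbol is truncated either way, but because the pid and counter now
come first, the truncated names stay distinct:

  #include <stdio.h>
  #include <ctype.h>
  #include <unistd.h>

  #define MAX_EVENT_NAME_LEN 64

  static int index_counter;

  static void gen_name(char *buf, size_t buf_sz, const char *name, size_t off)
  {
      int i;

      snprintf(buf, buf_sz, "libbpf_%u_%d_%s_0x%zx",
               (unsigned int)getpid(), index_counter++, name, off);
      /* tracefs event names may only contain [a-zA-Z0-9_] */
      for (i = 0; buf[i]; i++)
          if (!isalnum((unsigned char)buf[i]))
              buf[i] = '_';
  }

  int main(void)
  {
      const char *sym = "bpf_testmod_looooooooooooooooooooooooooooooong_name";
      char a[MAX_EVENT_NAME_LEN], b[MAX_EVENT_NAME_LEN];

      gen_name(a, sizeof(a), sym, 0);
      gen_name(b, sizeof(b), sym, 0);
      /* both names get truncated, but the embedded counter keeps them unique */
      printf("%s\n%s\n", a, b);
      return 0;
  }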
Signed-off-by: Feng Yang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250417014848.59321-3-yangfeng59949@163.com --- .../selftests/bpf/prog_tests/attach_probe.c | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 329c7862b52d..9b7f36f39c32 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -122,6 +122,52 @@ cleanup: test_attach_probe_manual__destroy(skel); } +/* attach uprobe/uretprobe long event name testings */ +static void test_attach_uprobe_long_event_name(void) +{ + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); + struct bpf_link *uprobe_link, *uretprobe_link; + struct test_attach_probe_manual *skel; + ssize_t uprobe_offset; + char path[PATH_MAX] = {0}; + + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + + uprobe_offset = get_uprobe_offset(&trigger_func); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) + goto cleanup; + + if (!ASSERT_GT(readlink("/proc/self/exe", path, PATH_MAX - 1), 0, "readlink")) + goto cleanup; + + /* manual-attach uprobe/uretprobe */ + uprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY; + uprobe_opts.ref_ctr_offset = 0; + uprobe_opts.retprobe = false; + uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, + 0 /* self pid */, + path, + uprobe_offset, + &uprobe_opts); + if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_long_event_name")) + goto cleanup; + skel->links.handle_uprobe = uprobe_link; + + uprobe_opts.retprobe = true; + uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, + -1 /* any pid */, + path, + uprobe_offset, &uprobe_opts); + if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_long_event_name")) + goto cleanup; + skel->links.handle_uretprobe = uretprobe_link; + +cleanup: + test_attach_probe_manual__destroy(skel); +} + static void test_attach_probe_auto(struct test_attach_probe *skel) { struct bpf_link *uprobe_err_link; @@ -323,6 +369,9 @@ void test_attach_probe(void) if (test__start_subtest("uprobe-ref_ctr")) test_uprobe_ref_ctr(skel); + if (test__start_subtest("uprobe-long_name")) + test_attach_uprobe_long_event_name(); + cleanup: test_attach_probe__destroy(skel); ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup"); From 9b72f3e5b76007d8ef9c7743561d4c9298a086ab Mon Sep 17 00:00:00 2001 From: Feng Yang Date: Thu, 17 Apr 2025 09:48:48 +0800 Subject: [PATCH 032/135] selftests/bpf: Add test for attaching kprobe with long event names This test verifies that attaching kprobe/kretprobe with long event names does not trigger EINVAL errors. 
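For context, a simplified sketch of what the legacy attach path does
when perf_event_open() kprobe support is unavailable: it registers the
event by appending a line to tracefs, and the event name written there
is what the kernel's 64-byte limit applies to. The tracefs path and
line format below are simplified assumptions, not the exact libbpf
code, and running it needs root plus a mounted tracefs:

  #include <stdio.h>
  #include <errno.h>

  static int add_kprobe_event(const char *event, const char *func,
                              size_t off, int retprobe)
  {
      FILE *f = fopen("/sys/kernel/tracing/kprobe_events", "a");
      int ret = 0;

      if (!f)
          return -errno;
      /* e.g. "p:libbpf_1234_0_do_sys_open_0x0 do_sys_open+0x0" */
      if (fprintf(f, "%c:%s %s+0x%zx\n",
                  retprobe ? 'r' : 'p', event, func, off) < 0)
          ret = -errno;
      fclose(f);
      return ret;
  }

  int main(void)
  {
      /* an over-long event name is where the kernel returns -EINVAL */
      int err = add_kprobe_event("libbpf_demo_event", "do_sys_open", 0, 0);

      printf("add_kprobe_event: %d\n", err);
      return 0;
  }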
Signed-off-by: Feng Yang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250417014848.59321-4-yangfeng59949@163.com --- .../selftests/bpf/prog_tests/attach_probe.c | 35 +++++++++++++++++++ .../selftests/bpf/test_kmods/bpf_testmod.c | 4 +++ 2 files changed, 39 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 9b7f36f39c32..cabc51c2ca6b 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -168,6 +168,39 @@ cleanup: test_attach_probe_manual__destroy(skel); } +/* attach kprobe/kretprobe long event name testings */ +static void test_attach_kprobe_long_event_name(void) +{ + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); + struct bpf_link *kprobe_link, *kretprobe_link; + struct test_attach_probe_manual *skel; + + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + + /* manual-attach kprobe/kretprobe */ + kprobe_opts.attach_mode = PROBE_ATTACH_MODE_LEGACY; + kprobe_opts.retprobe = false; + kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + "bpf_testmod_looooooooooooooooooooooooooooooong_name", + &kprobe_opts); + if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe_long_event_name")) + goto cleanup; + skel->links.handle_kprobe = kprobe_link; + + kprobe_opts.retprobe = true; + kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, + "bpf_testmod_looooooooooooooooooooooooooooooong_name", + &kprobe_opts); + if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe_long_event_name")) + goto cleanup; + skel->links.handle_kretprobe = kretprobe_link; + +cleanup: + test_attach_probe_manual__destroy(skel); +} + static void test_attach_probe_auto(struct test_attach_probe *skel) { struct bpf_link *uprobe_err_link; @@ -371,6 +404,8 @@ void test_attach_probe(void) if (test__start_subtest("uprobe-long_name")) test_attach_uprobe_long_event_name(); + if (test__start_subtest("kprobe-long_name")) + test_attach_kprobe_long_event_name(); cleanup: test_attach_probe__destroy(skel); diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c index f38eaf0d35ef..2e54b95ad898 100644 --- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c @@ -134,6 +134,10 @@ bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) { return bpf_testmod_test_struct_arg_result; } +__weak noinline void bpf_testmod_looooooooooooooooooooooooooooooong_name(void) +{ +} + __bpf_kfunc void bpf_testmod_test_mod_kfunc(int i) { From 53ebef53a657d7957d35dc2b953db64f1bb28065 Mon Sep 17 00:00:00 2001 From: Shung-Hsi Yu Date: Fri, 18 Apr 2025 15:49:43 +0800 Subject: [PATCH 033/135] bpf: Use proper type to calculate bpf_raw_tp_null_args.mask index The calculation of the index used to access the mask field in 'struct bpf_raw_tp_null_args' is done with 'int' type, which could overflow when the tracepoint being attached has more than 8 arguments. While none of the tracepoints mentioned in raw_tp_null_args[] currently have more than 8 arguments, there do exist tracepoints that had more than 8 arguments (e.g. iocost_iocg_forgive_debt), so use the correct type for calculation and avoid Smatch static checker warning. 
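A minimal demo of why the constant's type matters: for arg >= 8 the
shift count reaches 32, and shifting the 32-bit int constant 0x1 by 32
or more bits is undefined behavior, so the upper half of the u64 mask
can never be tested reliably. The 0x1ULL form performs the shift in 64
bits:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
      /* pretend the tracepoint's 9th arg (arg == 9) is "may be NULL" */
      uint64_t mask = 0x1ULL << (9 * 4);
      int arg = 9;

      /* buggy: (0x1 << (arg * 4)) shifts an int by 36 bits, which is UB */
      /* printf("%d\n", !!(mask & (0x1 << (arg * 4)))); */

      /* fixed: promote to 64 bits before shifting */
      printf("arg %d may be NULL: %d\n", arg,
             !!(mask & (0x1ULL << (arg * 4))));
      return 0;
  }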
Reported-by: Dan Carpenter Signed-off-by: Shung-Hsi Yu Signed-off-by: Andrii Nakryiko Acked-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/bpf/20250418074946.35569-1-shung-hsi.yu@suse.com Closes: https://lore.kernel.org/r/843a3b94-d53d-42db-93d4-be10a4090146@stanley.mountain/ --- kernel/bpf/btf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 16ba36f34dfa..656ee11aff67 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6829,10 +6829,10 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, /* Is this a func with potential NULL args? */ if (strcmp(tname, raw_tp_null_args[i].func)) continue; - if (raw_tp_null_args[i].mask & (0x1 << (arg * 4))) + if (raw_tp_null_args[i].mask & (0x1ULL << (arg * 4))) info->reg_type |= PTR_MAYBE_NULL; /* Is the current arg IS_ERR? */ - if (raw_tp_null_args[i].mask & (0x2 << (arg * 4))) + if (raw_tp_null_args[i].mask & (0x2ULL << (arg * 4))) ptr_err_raw_tp = true; break; } From 6aca583f90b0eb159cfd79c1b7f28d7c0108aed6 Mon Sep 17 00:00:00 2001 From: Feng Yang Date: Wed, 23 Apr 2025 15:31:51 +0800 Subject: [PATCH 034/135] bpf: Streamline allowed helpers between tracing and base sets Many conditional checks in switch-case are redundant with bpf_base_func_proto and should be removed. Regarding the permission checks bpf_base_func_proto: The permission checks in bpf_prog_load (as outlined below) ensure that the trace has both CAP_BPF and CAP_PERFMON capabilities, thus enabling the use of corresponding prototypes in bpf_base_func_proto without adverse effects. bpf_prog_load ...... bpf_cap = bpf_token_capable(token, CAP_BPF); ...... if (type != BPF_PROG_TYPE_SOCKET_FILTER && type != BPF_PROG_TYPE_CGROUP_SKB && !bpf_cap) goto put_token; ...... if (is_perfmon_prog_type(type) && !bpf_token_capable(token, CAP_PERFMON)) goto put_token; ...... 
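The resulting shape of the resolver, sketched with stand-in types and
helper IDs rather than the kernel's: keep only the tracing-specific
helpers in the switch and let everything else fall through to the base
resolver, which performs its own capability-gated filtering.

  #include <stdio.h>

  struct func_proto { const char *name; };

  static const struct func_proto send_signal_proto = { "send_signal" };
  static const struct func_proto map_lookup_proto = { "map_lookup_elem" };

  /* stand-in for bpf_base_func_proto(): common helpers live here */
  static const struct func_proto *base_func_proto(int func_id)
  {
      return func_id == 1 ? &map_lookup_proto : NULL;
  }

  static const struct func_proto *tracing_func_proto(int func_id)
  {
      switch (func_id) {
      case 100: /* a tracing-only helper */
          return &send_signal_proto;
      default:
          break;
      }
      /* helpers that used to be duplicated above now resolve here */
      return base_func_proto(func_id);
  }

  int main(void)
  {
      printf("%s\n", tracing_func_proto(1)->name);
      printf("%s\n", tracing_func_proto(100)->name);
      return 0;
  }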
Signed-off-by: Feng Yang Signed-off-by: Andrii Nakryiko Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20250423073151.297103-1-yangfeng59949@163.com --- kernel/trace/bpf_trace.c | 72 ---------------------------------------- 1 file changed, 72 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0f5906f43d7c..52c432a44aeb 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1430,56 +1430,14 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) const struct bpf_func_proto *func_proto; switch (func_id) { - case BPF_FUNC_map_lookup_elem: - return &bpf_map_lookup_elem_proto; - case BPF_FUNC_map_update_elem: - return &bpf_map_update_elem_proto; - case BPF_FUNC_map_delete_elem: - return &bpf_map_delete_elem_proto; - case BPF_FUNC_map_push_elem: - return &bpf_map_push_elem_proto; - case BPF_FUNC_map_pop_elem: - return &bpf_map_pop_elem_proto; - case BPF_FUNC_map_peek_elem: - return &bpf_map_peek_elem_proto; - case BPF_FUNC_map_lookup_percpu_elem: - return &bpf_map_lookup_percpu_elem_proto; - case BPF_FUNC_ktime_get_ns: - return &bpf_ktime_get_ns_proto; - case BPF_FUNC_ktime_get_boot_ns: - return &bpf_ktime_get_boot_ns_proto; - case BPF_FUNC_tail_call: - return &bpf_tail_call_proto; - case BPF_FUNC_get_current_task: - return &bpf_get_current_task_proto; - case BPF_FUNC_get_current_task_btf: - return &bpf_get_current_task_btf_proto; - case BPF_FUNC_task_pt_regs: - return &bpf_task_pt_regs_proto; case BPF_FUNC_get_current_uid_gid: return &bpf_get_current_uid_gid_proto; case BPF_FUNC_get_current_comm: return &bpf_get_current_comm_proto; - case BPF_FUNC_trace_printk: - return bpf_get_trace_printk_proto(); case BPF_FUNC_get_smp_processor_id: return &bpf_get_smp_processor_id_proto; - case BPF_FUNC_get_numa_node_id: - return &bpf_get_numa_node_id_proto; case BPF_FUNC_perf_event_read: return &bpf_perf_event_read_proto; - case BPF_FUNC_get_prandom_u32: - return &bpf_get_prandom_u32_proto; - case BPF_FUNC_probe_read_user: - return &bpf_probe_read_user_proto; - case BPF_FUNC_probe_read_kernel: - return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? - NULL : &bpf_probe_read_kernel_proto; - case BPF_FUNC_probe_read_user_str: - return &bpf_probe_read_user_str_proto; - case BPF_FUNC_probe_read_kernel_str: - return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? - NULL : &bpf_probe_read_kernel_str_proto; #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE case BPF_FUNC_probe_read: return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 
@@ -1489,10 +1447,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) NULL : &bpf_probe_read_compat_str_proto; #endif #ifdef CONFIG_CGROUPS - case BPF_FUNC_cgrp_storage_get: - return &bpf_cgrp_storage_get_proto; - case BPF_FUNC_cgrp_storage_delete: - return &bpf_cgrp_storage_delete_proto; case BPF_FUNC_current_task_under_cgroup: return &bpf_current_task_under_cgroup_proto; #endif @@ -1500,20 +1454,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_send_signal_proto; case BPF_FUNC_send_signal_thread: return &bpf_send_signal_thread_proto; - case BPF_FUNC_perf_event_read_value: - return &bpf_perf_event_read_value_proto; - case BPF_FUNC_ringbuf_output: - return &bpf_ringbuf_output_proto; - case BPF_FUNC_ringbuf_reserve: - return &bpf_ringbuf_reserve_proto; - case BPF_FUNC_ringbuf_submit: - return &bpf_ringbuf_submit_proto; - case BPF_FUNC_ringbuf_discard: - return &bpf_ringbuf_discard_proto; - case BPF_FUNC_ringbuf_query: - return &bpf_ringbuf_query_proto; - case BPF_FUNC_jiffies64: - return &bpf_jiffies64_proto; case BPF_FUNC_get_task_stack: return prog->sleepable ? &bpf_get_task_stack_sleepable_proto : &bpf_get_task_stack_proto; @@ -1521,12 +1461,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_copy_from_user_proto; case BPF_FUNC_copy_from_user_task: return &bpf_copy_from_user_task_proto; - case BPF_FUNC_snprintf_btf: - return &bpf_snprintf_btf_proto; - case BPF_FUNC_per_cpu_ptr: - return &bpf_per_cpu_ptr_proto; - case BPF_FUNC_this_cpu_ptr: - return &bpf_this_cpu_ptr_proto; case BPF_FUNC_task_storage_get: if (bpf_prog_check_recur(prog)) return &bpf_task_storage_get_recur_proto; @@ -1535,18 +1469,12 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) if (bpf_prog_check_recur(prog)) return &bpf_task_storage_delete_recur_proto; return &bpf_task_storage_delete_proto; - case BPF_FUNC_for_each_map_elem: - return &bpf_for_each_map_elem_proto; - case BPF_FUNC_snprintf: - return &bpf_snprintf_proto; case BPF_FUNC_get_func_ip: return &bpf_get_func_ip_proto_tracing; case BPF_FUNC_get_branch_snapshot: return &bpf_get_branch_snapshot_proto; case BPF_FUNC_find_vma: return &bpf_find_vma_proto; - case BPF_FUNC_trace_vprintk: - return bpf_get_trace_vprintk_proto(); default: break; } From 1271a40eeafa8e9b5b76c4d02e2b3812cbc3c280 Mon Sep 17 00:00:00 2001 From: KaFai Wan Date: Wed, 23 Apr 2025 20:13:28 +0800 Subject: [PATCH 035/135] bpf: Allow access to const void pointer arguments in tracing programs Adding support to access arguments with const void pointer arguments in tracing programs. Currently we allow tracing programs to access void pointers. If we try to access argument which is pointer to const void like 2nd argument in kfree, verifier will fail to load the program with; 0: R1=ctx() R10=fp0 ; asm volatile ("r2 = *(u64 *)(r1 + 8); "); 0: (79) r2 = *(u64 *)(r1 +8) func 'kfree' arg1 type UNKNOWN is not a struct Changing the is_int_ptr to void and generic integer check and renaming it to is_void_or_int_ptr. 
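A toy model of the new check, with simplified stand-ins for the BTF
kinds and helpers: walk const/volatile modifiers off the pointer's
target and accept if what remains is void or an integer, which is
conceptually what btf_type_skip_modifiers() plus the renamed predicate
do.

  #include <stdio.h>

  enum kind { KIND_VOID, KIND_INT, KIND_CONST, KIND_VOLATILE, KIND_PTR };

  struct type { enum kind kind; int next; }; /* next: target type index */

  static const struct type types[] = {
      [0] = { KIND_VOID, 0 },
      [1] = { KIND_CONST, 0 },  /* const void */
      [2] = { KIND_PTR, 1 },    /* const void *, like kfree's 2nd arg */
      [3] = { KIND_INT, 0 },
      [4] = { KIND_PTR, 3 },    /* int * */
  };

  static int skip_modifiers(int id)
  {
      while (types[id].kind == KIND_CONST || types[id].kind == KIND_VOLATILE)
          id = types[id].next;
      return id;
  }

  static int is_void_or_int_ptr(int ptr_id)
  {
      int t = skip_modifiers(types[ptr_id].next);

      return types[t].kind == KIND_VOID || types[t].kind == KIND_INT;
  }

  int main(void)
  {
      printf("const void *: %d\n", is_void_or_int_ptr(2)); /* now accepted */
      printf("int *: %d\n", is_void_or_int_ptr(4));
      return 0;
  }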
Signed-off-by: KaFai Wan
Signed-off-by: Andrii Nakryiko
Acked-by: Jiri Olsa
Link: https://lore.kernel.org/bpf/20250423121329.3163461-2-mannkafai@gmail.com
---
 kernel/bpf/btf.c | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 656ee11aff67..a91822bae043 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6383,12 +6383,11 @@ struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
 	return prog->aux->attach_btf;
 }

-static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
+static bool is_void_or_int_ptr(struct btf *btf, const struct btf_type *t)
 {
 	/* skip modifiers */
 	t = btf_type_skip_modifiers(btf, t->type, NULL);
-
-	return btf_type_is_int(t);
+	return btf_type_is_void(t) || btf_type_is_int(t);
 }

 static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
@@ -6776,14 +6775,11 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 		}
 	}

-	if (t->type == 0)
-		/* This is a pointer to void.
-		 * It is the same as scalar from the verifier safety pov.
-		 * No further pointer walking is allowed.
-		 */
-		return true;
-
-	if (is_int_ptr(btf, t))
+	/*
+	 * If it's a pointer to void, it's the same as scalar from the verifier
+	 * safety POV. Either way, no further pointer walking is allowed.
+	 */
+	if (is_void_or_int_ptr(btf, t))
 		return true;

 	/* this is a pointer to another type */

From 4c0a42c50021ee509f159c1f8a22efb35987c941 Mon Sep 17 00:00:00 2001
From: KaFai Wan
Date: Wed, 23 Apr 2025 20:13:29 +0800
Subject: [PATCH 036/135] selftests/bpf: Add test to access const void pointer
 argument in tracing program

Add a verifier test for accessing a const void pointer argument in
tracing programs. The test program loads the 1st argument of the
bpf_fentry_test10 function, which is a const void pointer, and checks
that the verifier allows it.
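The selftest uses naked asm to pin down the exact load; the C-level
program a user would write is roughly the following, assuming a
bpftool-generated vmlinux.h and the usual libbpf headers:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  SEC("fentry/bpf_fentry_test10")
  int BPF_PROG(read_const_void_arg, const void *a)
  {
      /* the pointer value itself is a scalar; dereferencing it would
       * still be rejected without further checks
       */
      bpf_printk("arg=%lx", (unsigned long)a);
      return 0;
  }

  char _license[] SEC("license") = "GPL";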
Signed-off-by: KaFai Wan Signed-off-by: Andrii Nakryiko Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20250423121329.3163461-3-mannkafai@gmail.com --- net/bpf/test_run.c | 8 +++++++- .../selftests/bpf/progs/verifier_btf_ctx_access.c | 12 ++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 7cb192cbd65f..aaf13a7d58ed 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -569,6 +569,11 @@ __bpf_kfunc u32 bpf_fentry_test9(u32 *a) return *a; } +int noinline bpf_fentry_test10(const void *a) +{ + return (long)a; +} + void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo) { } @@ -699,7 +704,8 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 || bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 || bpf_fentry_test8(&arg) != 0 || - bpf_fentry_test9(&retval) != 0) + bpf_fentry_test9(&retval) != 0 || + bpf_fentry_test10((void *)0) != 0) goto out; break; case BPF_MODIFY_RETURN: diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c index 28b939572cda..03942cec07e5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c @@ -65,4 +65,16 @@ __naked void ctx_access_u32_pointer_reject_8(void) " ::: __clobber_all); } +SEC("fentry/bpf_fentry_test10") +__description("btf_ctx_access const void pointer accept") +__success __retval(0) +__naked void ctx_access_const_void_pointer_accept(void) +{ + asm volatile (" \ + r2 = *(u64 *)(r1 + 0); /* load 1st argument value (const void pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + char _license[] SEC("license") = "GPL"; From 60400cd2b9bed537e6a2a8b580cfc3271e1aa672 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 16 Apr 2025 14:47:58 +0200 Subject: [PATCH 037/135] selftests/bpf: Set MACs during veth creation in tc_redirect tc_redirect/tc_redirect_dtime fails intermittently on some systems with: (network_helpers.c:303: errno: Operation now in progress) Failed to connect to server The problem is that on these systems systemd-networkd and systemd-udevd are installed in the default configuration, which includes: /usr/lib/systemd/network/99-default.link /usr/lib/udev/rules.d/80-net-setup-link.rules These configs instruct systemd to change MAC addresses of newly created interfaces, which includes the ones created by BPF selftests. In this particular case it causes SYN+ACK packets to be dropped, because they get the PACKET_OTHERHOST type - the fact that this causes a connect() on a blocking socket to return -EINPROGRESS looks like a bug, which needs to be investigated separately. systemd won't change the MAC address if the kernel reports that it was already set by userspace; the NET_ADDR_SET check in link_generate_new_hw_addr() is responsible for this. In order to eliminate the race window between systemd and the test, set MAC addresses during link creation. Ignore checkpatch's "quoted string split across lines" warning, since it points to a command line, and not a user-visible message. 
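One way to confirm the race is closed (a sketch, not part of the
patch): once the MAC is assigned at link-creation time, the kernel
reports the address as userspace-set via sysfs, which is the
NET_ADDR_SET condition systemd checks; NET_ADDR_SET is 3 per
include/uapi/linux/netdevice.h.

  #include <stdio.h>

  static int addr_assign_type(const char *ifname)
  {
      char path[128];
      int type = -1;
      FILE *f;

      snprintf(path, sizeof(path),
               "/sys/class/net/%s/addr_assign_type", ifname);
      f = fopen(path, "r");
      if (!f)
          return -1;
      if (fscanf(f, "%d", &type) != 1)
          type = -1;
      fclose(f);
      return type;
  }

  int main(void)
  {
      /* expect 3 (NET_ADDR_SET) for "src" created with an explicit MAC */
      printf("src addr_assign_type: %d\n", addr_assign_type("src"));
      return 0;
  }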
Signed-off-by: Ilya Leoshkevich Signed-off-by: Martin KaFai Lau Link: https://patch.msgid.link/20250416124845.584362-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index c85798966aec..76d72a59365e 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -56,6 +56,8 @@ #define MAC_DST_FWD "00:11:22:33:44:55" #define MAC_DST "00:22:33:44:55:66" +#define MAC_SRC_FWD "00:33:44:55:66:77" +#define MAC_SRC "00:44:55:66:77:88" #define IFADDR_STR_LEN 18 #define PING_ARGS "-i 0.2 -c 3 -w 10 -q" @@ -207,11 +209,10 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) int err; if (result->dev_mode == MODE_VETH) { - SYS(fail, "ip link add src type veth peer name src_fwd"); - SYS(fail, "ip link add dst type veth peer name dst_fwd"); - - SYS(fail, "ip link set dst_fwd address " MAC_DST_FWD); - SYS(fail, "ip link set dst address " MAC_DST); + SYS(fail, "ip link add src address " MAC_SRC " type veth " + "peer name src_fwd address " MAC_SRC_FWD); + SYS(fail, "ip link add dst address " MAC_DST " type veth " + "peer name dst_fwd address " MAC_DST_FWD); } else if (result->dev_mode == MODE_NETKIT) { err = create_netkit(NETKIT_L3, "src", "src_fwd"); if (!ASSERT_OK(err, "create_ifindex_src")) From 4cc20482143c6dd009ea0c99762bb4bdeac98ec2 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Wed, 23 Apr 2025 11:06:32 +0800 Subject: [PATCH 038/135] bpf, docs: Fix non-standard line break Even though the kernel's coding-style document does not explicitly state this, we generally put a newline after the semicolon of every C language statement to enhance code readability. Adjust the placement of newlines to adhere to this convention. Reported-by: Chen Linxuan Signed-off-by: WangYuli Reviewed-by: Yanteng Si Link: https://lore.kernel.org/r/DB66473733449DB0+20250423030632.17626-1-wangyuli@uniontech.com Signed-off-by: Alexei Starovoitov --- Documentation/bpf/bpf_iterators.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/bpf/bpf_iterators.rst b/Documentation/bpf/bpf_iterators.rst index 7f514cb6b052..385cd05aabf5 100644 --- a/Documentation/bpf/bpf_iterators.rst +++ b/Documentation/bpf/bpf_iterators.rst @@ -323,8 +323,8 @@ Now, in the userspace program, pass the pointer of struct to the :: - link = bpf_program__attach_iter(prog, &opts); iter_fd = - bpf_iter_create(bpf_link__fd(link)); + link = bpf_program__attach_iter(prog, &opts); + iter_fd = bpf_iter_create(bpf_link__fd(link)); If both *tid* and *pid* are zero, an iterator created from this struct ``bpf_iter_attach_opts`` will include every opened file of every task in the From ddfd1f30b5badcd06c199fa519bec5f0f54892e0 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Thu, 24 Apr 2025 18:41:25 +0200 Subject: [PATCH 039/135] selftests/bpf: Fix arena_spin_lock.c build dependency Changing bpf_arena_spin_lock.h does not lead to recompiling arena_spin_lock.c. By convention, all BPF progs depend on all header files in progs/, so move this header file there. There are no other users besides arena_spin_lock.c. 
Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250424165525.154403-2-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/{ => progs}/bpf_arena_spin_lock.h | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tools/testing/selftests/bpf/{ => progs}/bpf_arena_spin_lock.h (100%) diff --git a/tools/testing/selftests/bpf/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h similarity index 100% rename from tools/testing/selftests/bpf/bpf_arena_spin_lock.h rename to tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h From 0240e5a9431cb48f980fd44f913d7f0886b0aded Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Thu, 24 Apr 2025 18:41:26 +0200 Subject: [PATCH 040/135] selftests/bpf: Fix arena_spin_lock on systems with less than 16 CPUs test_arena_spin_lock_size() explicitly requires having at least 2 CPUs, but if the machine has less than 16, then pthread_setaffinity_np() call in spin_lock_thread() fails. Cap threads to the number of CPUs. Alternative solutions are raising the number of required CPUs to 16, or pinning multiple threads to the same CPU, but they are not that useful. Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250424165525.154403-3-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/arena_spin_lock.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c index 7565fc7690c2..0223fce4db2b 100644 --- a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c @@ -51,9 +51,11 @@ static void test_arena_spin_lock_size(int size) struct arena_spin_lock *skel; pthread_t thread_id[16]; int prog_fd, i, err; + int nthreads; void *ret; - if (get_nprocs() < 2) { + nthreads = MIN(get_nprocs(), ARRAY_SIZE(thread_id)); + if (nthreads < 2) { test__skip(); return; } @@ -66,25 +68,25 @@ static void test_arena_spin_lock_size(int size) goto end; } skel->bss->cs_count = size; - skel->bss->limit = repeat * 16; + skel->bss->limit = repeat * nthreads; - ASSERT_OK(pthread_barrier_init(&barrier, NULL, 16), "barrier init"); + ASSERT_OK(pthread_barrier_init(&barrier, NULL, nthreads), "barrier init"); prog_fd = bpf_program__fd(skel->progs.prog); - for (i = 0; i < 16; i++) { + for (i = 0; i < nthreads; i++) { err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd); if (!ASSERT_OK(err, "pthread_create")) goto end_barrier; } - for (i = 0; i < 16; i++) { + for (i = 0; i < nthreads; i++) { if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join")) goto end_barrier; if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd")) goto end_barrier; } - ASSERT_EQ(skel->bss->counter, repeat * 16, "check counter value"); + ASSERT_EQ(skel->bss->counter, repeat * nthreads, "check counter value"); end_barrier: pthread_barrier_destroy(&barrier); From be5521991506552c0873371694e4a2cb263e1b9c Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Thu, 24 Apr 2025 18:41:27 +0200 Subject: [PATCH 041/135] selftests/bpf: Fix endianness issue in __qspinlock declaration Copy the big-endian field declarations from qspinlock_types.h, otherwise some properties won't hold on big-endian systems. For example, assigning lock->val = 1 should result in lock->locked == 1, which is not the case there. 
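A host-side demo of the property the patch restores, using local
stand-in types rather than the selftest header: with the
byte-order-aware layout, writing val = 1 makes locked read back as 1
on either endianness, whereas the little-endian-only layout reads back
0 on big-endian builds (e.g. s390x).

  #include <stdio.h>
  #include <stdint.h>

  struct qspinlock_demo {
      union {
          uint32_t val;
  #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
          struct { uint8_t locked, pending; };
          struct { uint16_t locked_pending, tail; };
  #else
          struct { uint16_t tail, locked_pending; };
          struct { uint8_t reserved[2], pending, locked; };
  #endif
      };
  };

  int main(void)
  {
      struct qspinlock_demo l = { .val = 1 };

      /* prints locked=1 on both endiannesses thanks to the #else branch */
      printf("locked=%u pending=%u tail=%u\n",
             (unsigned int)l.locked, (unsigned int)l.pending,
             (unsigned int)l.tail);
      return 0;
  }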
Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250424165525.154403-4-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/progs/bpf_arena_spin_lock.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h index 4e29c31c4ef8..d67466c1ff77 100644 --- a/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h +++ b/tools/testing/selftests/bpf/progs/bpf_arena_spin_lock.h @@ -32,6 +32,7 @@ extern unsigned long CONFIG_NR_CPUS __kconfig; struct __qspinlock { union { atomic_t val; +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ struct { u8 locked; u8 pending; @@ -40,6 +41,17 @@ struct __qspinlock { u16 locked_pending; u16 tail; }; +#else + struct { + u16 tail; + u16 locked_pending; + }; + struct { + u8 reserved[2]; + u8 pending; + u8 locked; + }; +#endif }; }; From 64821d25f05ac468d435e61669ae745ce5a633ea Mon Sep 17 00:00:00 2001 From: Tao Chen Date: Thu, 24 Apr 2025 00:39:01 +0800 Subject: [PATCH 042/135] libbpf: Remove sample_period init in perf_buffer It seems that sample_period is not used in perf buffer. Actually, only wakeup_events are meaningful to enable events aggregation for wakeup notification. Remove sample_period setting code to avoid confusion. Fixes: fb84b8224655 ("libbpf: add perf buffer API") Signed-off-by: Tao Chen Signed-off-by: Andrii Nakryiko Acked-by: Jiri Olsa Acked-by: Namhyung Kim Link: https://lore.kernel.org/bpf/20250423163901.2983689-1-chen.dylane@linux.dev --- tools/lib/bpf/libbpf.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fbb991c6bf19..080c699582c7 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -13375,7 +13375,6 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, attr.config = PERF_COUNT_SW_BPF_OUTPUT; attr.type = PERF_TYPE_SOFTWARE; attr.sample_type = PERF_SAMPLE_RAW; - attr.sample_period = sample_period; attr.wakeup_events = sample_period; p.attr = &attr; From 91dbac4076537b464639953c055c460d2bdfc7ea Mon Sep 17 00:00:00 2001 From: Jonathan Wiepert Date: Thu, 24 Apr 2025 18:14:57 -0400 Subject: [PATCH 043/135] Use thread-safe function pointer in libbpf_print This patch fixes a thread safety bug where libbpf_print uses the global variable storing the print function pointer rather than the local variable that had the print function set via __atomic_load_n. Fixes: f1cb927cdb62 ("libbpf: Ensure print callback usage is thread-safe") Signed-off-by: Jonathan Wiepert Signed-off-by: Andrii Nakryiko Acked-by: Mykyta Yatsenko Link: https://lore.kernel.org/bpf/20250424221457.793068-1-jonathan.wiepert@gmail.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 080c699582c7..617cfb9a7ff5 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -286,7 +286,7 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...) 
old_errno = errno; va_start(args, format); - __libbpf_pr(level, format, args); + print_fn(level, format, args); va_end(args); errno = old_errno; From 8e64c387c942229c551d0f23de4d9993d3a2acb6 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Tue, 29 Apr 2025 17:10:42 +0100 Subject: [PATCH 044/135] libbpf: Add identical pointer detection to btf_dedup_is_equiv() Recently as a side-effect of commit ac053946f5c4 ("compiler.h: introduce TYPEOF_UNQUAL() macro") issues were observed in deduplication between modules and kernel BTF such that a large number of kernel types were not deduplicated so were found in module BTF (task_struct, bpf_prog etc). The root cause appeared to be a failure to dedup struct types, specifically those with members that were pointers with __percpu annotations. The issue in dedup is at the point that we are deduplicating structures, we have not yet deduplicated reference types like pointers. If multiple copies of a pointer point at the same (deduplicated) integer as in this case, we do not see them as identical. Special handling already exists to deal with structures and arrays, so add pointer handling here too. Reported-by: Alexei Starovoitov Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250429161042.2069678-1-alan.maguire@oracle.com --- tools/lib/bpf/btf.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 24fc71ce5631..b7513d4cce55 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -4396,6 +4396,19 @@ static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id return true; } +static bool btf_dedup_identical_ptrs(struct btf_dedup *d, __u32 id1, __u32 id2) +{ + struct btf_type *t1, *t2; + + t1 = btf_type_by_id(d->btf, id1); + t2 = btf_type_by_id(d->btf, id2); + + if (!btf_is_ptr(t1) || !btf_is_ptr(t2)) + return false; + + return t1->type == t2->type; +} + /* * Check equivalence of BTF type graph formed by candidate struct/union (we'll * call it "candidate graph" in this description for brevity) to a type graph @@ -4528,6 +4541,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, */ if (btf_dedup_identical_structs(d, hypot_type_id, cand_id)) return 1; + /* A similar case is again observed for PTRs. */ + if (btf_dedup_identical_ptrs(d, hypot_type_id, cand_id)) + return 1; return 0; } From 38d976c32d85ef12dcd2b8a231196f7049548477 Mon Sep 17 00:00:00 2001 From: "T.J. Mercier" Date: Mon, 28 Apr 2025 18:02:54 +0000 Subject: [PATCH 045/135] selftests/bpf: Fix kmem_cache iterator draining The closing parentheses around the read syscall is misplaced, causing single byte reads from the iterator instead of buf sized reads. While the end result is the same, many more read calls than necessary are performed. $ tools/testing/selftests/bpf/vmtest.sh "./test_progs -t kmem_cache_iter" 145/1 kmem_cache_iter/check_task_struct:OK 145/2 kmem_cache_iter/check_slabinfo:OK 145/3 kmem_cache_iter/open_coded_iter:OK 145 kmem_cache_iter:OK Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED Fixes: a496d0cdc84d ("selftests/bpf: Add a test for kmem_cache_iter") Signed-off-by: T.J. 
Mercier Signed-off-by: Martin KaFai Lau Acked-by: Song Liu Acked-by: Namhyung Kim Link: https://patch.msgid.link/20250428180256.1482899-1-tjmercier@google.com --- tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c index 8e13a3416a21..1de14b111931 100644 --- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c @@ -104,7 +104,7 @@ void test_kmem_cache_iter(void) goto destroy; memset(buf, 0, sizeof(buf)); - while (read(iter_fd, buf, sizeof(buf) > 0)) { + while (read(iter_fd, buf, sizeof(buf)) > 0) { /* Read out all contents */ printf("%s", buf); } From 358b1c0f56ebb6996fcec7dcdcf6bae5dcbc8b6c Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Wed, 30 Apr 2025 12:08:20 +0000 Subject: [PATCH 046/135] libbpf: Use proper errno value in linker Return values of the linker_append_sec_data() and the linker_append_elf_relos() functions are propagated all the way up to users of libbpf API. In some error cases these functions return -1 which will be seen as -EPERM from user's point of view. Instead, return a more reasonable -EINVAL. Fixes: faf6ed321cf6 ("libbpf: Add BPF static linker APIs") Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250430120820.2262053-1-a.s.protopopov@gmail.com --- tools/lib/bpf/linker.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 56f5068e2eba..a469e5d4fee7 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -1376,7 +1376,7 @@ static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj } else { if (!secs_match(dst_sec, src_sec)) { pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name); - return -1; + return -EINVAL; } /* "license" and "version" sections are deduped */ @@ -2223,7 +2223,7 @@ static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *ob } } else if (!secs_match(dst_sec, src_sec)) { pr_warn("sections %s are not compatible\n", src_sec->sec_name); - return -1; + return -EINVAL; } /* shdr->sh_link points to SYMTAB */ From 7b05f43155cb128aa06a226afdbc3daa8d75b358 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 28 Apr 2025 23:06:39 +0200 Subject: [PATCH 047/135] bpf: Replace offsetof() with struct_size() Compared to offsetof(), struct_size() provides additional compile-time checks for structs with flexible arrays (e.g., __must_be_array()). No functional changes intended. 
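A userspace illustration of the equivalence; struct_size_demo() below
is a simplified stand-in, while the kernel's struct_size() from
<linux/overflow.h> additionally saturates on overflow and type-checks
the flexible-array member:

  #include <stdio.h>
  #include <stddef.h>

  struct rec {
      int cnt;
      struct { int type; int off; } fields[];
  };

  /* simplified: the real macro also guards against count overflow */
  #define struct_size_demo(p, member, count) \
      (offsetof(typeof(*(p)), member) + sizeof((p)->member[0]) * (count))

  int main(void)
  {
      struct rec *r = NULL;
      size_t a = offsetof(struct rec, fields[3]);
      size_t b = struct_size_demo(r, fields, 3);

      /* both compute the same allocation size */
      printf("offsetof form: %zu, struct_size form: %zu\n", a, b);
      return a != b;
  }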
Signed-off-by: Thorsten Blum Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250428210638.30219-2-thorsten.blum@linux.dev --- kernel/bpf/syscall.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 64c3393e8270..df33d19c5c3b 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -693,7 +694,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec) if (IS_ERR_OR_NULL(rec)) return NULL; - size = offsetof(struct btf_record, fields[rec->cnt]); + size = struct_size(rec, fields, rec->cnt); new_rec = kmemdup(rec, size, GFP_KERNEL | __GFP_NOWARN); if (!new_rec) return ERR_PTR(-ENOMEM); @@ -748,7 +749,7 @@ bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *r return false; if (rec_a->cnt != rec_b->cnt) return false; - size = offsetof(struct btf_record, fields[rec_a->cnt]); + size = struct_size(rec_a, fields, rec_a->cnt); /* btf_parse_fields uses kzalloc to allocate a btf_record, so unused * members are zeroed out. So memcmp is safe to do without worrying * about padding/unused fields. From 714070c4cb7a10ff57450a618a936775f3036245 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 28 Apr 2025 17:44:02 +0200 Subject: [PATCH 048/135] bpf: Allow XDP dev-bound programs to perform XDP_REDIRECT into maps In the current implementation if the program is dev-bound to a specific device, it will not be possible to perform XDP_REDIRECT into a DEVMAP or CPUMAP even if the program is running in the driver NAPI context and it is not attached to any map entry. This seems in contrast with the explanation available in bpf_prog_map_compatible routine. Fix the issue introducing __bpf_prog_map_compatible utility routine in order to avoid bpf_prog_is_dev_bound() check running bpf_check_tail_call() at program load time (bpf_prog_select_runtime()). Continue forbidding to attach a dev-bound program to XDP maps (BPF_MAP_TYPE_PROG_ARRAY, BPF_MAP_TYPE_DEVMAP and BPF_MAP_TYPE_CPUMAP). Fixes: 3d76a4d3d4e59 ("bpf: XDP metadata RX kfuncs") Signed-off-by: Lorenzo Bianconi Signed-off-by: Martin KaFai Lau Acked-by: Stanislav Fomichev --- kernel/bpf/core.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ba6b6118cf50..a3e571688421 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2358,8 +2358,8 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx, return 0; } -bool bpf_prog_map_compatible(struct bpf_map *map, - const struct bpf_prog *fp) +static bool __bpf_prog_map_compatible(struct bpf_map *map, + const struct bpf_prog *fp) { enum bpf_prog_type prog_type = resolve_prog_type(fp); bool ret; @@ -2368,14 +2368,6 @@ bool bpf_prog_map_compatible(struct bpf_map *map, if (fp->kprobe_override) return false; - /* XDP programs inserted into maps are not guaranteed to run on - * a particular netdev (and can run outside driver context entirely - * in the case of devmap and cpumap). Until device checks - * are implemented, prohibit adding dev-bound programs to program maps. 
- */
-	if (bpf_prog_is_dev_bound(aux))
-		return false;
-
 	spin_lock(&map->owner.lock);
 	if (!map->owner.type) {
 		/* There's no owner yet where we could check for
@@ -2409,6 +2401,19 @@ bool bpf_prog_map_compatible(struct bpf_map *map,
 	return ret;
 }

+bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp)
+{
+	/* XDP programs inserted into maps are not guaranteed to run on
+	 * a particular netdev (and can run outside driver context entirely
+	 * in the case of devmap and cpumap). Until device checks
+	 * are implemented, prohibit adding dev-bound programs to program maps.
+	 */
+	if (bpf_prog_is_dev_bound(fp->aux))
+		return false;
+
+	return __bpf_prog_map_compatible(map, fp);
+}
+
 static int bpf_check_tail_call(const struct bpf_prog *fp)
 {
 	struct bpf_prog_aux *aux = fp->aux;
@@ -2421,7 +2426,7 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 		if (!map_type_contains_progs(map))
 			continue;

-		if (!bpf_prog_map_compatible(map, fp)) {
+		if (!__bpf_prog_map_compatible(map, fp)) {
 			ret = -EINVAL;
 			goto out;
 		}

From 3678331ca781aa1c15b2a2f3ad7bed5a61f716d1 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi
Date: Mon, 28 Apr 2025 17:44:03 +0200
Subject: [PATCH 049/135] selftests/bpf: xdp_metadata: Check XDP_REDIRECT
 support for dev-bound progs

Improve the xdp_metadata BPF selftest to check that an XDP dev-bound
program can perform XDP_REDIRECT into a DEVMAP, but still cannot be
attached to a DEVMAP entry.

Signed-off-by: Lorenzo Bianconi
Signed-off-by: Martin KaFai Lau
Acked-by: Stanislav Fomichev
---
 .../selftests/bpf/prog_tests/xdp_metadata.c | 22 ++++++++++++++++++-
 .../selftests/bpf/progs/xdp_metadata.c      | 13 +++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
index 3d47878ef6bf..19f92affc2da 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c
@@ -351,9 +351,10 @@ void test_xdp_metadata(void)
 	struct xdp_metadata2 *bpf_obj2 = NULL;
 	struct xdp_metadata *bpf_obj = NULL;
 	struct bpf_program *new_prog, *prog;
+	struct bpf_devmap_val devmap_e = {};
+	struct bpf_map *prog_arr, *devmap;
 	struct nstoken *tok = NULL;
 	__u32 queue_id = QUEUE_ID;
-	struct bpf_map *prog_arr;
 	struct xsk tx_xsk = {};
 	struct xsk rx_xsk = {};
 	__u32 val, key = 0;
@@ -409,6 +410,13 @@ void test_xdp_metadata(void)
 	bpf_program__set_ifindex(prog, rx_ifindex);
 	bpf_program__set_flags(prog, BPF_F_XDP_DEV_BOUND_ONLY);

+	/* Make sure we can load a dev-bound program that performs
+	 * XDP_REDIRECT into a devmap.
+	 */
+	new_prog = bpf_object__find_program_by_name(bpf_obj->obj, "redirect");
+	bpf_program__set_ifindex(new_prog, rx_ifindex);
+	bpf_program__set_flags(new_prog, BPF_F_XDP_DEV_BOUND_ONLY);
+
 	if (!ASSERT_OK(xdp_metadata__load(bpf_obj), "load skeleton"))
 		goto out;

@@ -423,6 +431,18 @@ void test_xdp_metadata(void)
 		       "update prog_arr"))
 		goto out;

+	/* Make sure we can't add dev-bound programs to devmaps. */
+	devmap = bpf_object__find_map_by_name(bpf_obj->obj, "dev_map");
+	if (!ASSERT_OK_PTR(devmap, "no dev_map found"))
+		goto out;
+
+	devmap_e.bpf_prog.fd = val;
+	if (!ASSERT_ERR(bpf_map__update_elem(devmap, &key, sizeof(key),
+					     &devmap_e, sizeof(devmap_e),
+					     BPF_ANY),
+			"update dev_map"))
+		goto out;
+
 	/* Attach BPF program to RX interface.
*/ ret = bpf_xdp_attach(rx_ifindex, diff --git a/tools/testing/selftests/bpf/progs/xdp_metadata.c b/tools/testing/selftests/bpf/progs/xdp_metadata.c index 31ca229bb3c0..09bb8a038d52 100644 --- a/tools/testing/selftests/bpf/progs/xdp_metadata.c +++ b/tools/testing/selftests/bpf/progs/xdp_metadata.c @@ -19,6 +19,13 @@ struct { __type(value, __u32); } prog_arr SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_devmap_val)); + __uint(max_entries, 1); +} dev_map SEC(".maps"); + extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, __u64 *timestamp) __ksym; extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash, @@ -95,4 +102,10 @@ int rx(struct xdp_md *ctx) return bpf_redirect_map(&xsk, ctx->rx_queue_index, XDP_PASS); } +SEC("xdp") +int redirect(struct xdp_md *ctx) +{ + return bpf_redirect_map(&dev_map, ctx->rx_queue_index, XDP_PASS); +} + char _license[] SEC("license") = "GPL"; From f263336a41da287c5aebd35be8f1e0422e49bc5c Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Wed, 30 Apr 2025 14:42:49 +0100 Subject: [PATCH 050/135] selftests/bpf: Add btf dedup test covering module BTF dedup Recently issues were observed with module BTF deduplication failures [1]. Add a dedup selftest that ensures that core kernel types are referenced from split BTF as base BTF types. To do this use bpf_testmod functions which utilize core kernel types, specifically ssize_t bpf_testmod_test_write(struct file *file, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t len); __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk); __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb); For each of these ensure that the types they reference - struct file, struct kobject, struct bin_attr etc - are in base BTF. Note that because bpf_testmod.ko is built with distilled base BTF the associated reference types - i.e. the PTR that points at a "struct file" - will be in split BTF. As a result the test resolves typedef and pointer references and verifies the pointed-at or typedef'ed type is in base BTF. Because we use BTF from /sys/kernel/btf/bpf_testmod relocation has occurred for the referenced types and they will be base - not distilled base - types. For large-scale dedup issues, we see such types appear in split BTF and as a result this test fails. Hence it is proposed as a test which will fail when large-scale dedup issues have occurred. [1] https://lore.kernel.org/dwarves/CAADnVQL+-LiJGXwxD3jEUrOonO-fX0SZC8496dVzUXvfkB7gYQ@mail.gmail.com/ Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20250430134249.2451066-1-alan.maguire@oracle.com --- .../bpf/prog_tests/btf_dedup_split.c | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c index d9024c7a892a..5bc15bb6b7ce 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c @@ -440,6 +440,105 @@ cleanup: btf__free(btf1); } +/* Ensure module split BTF dedup worked correctly; when dedup fails badly + * core kernel types are in split BTF also, so ensure that references to + * such types point at base - not split - BTF. 
+ * + * bpf_testmod_test_write() has multiple core kernel type parameters; + * + * ssize_t + * bpf_testmod_test_write(struct file *file, struct kobject *kobj, + * struct bin_attribute *bin_attr, + * char *buf, loff_t off, size_t len); + * + * Ensure each of the FUNC_PROTO params is a core kernel type. + * + * Do the same for + * + * __bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk); + * + * ...and + * + * __bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb); + * + */ +const char *mod_funcs[] = { + "bpf_testmod_test_write", + "bpf_kfunc_call_test3", + "bpf_kfunc_call_test_pass_ctx" +}; + +static void test_split_module(void) +{ + struct btf *vmlinux_btf, *btf1 = NULL; + int i, nr_base_types; + + vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux_btf")) + return; + nr_base_types = btf__type_cnt(vmlinux_btf); + if (!ASSERT_GT(nr_base_types, 0, "nr_base_types")) + goto cleanup; + + btf1 = btf__parse_split("/sys/kernel/btf/bpf_testmod", vmlinux_btf); + if (!ASSERT_OK_PTR(btf1, "split_btf")) + return; + + for (i = 0; i < ARRAY_SIZE(mod_funcs); i++) { + const struct btf_param *p; + const struct btf_type *t; + __u16 vlen; + __u32 id; + int j; + + id = btf__find_by_name_kind(btf1, mod_funcs[i], BTF_KIND_FUNC); + if (!ASSERT_GE(id, nr_base_types, "func_id")) + goto cleanup; + t = btf__type_by_id(btf1, id); + if (!ASSERT_OK_PTR(t, "func_id_type")) + goto cleanup; + t = btf__type_by_id(btf1, t->type); + if (!ASSERT_OK_PTR(t, "func_proto_id_type")) + goto cleanup; + if (!ASSERT_EQ(btf_is_func_proto(t), true, "is_func_proto")) + goto cleanup; + vlen = btf_vlen(t); + + for (j = 0, p = btf_params(t); j < vlen; j++, p++) { + /* bpf_testmod uses resilient split BTF, so any + * reference types will be added to split BTF and their + * associated targets will be base BTF types; for example + * for a "struct sock *" the PTR will be in split BTF + * while the "struct sock" will be in base. + * + * In some cases like loff_t we have to resolve + * multiple typedefs hence the while() loop below. + * + * Note that resilient split BTF generation depends + * on pahole version, so we do not assert that + * reference types are in split BTF, as if pahole + * does not support resilient split BTF they will + * also be base BTF types. + */ + id = p->type; + do { + t = btf__type_by_id(btf1, id); + if (!ASSERT_OK_PTR(t, "param_ref_type")) + goto cleanup; + if (!btf_is_mod(t) && !btf_is_ptr(t) && !btf_is_typedef(t)) + break; + id = t->type; + } while (true); + + if (!ASSERT_LT(id, nr_base_types, "verify_base_type")) + goto cleanup; + } + } +cleanup: + btf__free(btf1); + btf__free(vmlinux_btf); +} + void test_btf_dedup_split() { if (test__start_subtest("split_simple")) @@ -450,4 +549,6 @@ void test_btf_dedup_split() test_split_fwd_resolve(); if (test__start_subtest("split_dup_struct_in_cu")) test_split_dup_struct_in_cu(); + if (test__start_subtest("split_module")) + test_split_module(); } From a28fe3160362f92b0a1ae4a711244c75b41f4955 Mon Sep 17 00:00:00 2001 From: Ihor Solodrai Date: Fri, 2 May 2025 11:52:21 -0700 Subject: [PATCH 051/135] selftests/bpf: Remove sockmap_ktls disconnect_after_delete test "sockmap_ktls disconnect_after_delete" is effectively moot after disconnect has been disabled for TLS [1][2]. Remove the test completely. 
[1] https://lore.kernel.org/bpf/20250416170246.2438524-1-ihor.solodrai@linux.dev/ [2] https://lore.kernel.org/netdev/20250404180334.3224206-1-kuba@kernel.org/ Signed-off-by: Ihor Solodrai Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250502185221.1556192-1-isolodrai@meta.com --- .../selftests/bpf/prog_tests/sockmap_ktls.c | 67 ------------------- 1 file changed, 67 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index 71b18fb1f719..3044f54b16d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -61,71 +61,6 @@ static int create_ktls_pairs(int family, int sotype, int *c, int *p) return 0; } -static int tcp_server(int family) -{ - int err, s; - - s = socket(family, SOCK_STREAM, 0); - if (!ASSERT_GE(s, 0, "socket")) - return -1; - - err = listen(s, SOMAXCONN); - if (!ASSERT_OK(err, "listen")) - return -1; - - return s; -} - -static int disconnect(int fd) -{ - struct sockaddr unspec = { AF_UNSPEC }; - - return connect(fd, &unspec, sizeof(unspec)); -} - -/* Disconnect (unhash) a kTLS socket after removing it from sockmap. */ -static void test_sockmap_ktls_disconnect_after_delete(int family, int map) -{ - struct sockaddr_storage addr = {0}; - socklen_t len = sizeof(addr); - int err, cli, srv, zero = 0; - - srv = tcp_server(family); - if (srv == -1) - return; - - err = getsockname(srv, (struct sockaddr *)&addr, &len); - if (!ASSERT_OK(err, "getsockopt")) - goto close_srv; - - cli = socket(family, SOCK_STREAM, 0); - if (!ASSERT_GE(cli, 0, "socket")) - goto close_srv; - - err = connect(cli, (struct sockaddr *)&addr, len); - if (!ASSERT_OK(err, "connect")) - goto close_cli; - - err = bpf_map_update_elem(map, &zero, &cli, 0); - if (!ASSERT_OK(err, "bpf_map_update_elem")) - goto close_cli; - - err = setsockopt(cli, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls")); - if (!ASSERT_OK(err, "setsockopt(TCP_ULP)")) - goto close_cli; - - err = bpf_map_delete_elem(map, &zero); - if (!ASSERT_OK(err, "bpf_map_delete_elem")) - goto close_cli; - - err = disconnect(cli); - -close_cli: - close(cli); -close_srv: - close(srv); -} - static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map) { struct sockaddr_storage addr = {}; @@ -313,8 +248,6 @@ static void run_tests(int family, enum bpf_map_type map_type) if (!ASSERT_GE(map, 0, "bpf_map_create")) return; - if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type))) - test_sockmap_ktls_disconnect_after_delete(family, map); if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type))) test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map); From 41d4ce6df3f4945341ec509a840cc002a413b6cc Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Fri, 2 May 2025 19:30:31 +0000 Subject: [PATCH 052/135] bpf: Fix uninitialized values in BPF_{CORE,PROBE}_READ With the latest LLVM bpf selftests build will fail with the following error message: progs/profiler.inc.h:710:31: error: default initialization of an object of type 'typeof ((parent_task)->real_cred->uid.val)' (aka 'const unsigned int') leaves the object uninitialized and is incompatible with C++ [-Werror,-Wdefault-const-init-unsafe] 710 | proc_exec_data->parent_uid = BPF_CORE_READ(parent_task, real_cred, uid.val); | ^ tools/testing/selftests/bpf/tools/include/bpf/bpf_core_read.h:520:35: note: expanded from macro 'BPF_CORE_READ' 520 | ___type((src), a, 
##__VA_ARGS__) __r; \ | ^ This happens because BPF_CORE_READ (and other macro) declare the variable __r using the ___type macro which can inherit const modifier from intermediate types. Fix this by using __typeof_unqual__, when supported. (And when it is not supported, the problem shouldn't appear, as older compilers haven't complained.) Fixes: 792001f4f7aa ("libbpf: Add user-space variants of BPF_CORE_READ() family of macros") Fixes: a4b09a9ef945 ("libbpf: Add non-CO-RE variants of BPF_CORE_READ() macro family") Signed-off-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250502193031.3522715-1-a.s.protopopov@gmail.com --- tools/lib/bpf/bpf_core_read.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h index c0e13cdf9660..b997c68bd945 100644 --- a/tools/lib/bpf/bpf_core_read.h +++ b/tools/lib/bpf/bpf_core_read.h @@ -388,7 +388,13 @@ extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak; #define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j #define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__) +#if defined(__clang__) && (__clang_major__ >= 19) +#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__)) +#elif defined(__GNUC__) && (__GNUC__ >= 14) +#define ___type(...) __typeof_unqual__(___arrow(__VA_ARGS__)) +#else #define ___type(...) typeof(___arrow(__VA_ARGS__)) +#endif #define ___read(read_fn, dst, src_type, src, accessor) \ read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor) From 41948afcf503b5667637a0b3ab279a061f559bec Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sat, 3 May 2025 17:15:13 +0200 Subject: [PATCH 053/135] bpf: Replace offsetof() with struct_size() Compared to offsetof(), struct_size() provides additional compile-time checks for structs with flexible arrays (e.g., __must_be_array()). No functional changes intended. Signed-off-by: Thorsten Blum Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250503151513.343931-2-thorsten.blum@linux.dev --- kernel/bpf/btf.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a91822bae043..6b21ca67070c 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -3957,7 +3958,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type /* This needs to be kzalloc to zero out padding and unused fields, see * comment in btf_record_equal. 
      */
-    rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN);
+    rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL | __GFP_NOWARN);
     if (!rec)
         return ERR_PTR(-ENOMEM);

@@ -5583,7 +5584,7 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
         if (id < 0)
             continue;

-        new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
+        new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
                    GFP_KERNEL | __GFP_NOWARN);
         if (!new_aof) {
             ret = -ENOMEM;
@@ -5610,7 +5611,7 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
         if (ret != BTF_FIELD_FOUND)
             continue;

-        new_aof = krealloc(aof, offsetof(struct btf_id_set, ids[aof->cnt + 1]),
+        new_aof = krealloc(aof, struct_size(new_aof, ids, aof->cnt + 1),
                    GFP_KERNEL | __GFP_NOWARN);
         if (!new_aof) {
             ret = -ENOMEM;
@@ -5647,7 +5648,7 @@ btf_parse_struct_metas(struct bpf_verifier_log *log, struct btf *btf)
             continue;
     parse:
         tab_cnt = tab ? tab->cnt : 0;
-        new_tab = krealloc(tab, offsetof(struct btf_struct_metas, types[tab_cnt + 1]),
+        new_tab = krealloc(tab, struct_size(new_tab, types, tab_cnt + 1),
                    GFP_KERNEL | __GFP_NOWARN);
         if (!new_tab) {
             ret = -ENOMEM;
@@ -8559,7 +8560,7 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,

     /* Grow set */
     set = krealloc(tab->sets[hook],
-               offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]),
+               struct_size(set, pairs, set_cnt + add_set->cnt),
                GFP_KERNEL | __GFP_NOWARN);
     if (!set) {
         ret = -ENOMEM;
@@ -8845,7 +8846,7 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c
     }

     tab = krealloc(btf->dtor_kfunc_tab,
-               offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
+               struct_size(tab, dtors, tab_cnt + add_cnt),
                GFP_KERNEL | __GFP_NOWARN);
     if (!tab) {
         ret = -ENOMEM;
@@ -9403,8 +9404,7 @@ btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,

     tab = btf->struct_ops_tab;
     if (!tab) {
-        tab = kzalloc(offsetof(struct btf_struct_ops_tab, ops[4]),
-                  GFP_KERNEL);
+        tab = kzalloc(struct_size(tab, ops, 4), GFP_KERNEL);
         if (!tab)
             return -ENOMEM;
         tab->capacity = 4;
@@ -9417,8 +9417,7 @@ btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops,

     if (tab->cnt == tab->capacity) {
         new_tab = krealloc(tab,
-                   offsetof(struct btf_struct_ops_tab,
-                        ops[tab->capacity * 2]),
+                   struct_size(tab, ops, tab->capacity * 2),
                    GFP_KERNEL);
         if (!new_tab)
             return -ENOMEM;

From 62e23f183839c3d718ab36ce1c0cf7cb4b9c05a4 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Thu, 1 May 2025 16:52:31 -0700
Subject: [PATCH 054/135] libbpf: Improve BTF dedup handling of "identical" BTF types

BTF dedup has a strong assumption that the compiler will deduplicate
identical types within any given compilation unit (i.e., .c file). This
property is used when establishing equivalence of two subgraphs of
types.

Unfortunately, this property doesn't always hold in practice. We've
seen cases of truly identical structs, unions, and array definitions,
and, most recently, even pointers to the same type being duplicated
within a CU.

Previously, we mitigated this on a case-by-case basis, adding a few
simple heuristics for validating that two BTF types (having two
different type IDs) are structurally the same. But this approach scales
poorly, and we can have more weird cases come up in the future.

So let's take a half-step back and implement a bit more generic
structural equivalence check, recursively. We still limit it to a
reasonable depth to avoid long reference loops.
Depth-wise limiting of a potentially cyclical graph isn't great, but,
as mentioned below, it doesn't seem to be detrimental performance-wise.
We can always improve this in the future with per-type visited markers,
if necessary.

Performance-wise this doesn't seem to affect vmlinux BTF dedup, which
makes sense because this logic kicks in not so frequently and only if
we already established a canonical candidate type match, but suddenly
find a different (but probably identical) type.

Signed-off-by: Andrii Nakryiko
Reviewed-by: Alan Maguire
Link: https://lore.kernel.org/r/20250501235231.1339822-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov
---
 tools/lib/bpf/btf.c | 143 ++++++++++++++++++++++++++++----------------
 1 file changed, 92 insertions(+), 51 deletions(-)

diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index b7513d4cce55..f18d7e6a453c 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -4356,59 +4356,109 @@ static inline __u16 btf_fwd_kind(struct btf_type *t)
     return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
 }

-/* Check if given two types are identical ARRAY definitions */
-static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
+static bool btf_dedup_identical_types(struct btf_dedup *d, __u32 id1, __u32 id2, int depth)
 {
     struct btf_type *t1, *t2;
-
-    t1 = btf_type_by_id(d->btf, id1);
-    t2 = btf_type_by_id(d->btf, id2);
-    if (!btf_is_array(t1) || !btf_is_array(t2))
+    int k1, k2;
+recur:
+    if (depth <= 0)
         return false;

-    return btf_equal_array(t1, t2);
-}
-
-/* Check if given two types are identical STRUCT/UNION definitions */
-static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
-{
-    const struct btf_member *m1, *m2;
-    struct btf_type *t1, *t2;
-    int n, i;
-
     t1 = btf_type_by_id(d->btf, id1);
     t2 = btf_type_by_id(d->btf, id2);
-    if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
+    k1 = btf_kind(t1);
+    k2 = btf_kind(t2);
+    if (k1 != k2)
         return false;

-    if (!btf_shallow_equal_struct(t1, t2))
-        return false;
-
-    m1 = btf_members(t1);
-    m2 = btf_members(t2);
-    for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
-        if (m1->type != m2->type &&
-            !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
-            !btf_dedup_identical_structs(d, m1->type, m2->type))
+    switch (k1) {
+    case BTF_KIND_UNKN: /* VOID */
+        return true;
+    case BTF_KIND_INT:
+        return btf_equal_int_tag(t1, t2);
+    case BTF_KIND_ENUM:
+    case BTF_KIND_ENUM64:
+        return btf_compat_enum(t1, t2);
+    case BTF_KIND_FWD:
+    case BTF_KIND_FLOAT:
+        return btf_equal_common(t1, t2);
+    case BTF_KIND_CONST:
+    case BTF_KIND_VOLATILE:
+    case BTF_KIND_RESTRICT:
+    case BTF_KIND_PTR:
+    case BTF_KIND_TYPEDEF:
+    case BTF_KIND_FUNC:
+    case BTF_KIND_TYPE_TAG:
+        if (t1->info != t2->info || t1->name_off != t2->name_off)
             return false;
+        id1 = t1->type;
+        id2 = t2->type;
+        goto recur;
+    case BTF_KIND_ARRAY: {
+        struct btf_array *a1, *a2;
+
+        if (!btf_compat_array(t1, t2))
+            return false;
+
+        a1 = btf_array(t1);
+        a2 = btf_array(t2);
+
+        if (a1->index_type != a2->index_type &&
+            !btf_dedup_identical_types(d, a1->index_type, a2->index_type, depth - 1))
+            return false;
+
+        if (a1->type != a2->type &&
+            !btf_dedup_identical_types(d, a1->type, a2->type, depth - 1))
+            return false;
+
+        return true;
     }
-    return true;
-}
+    case BTF_KIND_STRUCT:
+    case BTF_KIND_UNION: {
+        const struct btf_member *m1, *m2;
+        int i, n;

-static bool btf_dedup_identical_ptrs(struct btf_dedup *d, __u32 id1, __u32 id2)
-{
-    struct btf_type *t1, *t2;
+        if (!btf_shallow_equal_struct(t1, t2))
+            return false;
-    t1 = btf_type_by_id(d->btf, id1);
-    t2 = btf_type_by_id(d->btf, id2);
+        m1 = btf_members(t1);
+        m2 = btf_members(t2);
+        for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+            if (m1->type == m2->type)
+                continue;
+            if (!btf_dedup_identical_types(d, m1->type, m2->type, depth - 1))
+                return false;
+        }
+        return true;
+    }
+    case BTF_KIND_FUNC_PROTO: {
+        const struct btf_param *p1, *p2;
+        int i, n;

-    if (!btf_is_ptr(t1) || !btf_is_ptr(t2))
+        if (!btf_compat_fnproto(t1, t2))
+            return false;
+
+        if (t1->type != t2->type &&
+            !btf_dedup_identical_types(d, t1->type, t2->type, depth - 1))
+            return false;
+
+        p1 = btf_params(t1);
+        p2 = btf_params(t2);
+        for (i = 0, n = btf_vlen(t1); i < n; i++, p1++, p2++) {
+            if (p1->type == p2->type)
+                continue;
+            if (!btf_dedup_identical_types(d, p1->type, p2->type, depth - 1))
+                return false;
+        }
+        return true;
+    }
+    default:
         return false;
-
-    return t1->type == t2->type;
+    }
 }

+
 /*
  * Check equivalence of BTF type graph formed by candidate struct/union (we'll
  * call it "candidate graph" in this description for brevity) to a type graph
@@ -4527,22 +4577,13 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
          * different fields within the *same* struct. This breaks type
          * equivalence check, which makes an assumption that candidate
          * types sub-graph has a consistent and deduped-by-compiler
-         * types within a single CU. So work around that by explicitly
-         * allowing identical array types here.
+         * types within a single CU. A similar situation can happen
+         * with structs/unions sometimes, and even with pointers.
+         * So accommodate cases like this by doing a structural
+         * comparison recursively, avoiding being stuck in endless
+         * loops by limiting the depth up to which we check.
          */
-        if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
-            return 1;
-        /* It turns out that similar situation can happen with
-         * struct/union sometimes, sigh... Handle the case where
-         * structs/unions are exactly the same, down to the referenced
-         * type IDs. Anything more complicated (e.g., if referenced
-         * types are different, but equivalent) is *way more*
-         * complicated and requires a many-to-many equivalence mapping.
-         */
-        if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
-            return 1;
-        /* A similar case is again observed for PTRs. */
-        if (btf_dedup_identical_ptrs(d, hypot_type_id, cand_id))
+        if (btf_dedup_identical_types(d, hypot_type_id, cand_id, 16))
             return 1;
         return 0;
     }

From b183c0123d9ba16e147c990c02a9e6f37cac5df4 Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Mon, 5 May 2025 18:58:48 -0700
Subject: [PATCH 055/135] bpf: Check KF_bpf_rbtree_add_impl for the "case KF_ARG_PTR_TO_RB_NODE"

In a later patch, two new kfuncs will take the bpf_rb_node pointer arg.

  struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node);
  struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node);

In the check_kfunc_call, there is a "case KF_ARG_PTR_TO_RB_NODE" to
check if the reg->type should be an allocated pointer or should be
a non_owning_ref.

The later patch will need to ensure that the bpf_rb_node pointer passed
to the new bpf_rbtree_{left,right} must be a non_owning_ref. This
should be the same requirement as the existing bpf_rbtree_remove.

This patch swaps the current "if else" statement. Instead of checking
for bpf_rbtree_remove, it checks for bpf_rbtree_add. Then the new
bpf_rbtree_{left,right} will fall into the "else" case to make the
later patch simpler.
bpf_rbtree_add should be the only one that needs an allocated pointer.

This should be a no-op change considering there are only two kfuncs
taking a bpf_rb_node pointer arg, rbtree_add and rbtree_remove.

Acked-by: Kumar Kartikeya Dwivedi
Signed-off-by: Martin KaFai Lau
Link: https://lore.kernel.org/r/20250506015857.817950-2-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/verifier.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 54c6953a8b84..2e1ce7debc16 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13200,16 +13200,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
                 return ret;
             break;
         case KF_ARG_PTR_TO_RB_NODE:
-            if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
-                if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
-                    verbose(env, "rbtree_remove node input must be non-owning ref\n");
-                    return -EINVAL;
-                }
-                if (in_rbtree_lock_required_cb(env)) {
-                    verbose(env, "rbtree_remove not allowed in rbtree cb\n");
-                    return -EINVAL;
-                }
-            } else {
+            if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
                 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
                     verbose(env, "arg#%d expected pointer to allocated object\n", i);
                     return -EINVAL;
@@ -13218,6 +13209,15 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
                     verbose(env, "allocated object must be referenced\n");
                     return -EINVAL;
                 }
+            } else {
+                if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
+                    verbose(env, "rbtree_remove node input must be non-owning ref\n");
+                    return -EINVAL;
+                }
+                if (in_rbtree_lock_required_cb(env)) {
+                    verbose(env, "rbtree_remove not allowed in rbtree cb\n");
+                    return -EINVAL;
+                }
             }

             ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);

From 7faccdf4b47d2c7674692aecdb5847da0f84dbd4 Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Mon, 5 May 2025 18:58:49 -0700
Subject: [PATCH 056/135] bpf: Simplify reg0 marking for the rbtree kfuncs that return a bpf_rb_node pointer

The current rbtree kfuncs, bpf_rbtree_{first,remove}, return a
bpf_rb_node pointer. The check_kfunc_call currently checks the kfunc
btf_id instead of its return pointer type to decide if it needs to do
mark_reg_graph_node(reg0) and ref_set_non_owning(reg0).

The later patch will add bpf_rbtree_{root,left,right} that will also
return a bpf_rb_node pointer. Instead of adding more kfunc btf_id
checks to the "if" case, this patch changes the test to check the
kfunc's return type. An is_rbtree_node_type() function is added to test
if a pointer type is a bpf_rb_node. The callers have already skipped
the modifiers of the pointer type.

A note on ref_set_non_owning(): although bpf_rbtree_remove() also
returns a bpf_rb_node pointer, it has the KF_ACQUIRE flag. Thus, its
reg0 will not become non-owning.
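For illustration, a minimal BPF-side sketch of the marking described
above (glock, groot and node_data are hypothetical selftest-style
definitions, not part of this patch):

  bpf_spin_lock(&glock);
  rb_n = bpf_rbtree_first(&groot); /* returns a bpf_rb_node pointer; reg0 is marked non-owning */
  if (rb_n) {
      n = container_of(rb_n, struct node_data, node);
      /* n is only usable while glock is held */
  }
  bpf_spin_unlock(&glock);

With is_rbtree_node_type(), any non-acquire kfunc returning a
bpf_rb_node pointer gets this treatment without growing the btf_id
checks.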
Acked-by: Kumar Kartikeya Dwivedi
Signed-off-by: Martin KaFai Lau
Link: https://lore.kernel.org/r/20250506015857.817950-3-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/verifier.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e1ce7debc16..bf14da00f09a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11987,6 +11987,11 @@ static bool is_kfunc_arg_res_spin_lock(const struct btf *btf, const struct btf_p
     return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RES_SPIN_LOCK_ID);
 }

+static bool is_rbtree_node_type(const struct btf_type *t)
+{
+    return t == btf_type_by_id(btf_vmlinux, kf_arg_btf_ids[KF_ARG_RB_NODE_ID]);
+}
+
 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
                   const struct btf_param *arg)
 {
@@ -13750,8 +13755,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                 struct btf_field *field = meta.arg_list_head.field;

                 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
-            } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-                   meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
+            } else if (is_rbtree_node_type(ptr_type)) {
                 struct btf_field *field = meta.arg_rbtree_root.field;

                 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
@@ -13881,7 +13885,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
             if (is_kfunc_ret_null(&meta))
                 regs[BPF_REG_0].id = id;
             regs[BPF_REG_0].ref_obj_id = id;
-        } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
+        } else if (is_rbtree_node_type(ptr_type)) {
             ref_set_non_owning(env, &regs[BPF_REG_0]);
         }

From 9e3e66c553f705de51707c7ddc7f35ce159a8ef1 Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Mon, 5 May 2025 18:58:50 -0700
Subject: [PATCH 057/135] bpf: Add bpf_rbtree_{root,left,right} kfunc

A bpf fq implementation that is much closer to the kernel fq will need
to traverse the rbtree:
https://lore.kernel.org/bpf/20250418224652.105998-13-martin.lau@linux.dev/

The much-simplified logic that uses bpf_rbtree_{root,left,right} to
traverse the rbtree looks like:

  struct fq_flow {
      struct bpf_rb_node fq_node;
      struct bpf_rb_node rate_node;
      struct bpf_refcount refcount;
      unsigned long sk_long;
  };

  struct fq_flow_root {
      struct bpf_spin_lock lock;
      struct bpf_rb_root root __contains(fq_flow, fq_node);
  };

  struct fq_flow *fq_classify(...)
  {
      struct bpf_rb_node *tofree[FQ_GC_MAX];
      struct fq_flow_root *root;
      struct fq_flow *gc_f, *f;
      struct bpf_rb_node *p;
      int i, fcnt = 0;

      /* ... */

      f = NULL;
      bpf_spin_lock(&root->lock);
      p = bpf_rbtree_root(&root->root);
      while (can_loop) {
          if (!p)
              break;

          gc_f = bpf_rb_entry(p, struct fq_flow, fq_node);
          if (gc_f->sk_long == sk_long) {
              f = bpf_refcount_acquire(gc_f);
              break;
          }

          /* To be removed from the rbtree */
          if (fcnt < FQ_GC_MAX && fq_gc_candidate(gc_f, jiffies_now))
              tofree[fcnt++] = p;

          if (gc_f->sk_long > sk_long)
              p = bpf_rbtree_left(&root->root, p);
          else
              p = bpf_rbtree_right(&root->root, p);
      }

      /* remove from the rbtree */
      for (i = 0; i < fcnt; i++) {
          p = tofree[i];
          tofree[i] = bpf_rbtree_remove(&root->root, p);
      }

      bpf_spin_unlock(&root->lock);

      /* bpf_obj_drop the fq_flow(s) that have just been removed
       * from the rbtree.
       */
      for (i = 0; i < fcnt; i++) {
          p = tofree[i];
          if (p) {
              gc_f = bpf_rb_entry(p, struct fq_flow, fq_node);
              bpf_obj_drop(gc_f);
          }
      }

      return f;
  }

The above simplified code needs to traverse the rbtree for two
purposes: 1) find the flow with the desired sk_long value, and
2) while searching for the sk_long, collect flows that are
fq_gc_candidates. They will be removed from the rbtree.

This patch adds the bpf_rbtree_{root,left,right} kfuncs to enable the
rbtree traversal. The returned bpf_rb_node pointer will be a non-owning
reference, the same as the returned pointer of the existing
bpf_rbtree_first kfunc.

Acked-by: Kumar Kartikeya Dwivedi
Signed-off-by: Martin KaFai Lau
Link: https://lore.kernel.org/r/20250506015857.817950-4-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/helpers.c  | 30 ++++++++++++++++++++++++++++++
 kernel/bpf/verifier.c | 22 ++++++++++++++++++----
 2 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index e3a2662f4e33..36150d340c16 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2366,6 +2366,33 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
     return (struct bpf_rb_node *)rb_first_cached(r);
 }

+__bpf_kfunc struct bpf_rb_node *bpf_rbtree_root(struct bpf_rb_root *root)
+{
+    struct rb_root_cached *r = (struct rb_root_cached *)root;
+
+    return (struct bpf_rb_node *)r->rb_root.rb_node;
+}
+
+__bpf_kfunc struct bpf_rb_node *bpf_rbtree_left(struct bpf_rb_root *root, struct bpf_rb_node *node)
+{
+    struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
+
+    if (READ_ONCE(node_internal->owner) != root)
+        return NULL;
+
+    return (struct bpf_rb_node *)node_internal->rb_node.rb_left;
+}
+
+__bpf_kfunc struct bpf_rb_node *bpf_rbtree_right(struct bpf_rb_root *root, struct bpf_rb_node *node)
+{
+    struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
+
+    if (READ_ONCE(node_internal->owner) != root)
+        return NULL;
+
+    return (struct bpf_rb_node *)node_internal->rb_node.rb_right;
+}
+
 /**
  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
  * kfunc which is not stored in a map as a kptr, must be released by calling
@@ -3214,6 +3241,9 @@ BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_rbtree_root, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_rbtree_left, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_rbtree_right, KF_RET_NULL)

 #ifdef CONFIG_CGROUPS
 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bf14da00f09a..51a17e64a0a9 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12081,6 +12081,9 @@ enum special_kfunc_type {
     KF_bpf_rbtree_remove,
     KF_bpf_rbtree_add_impl,
     KF_bpf_rbtree_first,
+    KF_bpf_rbtree_root,
+    KF_bpf_rbtree_left,
+    KF_bpf_rbtree_right,
     KF_bpf_dynptr_from_skb,
     KF_bpf_dynptr_from_xdp,
     KF_bpf_dynptr_slice,
@@ -12121,6 +12124,9 @@ BTF_ID(func, bpf_rdonly_cast)
 BTF_ID(func, bpf_rbtree_remove)
 BTF_ID(func, bpf_rbtree_add_impl)
 BTF_ID(func, bpf_rbtree_first)
+BTF_ID(func, bpf_rbtree_root)
+BTF_ID(func, bpf_rbtree_left)
+BTF_ID(func, bpf_rbtree_right)
 #ifdef CONFIG_NET
 BTF_ID(func, bpf_dynptr_from_skb)
 BTF_ID(func, bpf_dynptr_from_xdp)
@@ -12156,6 +12162,9 @@ BTF_ID(func, bpf_rcu_read_unlock)
 BTF_ID(func, bpf_rbtree_remove)
 BTF_ID(func, bpf_rbtree_add_impl)
 BTF_ID(func, bpf_rbtree_first)
+BTF_ID(func, bpf_rbtree_root)
+BTF_ID(func, bpf_rbtree_left)
+BTF_ID(func, bpf_rbtree_right)
 #ifdef CONFIG_NET
 BTF_ID(func, bpf_dynptr_from_skb)
 BTF_ID(func, bpf_dynptr_from_xdp)
@@ -12591,7 +12600,10 @@ static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
 {
     return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
            btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-           btf_id == special_kfunc_list[KF_bpf_rbtree_first];
+           btf_id == special_kfunc_list[KF_bpf_rbtree_first] ||
+           btf_id == special_kfunc_list[KF_bpf_rbtree_root] ||
+           btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
+           btf_id == special_kfunc_list[KF_bpf_rbtree_right];
 }

 static bool is_bpf_iter_num_api_kfunc(u32 btf_id)
@@ -12691,7 +12703,9 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
         break;
     case BPF_RB_NODE:
         ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-               kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]);
+               kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
+               kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] ||
+               kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]);
         break;
     default:
         verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
@@ -13216,11 +13230,11 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
                 }
             } else {
                 if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
-                    verbose(env, "rbtree_remove node input must be non-owning ref\n");
+                    verbose(env, "%s node input must be non-owning ref\n", func_name);
                     return -EINVAL;
                 }
                 if (in_rbtree_lock_required_cb(env)) {
-                    verbose(env, "rbtree_remove not allowed in rbtree cb\n");
+                    verbose(env, "%s not allowed in rbtree cb\n", func_name);
                     return -EINVAL;
                 }
             }

From 2ddef1783c43ba81a05dec0a5781ebbc61a3c089 Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Mon, 5 May 2025 18:58:51 -0700
Subject: [PATCH 058/135] bpf: Allow refcounted bpf_rb_node used in bpf_rbtree_{remove,left,right}

The bpf_rbtree_{remove,left,right} kfuncs require the root's lock to be
held.
They also check that node_internal->owner still points to that root
before proceeding, so it is safe to allow a refcounted bpf_rb_node
pointer to be used in these kfuncs.

In a bpf fq implementation which is much closer to the kernel fq,
https://lore.kernel.org/bpf/20250418224652.105998-13-martin.lau@linux.dev/,
a networking flow (allocated by bpf_obj_new) can be added to two
different rbtrees. There are cases where the flow is looked up in one
rbtree, its refcount is taken, and then the flow is removed from
another rbtree:

  struct fq_flow {
      struct bpf_rb_node fq_node;
      struct bpf_rb_node rate_node;
      struct bpf_refcount refcount;
      unsigned long sk_long;
  };

  int bpf_fq_enqueue(...)
  {
      /* ... */

      bpf_spin_lock(&root->lock);
      while (can_loop) {
          /* ... */
          if (!p)
              break;
          gc_f = bpf_rb_entry(p, struct fq_flow, fq_node);
          if (gc_f->sk_long == sk_long) {
              f = bpf_refcount_acquire(gc_f);
              break;
          }
          /* ... */
      }
      bpf_spin_unlock(&root->lock);

      if (f) {
          bpf_spin_lock(&q->lock);
          bpf_rbtree_remove(&q->delayed, &f->rate_node);
          bpf_spin_unlock(&q->lock);
      }
  }

bpf_rbtree_{left,right} do not need this change but are relaxed
together with bpf_rbtree_remove instead of adding extra verifier logic
to exclude these kfuncs.

To avoid bisect failures, this patch also changes the selftests
together. The "rbtree_api_remove_unadded_node" test no longer expects a
verifier error. The test now expects bpf_rbtree_remove(&groot, &m->node)
to return NULL. The test uses __retval(0) to ensure this NULL return
value. Some of the "only take non-owning..." failure messages are also
changed.

Acked-by: Kumar Kartikeya Dwivedi
Signed-off-by: Martin KaFai Lau
Link: https://lore.kernel.org/r/20250506015857.817950-5-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/verifier.c                         |  4 +--
 .../testing/selftests/bpf/progs/rbtree_fail.c | 29 ++++++++++---------
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 51a17e64a0a9..9093a351b0b3 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13229,8 +13229,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
                 return -EINVAL;
             }
         } else {
-            if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
-                verbose(env, "%s node input must be non-owning ref\n", func_name);
+            if (!type_is_non_owning_ref(reg->type) && !reg->ref_obj_id) {
+                verbose(env, "%s can only take non-owning or refcounted bpf_rb_node pointer\n", func_name);
                 return -EINVAL;
             }
             if (in_rbtree_lock_required_cb(env)) {
diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c
index dbd5eee8e25e..4acb6af2dfe3 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_fail.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c
@@ -69,11 +69,11 @@ long rbtree_api_nolock_first(void *ctx)
 }

 SEC("?tc")
-__failure __msg("rbtree_remove node input must be non-owning ref")
+__retval(0)
 long rbtree_api_remove_unadded_node(void *ctx)
 {
     struct node_data *n, *m;
-    struct bpf_rb_node *res;
+    struct bpf_rb_node *res_n, *res_m;

     n = bpf_obj_new(typeof(*n));
     if (!n)
@@ -88,19 +88,20 @@ long rbtree_api_remove_unadded_node(void *ctx)
     bpf_spin_lock(&glock);
     bpf_rbtree_add(&groot, &n->node, less);

-    /* This remove should pass verifier */
-    res = bpf_rbtree_remove(&groot, &n->node);
-    n = container_of(res, struct node_data, node);
+    res_n = bpf_rbtree_remove(&groot, &n->node);

-    /* This remove shouldn't, m isn't in an rbtree */
-    res = bpf_rbtree_remove(&groot, &m->node);
-    m = container_of(res, struct node_data, node);
+    res_m = bpf_rbtree_remove(&groot, &m->node);
     bpf_spin_unlock(&glock);

-    if (n)
-        bpf_obj_drop(n);
-    if (m)
-        bpf_obj_drop(m);
+    bpf_obj_drop(m);
+    if (res_n)
+        bpf_obj_drop(container_of(res_n, struct node_data, node));
+    if (res_m) {
+        bpf_obj_drop(container_of(res_m, struct node_data, node));
+        /* m was not added to the rbtree */
+        return 2;
+    }
+
     return 0;
 }

@@ -178,7 +179,7 @@ err_out:
 }

 SEC("?tc")
-__failure __msg("rbtree_remove node input must be non-owning ref")
+__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer")
 long rbtree_api_add_release_unlock_escape(void *ctx)
 {
     struct node_data *n;
@@ -202,7 +203,7 @@ long rbtree_api_add_release_unlock_escape(void *ctx)
 }

 SEC("?tc")
-__failure __msg("rbtree_remove node input must be non-owning ref")
+__failure __msg("bpf_rbtree_remove can only take non-owning or refcounted bpf_rb_node pointer")
 long rbtree_api_first_release_unlock_escape(void *ctx)
 {
     struct bpf_rb_node *res;

From 47ada65c5cf91b9cb51abf5bd32513ebd7720941 Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Mon, 5 May 2025 18:58:52 -0700
Subject: [PATCH 059/135] selftests/bpf: Add tests for bpf_rbtree_{root,left,right}

This patch adds a much-simplified version of the rbtree usage in the
kernel sch_fq qdisc. It has a "struct node_data" which can be added to
two different rbtrees, each ordered by a different key.

The test first populates both rbtrees. It then searches for a
lookup_key in the "groot0" rbtree. Once the lookup_key is found, the
node's refcount is taken. The node is then removed from the other
"groot1" rbtree.

While searching for the lookup_key, the test also tries to remove all
rbnodes in the path leading to the lookup_key.

The test_{root,left,right}_spinlock_true tests ensure that the return
value of the bpf_rbtree functions is a non_own_ref node pointer. This
is done by forcing a verifier error by calling the helper
bpf_jiffies64() while holding the spinlock. The tests then check for
the verifier message
"call bpf_rbtree...R0=rcu_ptr_or_null_node..."

The other test_{root,left,right}_spinlock_false tests ensure that these
kfuncs must be called with the spinlock held.
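The shape of one generated spinlock_true test, simplified from the
TEST_ROOT/TEST_LR macros in the diff below (a sketch, not additional
test code):

  bpf_spin_lock(&glock0);
  rb_n = bpf_rbtree_root(&groot0);
  if (rb_n)
      jiffies = bpf_jiffies64(); /* helper call under the lock forces the verifier error */
  bpf_spin_unlock(&glock0);
  return !!jiffies;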
Suggested-by: Kumar Kartikeya Dwivedi # Check non_own_ref marking Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20250506015857.817950-6-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/prog_tests/rbtree.c | 6 + .../selftests/bpf/progs/rbtree_search.c | 206 ++++++++++++++++++ 2 files changed, 212 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/rbtree_search.c diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c index 9818f06c97c5..d8f3d7a45fe9 100644 --- a/tools/testing/selftests/bpf/prog_tests/rbtree.c +++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c @@ -8,6 +8,7 @@ #include "rbtree_fail.skel.h" #include "rbtree_btf_fail__wrong_node_type.skel.h" #include "rbtree_btf_fail__add_wrong_type.skel.h" +#include "rbtree_search.skel.h" static void test_rbtree_add_nodes(void) { @@ -187,3 +188,8 @@ void test_rbtree_fail(void) { RUN_TESTS(rbtree_fail); } + +void test_rbtree_search(void) +{ + RUN_TESTS(rbtree_search); +} diff --git a/tools/testing/selftests/bpf/progs/rbtree_search.c b/tools/testing/selftests/bpf/progs/rbtree_search.c new file mode 100644 index 000000000000..098ef970fac1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rbtree_search.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct node_data { + struct bpf_refcount ref; + struct bpf_rb_node r0; + struct bpf_rb_node r1; + int key0; + int key1; +}; + +#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock0; +private(A) struct bpf_rb_root groot0 __contains(node_data, r0); + +private(B) struct bpf_spin_lock glock1; +private(B) struct bpf_rb_root groot1 __contains(node_data, r1); + +#define rb_entry(ptr, type, member) container_of(ptr, type, member) +#define NR_NODES 16 + +int zero = 0; + +static bool less0(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = rb_entry(a, struct node_data, r0); + node_b = rb_entry(b, struct node_data, r0); + + return node_a->key0 < node_b->key0; +} + +static bool less1(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_data *node_a; + struct node_data *node_b; + + node_a = rb_entry(a, struct node_data, r1); + node_b = rb_entry(b, struct node_data, r1); + + return node_a->key1 < node_b->key1; +} + +SEC("syscall") +__retval(0) +long rbtree_search(void *ctx) +{ + struct bpf_rb_node *rb_n, *rb_m, *gc_ns[NR_NODES]; + long lookup_key = NR_NODES / 2; + struct node_data *n, *m; + int i, nr_gc = 0; + + for (i = zero; i < NR_NODES && can_loop; i++) { + n = bpf_obj_new(typeof(*n)); + if (!n) + return __LINE__; + + m = bpf_refcount_acquire(n); + + n->key0 = i; + m->key1 = i; + + bpf_spin_lock(&glock0); + bpf_rbtree_add(&groot0, &n->r0, less0); + bpf_spin_unlock(&glock0); + + bpf_spin_lock(&glock1); + bpf_rbtree_add(&groot1, &m->r1, less1); + bpf_spin_unlock(&glock1); + } + + n = NULL; + bpf_spin_lock(&glock0); + rb_n = bpf_rbtree_root(&groot0); + while (can_loop) { + if (!rb_n) { + bpf_spin_unlock(&glock0); + return __LINE__; + } + + n = rb_entry(rb_n, struct node_data, r0); + if (lookup_key == n->key0) + break; + if (nr_gc < NR_NODES) + gc_ns[nr_gc++] = rb_n; + if (lookup_key < n->key0) + rb_n = bpf_rbtree_left(&groot0, rb_n); + else + rb_n = bpf_rbtree_right(&groot0, rb_n); + } 
+
+    if (!n || lookup_key != n->key0) {
+        bpf_spin_unlock(&glock0);
+        return __LINE__;
+    }
+
+    for (i = 0; i < nr_gc; i++) {
+        rb_n = gc_ns[i];
+        gc_ns[i] = bpf_rbtree_remove(&groot0, rb_n);
+    }
+
+    m = bpf_refcount_acquire(n);
+    bpf_spin_unlock(&glock0);
+
+    for (i = 0; i < nr_gc; i++) {
+        rb_n = gc_ns[i];
+        if (rb_n) {
+            n = rb_entry(rb_n, struct node_data, r0);
+            bpf_obj_drop(n);
+        }
+    }
+
+    if (!m)
+        return __LINE__;
+
+    bpf_spin_lock(&glock1);
+    rb_m = bpf_rbtree_remove(&groot1, &m->r1);
+    bpf_spin_unlock(&glock1);
+    bpf_obj_drop(m);
+    if (!rb_m)
+        return __LINE__;
+    bpf_obj_drop(rb_entry(rb_m, struct node_data, r1));
+
+    return 0;
+}
+
+#define TEST_ROOT(dolock)                \
+SEC("syscall")                        \
+__failure __msg(MSG)                    \
+long test_root_spinlock_##dolock(void *ctx)        \
+{                            \
+    struct bpf_rb_node *rb_n;            \
+    __u64 jiffies = 0;                \
+                            \
+    if (dolock)                    \
+        bpf_spin_lock(&glock0);            \
+    rb_n = bpf_rbtree_root(&groot0);        \
+    if (rb_n)                    \
+        jiffies = bpf_jiffies64();        \
+    if (dolock)                    \
+        bpf_spin_unlock(&glock0);        \
+                            \
+    return !!jiffies;                \
+}
+
+#define TEST_LR(op, dolock)                \
+SEC("syscall")                        \
+__failure __msg(MSG)                    \
+long test_##op##_spinlock_##dolock(void *ctx)        \
+{                            \
+    struct bpf_rb_node *rb_n;            \
+    struct node_data *n;                \
+    __u64 jiffies = 0;                \
+                            \
+    bpf_spin_lock(&glock0);                \
+    rb_n = bpf_rbtree_root(&groot0);        \
+    if (!rb_n) {                    \
+        bpf_spin_unlock(&glock0);        \
+        return 1;                \
+    }                        \
+    n = rb_entry(rb_n, struct node_data, r0);    \
+    n = bpf_refcount_acquire(n);            \
+    bpf_spin_unlock(&glock0);            \
+    if (!n)                        \
+        return 1;                \
+                            \
+    if (dolock)                    \
+        bpf_spin_lock(&glock0);            \
+    rb_n = bpf_rbtree_##op(&groot0, &n->r0);    \
+    if (rb_n)                    \
+        jiffies = bpf_jiffies64();        \
+    if (dolock)                    \
+        bpf_spin_unlock(&glock0);        \
+                            \
+    return !!jiffies;                \
+}
+
+/*
+ * Use a separate MSG macro instead of passing to TEST_XXX(..., MSG)
+ * to ensure the message itself is not in the bpf prog lineinfo
+ * which the verifier includes in its log.
+ * Otherwise, the test_loader will incorrectly match the prog lineinfo
+ * instead of the log generated by the verifier.
+ */
+#define MSG "call bpf_rbtree_root{{.+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_ROOT(true)
+#undef MSG
+#define MSG "call bpf_rbtree_{{(left|right).+}}; R0{{(_w)?}}=rcu_ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_LR(left, true)
+TEST_LR(right, true)
+#undef MSG
+
+#define MSG "bpf_spin_lock at off=0 must be held for bpf_rb_root"
+TEST_ROOT(false)
+TEST_LR(left, false)
+TEST_LR(right, false)
+#undef MSG
+
+char _license[] SEC("license") = "GPL";

From 3fab84f00d3274e1fd19054a409a9c804261e4b9 Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Mon, 5 May 2025 18:58:53 -0700
Subject: [PATCH 060/135] bpf: Simplify reg0 marking for the list kfuncs that return a bpf_list_node pointer

The next patch will add bpf_list_{front,back} kfuncs to peek the head
and tail of a list. Both of them will return a 'struct bpf_list_node *'.

Following the earlier change for rbtree, this patch checks whether the
return btf type is a 'struct bpf_list_node' pointer instead of checking
each kfunc individually to decide if mark_reg_graph_node should be
called. This will make the bpf_list_{front,back} kfunc addition easier
in the later patch.
Acked-by: Kumar Kartikeya Dwivedi
Signed-off-by: Martin KaFai Lau
Link: https://lore.kernel.org/r/20250506015857.817950-7-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/verifier.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9093a351b0b3..acb2f44316cc 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11992,6 +11992,11 @@ static bool is_rbtree_node_type(const struct btf_type *t)
     return t == btf_type_by_id(btf_vmlinux, kf_arg_btf_ids[KF_ARG_RB_NODE_ID]);
 }

+static bool is_list_node_type(const struct btf_type *t)
+{
+    return t == btf_type_by_id(btf_vmlinux, kf_arg_btf_ids[KF_ARG_LIST_NODE_ID]);
+}
+
 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
                   const struct btf_param *arg)
 {
@@ -13764,8 +13769,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                 insn_aux->kptr_struct_meta =
                     btf_find_struct_meta(meta.arg_btf,
                                  meta.arg_btf_id);
-            } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
-                   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
+            } else if (is_list_node_type(ptr_type)) {
                 struct btf_field *field = meta.arg_list_head.field;

                 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);

From fb5b480205bad3936b054b86f7c9d2bd7835caac Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Mon, 5 May 2025 18:58:54 -0700
Subject: [PATCH 061/135] bpf: Add bpf_list_{front,back} kfunc

The kernel fq qdisc implementation only needs to look at the fields of
the first node in a list but does not always need to remove it from the
list. It is more convenient to have a peek kfunc for the list. It works
similarly to bpf_rbtree_first().

This patch adds bpf_list_{front,back} kfuncs. The verifier is changed
such that the return pointer of a kfunc returning
'struct bpf_list_node *' will be marked as non-owning. The exception is
the KF_ACQUIRE kfunc. The net effect is that only the new
bpf_list_{front,back} kfuncs will have their return pointers marked as
non-owning.
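As an illustration, peeking without removing then looks like this on
the BPF side (ghead, glock and node_data are hypothetical definitions
in the style of the selftest added later; a sketch, not part of this
patch):

  bpf_spin_lock(&glock);
  l_n = bpf_list_front(&ghead); /* non-owning, may be NULL */
  if (l_n) {
      n = container_of(l_n, struct node_data, l);
      /* inspect n->key; no pop/push round-trip needed */
  }
  bpf_spin_unlock(&glock);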
Acked-by: Kumar Kartikeya Dwivedi Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20250506015857.817950-8-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 22 ++++++++++++++++++++++ kernel/bpf/verifier.c | 12 ++++++++++-- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 36150d340c16..78cefb41266a 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2293,6 +2293,26 @@ __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) return __bpf_list_del(head, true); } +__bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head) +{ + struct list_head *h = (struct list_head *)head; + + if (list_empty(h) || unlikely(!h->next)) + return NULL; + + return (struct bpf_list_node *)h->next; +} + +__bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head) +{ + struct list_head *h = (struct list_head *)head; + + if (list_empty(h) || unlikely(!h->next)) + return NULL; + + return (struct bpf_list_node *)h->prev; +} + __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, struct bpf_rb_node *node) { @@ -3236,6 +3256,8 @@ BTF_ID_FLAGS(func, bpf_list_push_front_impl) BTF_ID_FLAGS(func, bpf_list_push_back_impl) BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index acb2f44316cc..99aa2c890e7b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -12079,6 +12079,8 @@ enum special_kfunc_type { KF_bpf_list_push_back_impl, KF_bpf_list_pop_front, KF_bpf_list_pop_back, + KF_bpf_list_front, + KF_bpf_list_back, KF_bpf_cast_to_kern_ctx, KF_bpf_rdonly_cast, KF_bpf_rcu_read_lock, @@ -12124,6 +12126,8 @@ BTF_ID(func, bpf_list_push_front_impl) BTF_ID(func, bpf_list_push_back_impl) BTF_ID(func, bpf_list_pop_front) BTF_ID(func, bpf_list_pop_back) +BTF_ID(func, bpf_list_front) +BTF_ID(func, bpf_list_back) BTF_ID(func, bpf_cast_to_kern_ctx) BTF_ID(func, bpf_rdonly_cast) BTF_ID(func, bpf_rbtree_remove) @@ -12160,6 +12164,8 @@ BTF_ID(func, bpf_list_push_front_impl) BTF_ID(func, bpf_list_push_back_impl) BTF_ID(func, bpf_list_pop_front) BTF_ID(func, bpf_list_pop_back) +BTF_ID(func, bpf_list_front) +BTF_ID(func, bpf_list_back) BTF_ID(func, bpf_cast_to_kern_ctx) BTF_ID(func, bpf_rdonly_cast) BTF_ID(func, bpf_rcu_read_lock) @@ -12598,7 +12604,9 @@ static bool is_bpf_list_api_kfunc(u32 btf_id) return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] || btf_id == special_kfunc_list[KF_bpf_list_pop_front] || - btf_id == special_kfunc_list[KF_bpf_list_pop_back]; + btf_id == special_kfunc_list[KF_bpf_list_pop_back] || + btf_id == special_kfunc_list[KF_bpf_list_front] || + btf_id == special_kfunc_list[KF_bpf_list_back]; } static bool is_bpf_rbtree_api_kfunc(u32 btf_id) @@ -13903,7 +13911,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (is_kfunc_ret_null(&meta)) regs[BPF_REG_0].id = id; regs[BPF_REG_0].ref_obj_id = id; - } else if (is_rbtree_node_type(ptr_type)) { + } else if (is_rbtree_node_type(ptr_type) || 
+               is_list_node_type(ptr_type)) {
             ref_set_non_owning(env, &regs[BPF_REG_0]);
         }

From 29318b4d5dc348367dee8ad0ad7b89350cf1161e Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Mon, 5 May 2025 18:58:55 -0700
Subject: [PATCH 062/135] selftests/bpf: Add test for bpf_list_{front,back}

This patch adds the "list_peek" test to use the new
bpf_list_{front,back} kfuncs.

The test_{front,back}* tests ensure that the return value is a
non_own_ref node pointer and that the spinlock must be held.

Suggested-by: Kumar Kartikeya Dwivedi # check non_own_ref marking
Signed-off-by: Martin KaFai Lau
Link: https://lore.kernel.org/r/20250506015857.817950-9-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov
---
 .../selftests/bpf/prog_tests/linked_list.c    |   6 +
 .../selftests/bpf/progs/linked_list_peek.c    | 113 ++++++++++++++++++
 2 files changed, 119 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/linked_list_peek.c

diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c
index 77d07e0a4a55..5266c7022863 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_list.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c
@@ -7,6 +7,7 @@

 #include "linked_list.skel.h"
 #include "linked_list_fail.skel.h"
+#include "linked_list_peek.skel.h"

 static char log_buf[1024 * 1024];

@@ -805,3 +806,8 @@ void test_linked_list(void)
     test_linked_list_success(LIST_IN_LIST, true);
     test_linked_list_success(TEST_ALL, false);
 }
+
+void test_linked_list_peek(void)
+{
+    RUN_TESTS(linked_list_peek);
+}
diff --git a/tools/testing/selftests/bpf/progs/linked_list_peek.c b/tools/testing/selftests/bpf/progs/linked_list_peek.c
new file mode 100644
index 000000000000..264e81bfb287
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/linked_list_peek.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_experimental.h"
+
+struct node_data {
+    struct bpf_list_node l;
+    int key;
+};
+
+#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
+private(A) struct bpf_spin_lock glock;
+private(A) struct bpf_list_head ghead __contains(node_data, l);
+
+#define list_entry(ptr, type, member) container_of(ptr, type, member)
+#define NR_NODES 16
+
+int zero = 0;
+
+SEC("syscall")
+__retval(0)
+long list_peek(void *ctx)
+{
+    struct bpf_list_node *l_n;
+    struct node_data *n;
+    int i, err = 0;
+
+    bpf_spin_lock(&glock);
+    l_n = bpf_list_front(&ghead);
+    bpf_spin_unlock(&glock);
+    if (l_n)
+        return __LINE__;
+
+    bpf_spin_lock(&glock);
+    l_n = bpf_list_back(&ghead);
+    bpf_spin_unlock(&glock);
+    if (l_n)
+        return __LINE__;
+
+    for (i = zero; i < NR_NODES && can_loop; i++) {
+        n = bpf_obj_new(typeof(*n));
+        if (!n)
+            return __LINE__;
+        n->key = i;
+        bpf_spin_lock(&glock);
+        bpf_list_push_back(&ghead, &n->l);
+        bpf_spin_unlock(&glock);
+    }
+
+    bpf_spin_lock(&glock);
+
+    l_n = bpf_list_front(&ghead);
+    if (!l_n) {
+        err = __LINE__;
+        goto done;
+    }
+
+    n = list_entry(l_n, struct node_data, l);
+    if (n->key != 0) {
+        err = __LINE__;
+        goto done;
+    }
+
+    l_n = bpf_list_back(&ghead);
+    if (!l_n) {
+        err = __LINE__;
+        goto done;
+    }
+
+    n = list_entry(l_n, struct node_data, l);
+    if (n->key != NR_NODES - 1) {
+        err = __LINE__;
+        goto done;
+    }
+
+done:
+    bpf_spin_unlock(&glock);
+    return err;
+}
+
+#define TEST_FB(op, dolock)                \
+SEC("syscall")                        \
+__failure __msg(MSG)                    \
+long test_##op##_spinlock_##dolock(void *ctx)        \
+{                            \
+    struct bpf_list_node *l_n;            \
+    __u64 jiffies = 0;                \
+                            \
+    if (dolock)                    \
+        bpf_spin_lock(&glock);            \
+    l_n = bpf_list_##op(&ghead);            \
+    if (l_n)                    \
+        jiffies = bpf_jiffies64();        \
+    if (dolock)                    \
+        bpf_spin_unlock(&glock);        \
+                            \
+    return !!jiffies;                \
+}
+
+#define MSG "call bpf_list_{{(front|back).+}}; R0{{(_w)?}}=ptr_or_null_node_data(id={{[0-9]+}},non_own_ref"
+TEST_FB(front, true)
+TEST_FB(back, true)
+#undef MSG
+
+#define MSG "bpf_spin_lock at off=0 must be held for bpf_list_head"
+TEST_FB(front, false)
+TEST_FB(back, false)
+#undef MSG
+
+char _license[] SEC("license") = "GPL";

From 43745d11bfd9683abdf08ad7a5cc403d6a9ffd15 Mon Sep 17 00:00:00 2001
From: YiFei Zhu
Date: Mon, 28 Apr 2025 21:15:36 +0000
Subject: [PATCH 063/135] bpftool: Fix regression of "bpftool cgroup tree" EINVAL on older kernels

If cgroup_has_attached_progs queries an attach type not supported by
the running kernel, due to the kernel being older than the bpftool
build, it would encounter an -EINVAL from the BPF_PROG_QUERY syscall.

Prior to commit 98b303c9bf05 ("bpftool: Query only cgroup-related
attach types"), this EINVAL would be ignored by the function, allowing
the function to only consider supported attach types. The commit
changed this so that, instead of querying all attach types, only attach
types from the array `cgroup_attach_types` are queried. The assumption
is that because these are only cgroup attach types, they should all be
supported. Unfortunately this assumption may be false when the kernel
is older than the bpftool build, where the attach types queried by
bpftool are not yet implemented in the kernel. This would result in
errors such as:

  $ bpftool cgroup tree
  CgroupPath
  ID       AttachType      AttachFlags     Name
  Error: can't query bpf programs attached to /sys/fs/cgroup: Invalid argument

This patch restores the logic of ignoring EINVAL from prior to that
patch.
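For reference, a sketch of how the EINVAL arises on an older kernel
(the attach type value is illustrative):

  union bpf_attr attr = {};

  attr.query.target_fd = cgroup_fd;
  attr.query.attach_type = BPF_CGROUP_UNIX_CONNECT; /* unknown to an old kernel */
  /* bpf(BPF_PROG_QUERY, &attr, sizeof(attr)) then fails with errno == EINVAL,
   * which this patch again treats as "unsupported attach type" rather than
   * a hard error.
   */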
Fixes: 98b303c9bf05 ("bpftool: Query only cgroup-related attach types")
Reported-by: Sagarika Sharma
Reported-by: Minh-Anh Nguyen
Signed-off-by: YiFei Zhu
Signed-off-by: Andrii Nakryiko
Acked-by: Quentin Monnet
Link: https://lore.kernel.org/bpf/20250428211536.1651456-1-zhuyifei@google.com
---
 tools/bpf/bpftool/cgroup.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 93b139bfb988..3f1d6be51215 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -221,7 +221,7 @@ static int cgroup_has_attached_progs(int cgroup_fd)
     for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) {
         int count = count_attached_bpf_progs(cgroup_fd, cgroup_attach_types[i]);

-        if (count < 0)
+        if (count < 0 && errno != EINVAL)
             return -1;

         if (count > 0) {

From b69d4413aa1961930fbf9ffad8376d577378daf9 Mon Sep 17 00:00:00 2001
From: Martin KaFai Lau
Date: Wed, 7 May 2025 13:32:32 -0700
Subject: [PATCH 064/135] bpftool: Fix cgroup command to only show cgroup bpf programs

The netkit program is not a cgroup bpf program and should not be shown
in the output of the "bpftool cgroup show" command.

However, if the netkit device happens to have ifindex 3, the
"bpftool cgroup show" command will output the netkit bpf program as
well:

  > ip -d link show dev nk1
  3: nk1@if2: ... link/ether ... netkit mode ...

  > bpftool net show
  tc:
  nk1(3) netkit/peer tw_ns_nk2phy prog_id 469447

  > bpftool cgroup show /sys/fs/cgroup/...
  ID       AttachType      AttachFlags     Name
  ...      ...             ...
  469447   netkit_peer                     tw_ns_nk2phy

The reason is that the target_fd (which is the cgroup_fd here) and the
target_ifindex are in a union in uapi/linux/bpf.h. bpftool iterates
over all values in "enum bpf_attach_type", which includes non-cgroup
attach types like netkit. The cgroup_fd is usually 3 here, so the bug
is triggered when the netkit ifindex just happens to be 3 as well.

bpftool's cgroup.c already has a list of cgroup-only attach types
defined in "cgroup_attach_types[]". This patch fixes it by iterating
over "cgroup_attach_types[]" instead of "__MAX_BPF_ATTACH_TYPE".

Cc: Quentin Monnet
Reported-by: Takshak Chahande
Signed-off-by: Martin KaFai Lau
Acked-by: Daniel Borkmann
Reviewed-by: Quentin Monnet
Link: https://lore.kernel.org/r/20250507203232.1420762-1-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov
---
 tools/bpf/bpftool/cgroup.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 3f1d6be51215..944ebe21a216 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -318,11 +318,11 @@ static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type,

 static int do_show(int argc, char **argv)
 {
-    enum bpf_attach_type type;
     int has_attached_progs;
     const char *path;
     int cgroup_fd;
     int ret = -1;
+    unsigned int i;

     query_flags = 0;

@@ -370,14 +370,14 @@ static int do_show(int argc, char **argv)
            "AttachFlags", "Name");

     btf_vmlinux = libbpf_find_kernel_btf();
-    for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+    for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) {
         /*
          * Not all attach types may be supported, so it's expected,
          * that some requests will fail.
          * If we were able to get the show for at least one
          * attach type, let's return 0.
          */
-        if (show_bpf_progs(cgroup_fd, type, 0) == 0)
+        if (show_bpf_progs(cgroup_fd, cgroup_attach_types[i], 0) == 0)
             ret = 0;
     }

@@ -400,9 +400,9 @@ exit:
 static int do_show_tree_fn(const char *fpath, const struct stat *sb,
                int typeflag, struct FTW *ftw)
 {
-    enum bpf_attach_type type;
     int has_attached_progs;
     int cgroup_fd;
+    unsigned int i;

     if (typeflag != FTW_D)
         return 0;
@@ -434,8 +434,8 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
     }

     btf_vmlinux = libbpf_find_kernel_btf();
-    for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
-        show_bpf_progs(cgroup_fd, type, ftw->level);
+    for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++)
+        show_bpf_progs(cgroup_fd, cgroup_attach_types[i], ftw->level);

     if (errno == EINVAL)
         /* Last attach type does not support query.

From fce7bd8e385a6c282d70bed39d82ce805eeafbee Mon Sep 17 00:00:00 2001
From: Peilin Ye
Date: Wed, 7 May 2025 03:42:45 +0000
Subject: [PATCH 065/135] bpf/verifier: Handle BPF_LOAD_ACQ instructions in insn_def_regno()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

In preparation for supporting BPF load-acquire and store-release
instructions for architectures where bpf_jit_needs_zext() returns true
(e.g. riscv64), make insn_def_regno() handle load-acquires properly.

Acked-by: Björn Töpel
Tested-by: Björn Töpel # QEMU/RVA23
Signed-off-by: Peilin Ye
Reviewed-by: Pu Lehui
Link: https://lore.kernel.org/r/09cb2aec979aaed9d16db41f0f5b364de39377c0.1746588351.git.yepeilin@google.com
Signed-off-by: Alexei Starovoitov
---
 kernel/bpf/verifier.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 99aa2c890e7b..28f5a7899bd6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3649,16 +3649,16 @@ static int insn_def_regno(const struct bpf_insn *insn)
     case BPF_ST:
         return -1;
     case BPF_STX:
-        if ((BPF_MODE(insn->code) == BPF_ATOMIC ||
-             BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) &&
-            (insn->imm & BPF_FETCH)) {
+        if (BPF_MODE(insn->code) == BPF_ATOMIC ||
+            BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) {
             if (insn->imm == BPF_CMPXCHG)
                 return BPF_REG_0;
-            else
+            else if (insn->imm == BPF_LOAD_ACQ)
+                return insn->dst_reg;
+            else if (insn->imm & BPF_FETCH)
                 return insn->src_reg;
-        } else {
-            return -1;
         }
+        return -1;
     default:
         return insn->dst_reg;
     }

From 118ae46b794271ebcfcc9bab95e1c766198c8209 Mon Sep 17 00:00:00 2001
From: Andrea Parri
Date: Wed, 7 May 2025 03:42:55 +0000
Subject: [PATCH 066/135] bpf, riscv64: Introduce emit_load_*() and emit_store_*()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We're planning to add support for the load-acquire and store-release
BPF instructions. Define emit_load_*() and emit_store_*() to
enable/facilitate the (re)use of their code.
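As a sketch of the intended reuse (illustrative only;
emit_fence_r_rw() is only introduced in the next patch), an acquire
load can then be composed from the plain load helper plus a fence:

  /* dst_reg = load_acquire((u32 *)(src_reg + off16)), sketch */
  emit_load_32(false, rd, off, rs, ctx);  /* plain 32-bit zero-extending load */
  emit_fence_r_rw(ctx);                   /* order it as an acquire */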
Acked-by: Björn Töpel Reviewed-by: Pu Lehui Tested-by: Björn Töpel # QEMU/RVA23 Tested-by: Peilin Ye Signed-off-by: Andrea Parri [yepeilin@google.com: cosmetic change to commit title] Signed-off-by: Peilin Ye Link: https://lore.kernel.org/r/fce89473a5748e1631d18a5917d953460d1ae0d0.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- arch/riscv/net/bpf_jit_comp64.c | 242 +++++++++++++++++++------------- 1 file changed, 143 insertions(+), 99 deletions(-) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index ca60db75199d..953b6a20c69f 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -473,6 +473,140 @@ static inline void emit_kcfi(u32 hash, struct rv_jit_context *ctx) emit(hash, ctx); } +static int emit_load_8(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + int insns_start; + + if (is_12b_int(off)) { + insns_start = ctx->ninsns; + if (sign_ext) + emit(rv_lb(rd, off, rs), ctx); + else + emit(rv_lbu(rd, off, rs), ctx); + return ctx->ninsns - insns_start; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + insns_start = ctx->ninsns; + if (sign_ext) + emit(rv_lb(rd, 0, RV_REG_T1), ctx); + else + emit(rv_lbu(rd, 0, RV_REG_T1), ctx); + return ctx->ninsns - insns_start; +} + +static int emit_load_16(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + int insns_start; + + if (is_12b_int(off)) { + insns_start = ctx->ninsns; + if (sign_ext) + emit(rv_lh(rd, off, rs), ctx); + else + emit(rv_lhu(rd, off, rs), ctx); + return ctx->ninsns - insns_start; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + insns_start = ctx->ninsns; + if (sign_ext) + emit(rv_lh(rd, 0, RV_REG_T1), ctx); + else + emit(rv_lhu(rd, 0, RV_REG_T1), ctx); + return ctx->ninsns - insns_start; +} + +static int emit_load_32(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + int insns_start; + + if (is_12b_int(off)) { + insns_start = ctx->ninsns; + if (sign_ext) + emit(rv_lw(rd, off, rs), ctx); + else + emit(rv_lwu(rd, off, rs), ctx); + return ctx->ninsns - insns_start; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + insns_start = ctx->ninsns; + if (sign_ext) + emit(rv_lw(rd, 0, RV_REG_T1), ctx); + else + emit(rv_lwu(rd, 0, RV_REG_T1), ctx); + return ctx->ninsns - insns_start; +} + +static int emit_load_64(bool sign_ext, u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + int insns_start; + + if (is_12b_int(off)) { + insns_start = ctx->ninsns; + emit_ld(rd, off, rs, ctx); + return ctx->ninsns - insns_start; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + insns_start = ctx->ninsns; + emit_ld(rd, 0, RV_REG_T1, ctx); + return ctx->ninsns - insns_start; +} + +static void emit_store_8(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + if (is_12b_int(off)) { + emit(rv_sb(rd, off, rs), ctx); + return; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit(rv_sb(RV_REG_T1, 0, rs), ctx); +} + +static void emit_store_16(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + if (is_12b_int(off)) { + emit(rv_sh(rd, off, rs), ctx); + return; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit(rv_sh(RV_REG_T1, 0, rs), ctx); +} + +static void emit_store_32(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + if (is_12b_int(off)) { + emit_sw(rd, off, rs, ctx); + return; + } + + 
emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit_sw(RV_REG_T1, 0, rs, ctx); +} + +static void emit_store_64(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) +{ + if (is_12b_int(off)) { + emit_sd(rd, off, rs, ctx); + return; + } + + emit_imm(RV_REG_T1, off, ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit_sd(RV_REG_T1, 0, rs, ctx); +} + static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, struct rv_jit_context *ctx) { @@ -1650,8 +1784,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: { - int insn_len, insns_start; bool sign_ext; + int insn_len; sign_ext = BPF_MODE(insn->code) == BPF_MEMSX || BPF_MODE(insn->code) == BPF_PROBE_MEMSX; @@ -1663,78 +1797,16 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, switch (BPF_SIZE(code)) { case BPF_B: - if (is_12b_int(off)) { - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lb(rd, off, rs), ctx); - else - emit(rv_lbu(rd, off, rs), ctx); - insn_len = ctx->ninsns - insns_start; - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lb(rd, 0, RV_REG_T1), ctx); - else - emit(rv_lbu(rd, 0, RV_REG_T1), ctx); - insn_len = ctx->ninsns - insns_start; + insn_len = emit_load_8(sign_ext, rd, off, rs, ctx); break; case BPF_H: - if (is_12b_int(off)) { - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lh(rd, off, rs), ctx); - else - emit(rv_lhu(rd, off, rs), ctx); - insn_len = ctx->ninsns - insns_start; - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lh(rd, 0, RV_REG_T1), ctx); - else - emit(rv_lhu(rd, 0, RV_REG_T1), ctx); - insn_len = ctx->ninsns - insns_start; + insn_len = emit_load_16(sign_ext, rd, off, rs, ctx); break; case BPF_W: - if (is_12b_int(off)) { - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lw(rd, off, rs), ctx); - else - emit(rv_lwu(rd, off, rs), ctx); - insn_len = ctx->ninsns - insns_start; - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - insns_start = ctx->ninsns; - if (sign_ext) - emit(rv_lw(rd, 0, RV_REG_T1), ctx); - else - emit(rv_lwu(rd, 0, RV_REG_T1), ctx); - insn_len = ctx->ninsns - insns_start; + insn_len = emit_load_32(sign_ext, rd, off, rs, ctx); break; case BPF_DW: - if (is_12b_int(off)) { - insns_start = ctx->ninsns; - emit_ld(rd, off, rs, ctx); - insn_len = ctx->ninsns - insns_start; - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); - insns_start = ctx->ninsns; - emit_ld(rd, 0, RV_REG_T1, ctx); - insn_len = ctx->ninsns - insns_start; + insn_len = emit_load_64(sign_ext, rd, off, rs, ctx); break; } @@ -1879,44 +1951,16 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, /* STX: *(size *)(dst + off) = src */ case BPF_STX | BPF_MEM | BPF_B: - if (is_12b_int(off)) { - emit(rv_sb(rd, off, rs), ctx); - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); - emit(rv_sb(RV_REG_T1, 0, rs), ctx); + emit_store_8(rd, off, rs, ctx); break; case BPF_STX | BPF_MEM | BPF_H: - if (is_12b_int(off)) { - emit(rv_sh(rd, off, rs), ctx); - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); - emit(rv_sh(RV_REG_T1, 0, rs), ctx); + emit_store_16(rd, off, rs, ctx); break; case 
BPF_STX | BPF_MEM | BPF_W: - if (is_12b_int(off)) { - emit_sw(rd, off, rs, ctx); - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); - emit_sw(RV_REG_T1, 0, rs, ctx); + emit_store_32(rd, off, rs, ctx); break; case BPF_STX | BPF_MEM | BPF_DW: - if (is_12b_int(off)) { - emit_sd(rd, off, rs, ctx); - break; - } - - emit_imm(RV_REG_T1, off, ctx); - emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); - emit_sd(RV_REG_T1, 0, rs, ctx); + emit_store_64(rd, off, rs, ctx); break; case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_DW: From 8afd3170d5116385740aef8ab77d10b83f9b8e60 Mon Sep 17 00:00:00 2001 From: Andrea Parri Date: Wed, 7 May 2025 03:43:01 +0000 Subject: [PATCH 067/135] bpf, riscv64: Support load-acquire and store-release instructions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support BPF load-acquire (BPF_LOAD_ACQ) and store-release (BPF_STORE_REL) instructions in the riscv64 JIT compiler. For example, consider the following 64-bit load-acquire (assuming little-endian): db 10 00 00 00 01 00 00 r1 = load_acquire((u64 *)(r1 + 0x0)) 95 00 00 00 00 00 00 00 exit opcode (0xdb): BPF_ATOMIC | BPF_DW | BPF_STX imm (0x00000100): BPF_LOAD_ACQ The JIT compiler will emit an LD instruction followed by a FENCE R,RW instruction for the above, e.g.: ld x7,0(x6) fence r,rw Similarly, consider the following 16-bit store-release: cb 21 00 00 10 01 00 00 store_release((u16 *)(r1 + 0x0), w2) 95 00 00 00 00 00 00 00 exit opcode (0xcb): BPF_ATOMIC | BPF_H | BPF_STX imm (0x00000110): BPF_STORE_REL A FENCE RW,W instruction followed by an SH instruction will be emitted, e.g.: fence rw,w sh x2,0(x4) 8-bit and 16-bit load-acquires are zero-extending (cf., LBU, LHU). The verifier always rejects misaligned load-acquires/store-releases (even if BPF_F_ANY_ALIGNMENT is set), so the emitted load and store instructions are guaranteed to be single-copy atomic. Introduce primitives to emit the relevant (and the most common/used in the kernel) fences, i.e. fences with R -> RW, RW -> W and RW -> RW. Rename emit_atomic() to emit_atomic_rmw() to make it clear that it only handles RMW atomics, and replace its is64 parameter to allow to perform the required checks on the opsize (BPF_SIZE(code)). 
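For reference, the pred/succ arguments of rv_fence() are 4-bit masks
(bit 3 = I, bit 2 = O, bit 1 = R, bit 0 = W), so the constants used by
the new fence primitives decode as follows (illustration only, mirroring
the hunks below):

  rv_fence(0x2, 0x3)   /* fence r,rw  - tail of a load-acquire     */
  rv_fence(0x3, 0x1)   /* fence rw,w  - head of a store-release    */
  rv_fence(0x3, 0x3)   /* fence rw,rw - tail of fully-ordered RMWs */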
Acked-by: Björn Töpel Tested-by: Björn Töpel # QEMU/RVA23 Signed-off-by: Andrea Parri Co-developed-by: Peilin Ye Signed-off-by: Peilin Ye Reviewed-by: Pu Lehui Link: https://lore.kernel.org/r/3059c560e537ad43ed19055d2ebbd970c698095a.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- arch/riscv/net/bpf_jit.h | 15 +++++++ arch/riscv/net/bpf_jit_comp64.c | 75 ++++++++++++++++++++++++++++++--- 2 files changed, 85 insertions(+), 5 deletions(-) diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h index 1d1c78d4cff1..e7b032dfd17f 100644 --- a/arch/riscv/net/bpf_jit.h +++ b/arch/riscv/net/bpf_jit.h @@ -608,6 +608,21 @@ static inline u32 rv_fence(u8 pred, u8 succ) return rv_i_insn(imm11_0, 0, 0, 0, 0xf); } +static inline void emit_fence_r_rw(struct rv_jit_context *ctx) +{ + emit(rv_fence(0x2, 0x3), ctx); +} + +static inline void emit_fence_rw_w(struct rv_jit_context *ctx) +{ + emit(rv_fence(0x3, 0x1), ctx); +} + +static inline void emit_fence_rw_rw(struct rv_jit_context *ctx) +{ + emit(rv_fence(0x3, 0x3), ctx); +} + static inline u32 rv_nop(void) { return rv_i_insn(0, 0, 0, 0, 0x13); diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 953b6a20c69f..8767f032f2de 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -607,11 +607,65 @@ static void emit_store_64(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) emit_sd(RV_REG_T1, 0, rs, ctx); } -static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, - struct rv_jit_context *ctx) +static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_jit_context *ctx) +{ + switch (imm) { + /* dst_reg = load_acquire(src_reg + off16) */ + case BPF_LOAD_ACQ: + switch (BPF_SIZE(code)) { + case BPF_B: + emit_load_8(false, rd, off, rs, ctx); + break; + case BPF_H: + emit_load_16(false, rd, off, rs, ctx); + break; + case BPF_W: + emit_load_32(false, rd, off, rs, ctx); + break; + case BPF_DW: + emit_load_64(false, rd, off, rs, ctx); + break; + } + emit_fence_r_rw(ctx); + break; + /* store_release(dst_reg + off16, src_reg) */ + case BPF_STORE_REL: + emit_fence_rw_w(ctx); + switch (BPF_SIZE(code)) { + case BPF_B: + emit_store_8(rd, off, rs, ctx); + break; + case BPF_H: + emit_store_16(rd, off, rs, ctx); + break; + case BPF_W: + emit_store_32(rd, off, rs, ctx); + break; + case BPF_DW: + emit_store_64(rd, off, rs, ctx); + break; + } + break; + default: + pr_err_once("bpf-jit: invalid atomic load/store opcode %02x\n", imm); + return -EINVAL; + } + + return 0; +} + +static int emit_atomic_rmw(u8 rd, u8 rs, s16 off, s32 imm, u8 code, + struct rv_jit_context *ctx) { u8 r0; int jmp_offset; + bool is64; + + if (BPF_SIZE(code) != BPF_W && BPF_SIZE(code) != BPF_DW) { + pr_err_once("bpf-jit: 1- and 2-byte RMW atomics are not supported\n"); + return -EINVAL; + } + is64 = BPF_SIZE(code) == BPF_DW; if (off) { if (is_12b_int(off)) { @@ -688,9 +742,14 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx); jmp_offset = ninsns_rvoff(-6); emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx); - emit(rv_fence(0x3, 0x3), ctx); + emit_fence_rw_rw(ctx); break; + default: + pr_err_once("bpf-jit: invalid atomic RMW opcode %02x\n", imm); + return -EINVAL; } + + return 0; } #define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0) @@ -1962,10 +2021,16 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_STX | BPF_MEM | BPF_DW: emit_store_64(rd, off, rs, ctx); break; + case BPF_STX | BPF_ATOMIC | 
BPF_B: + case BPF_STX | BPF_ATOMIC | BPF_H: case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_DW: - emit_atomic(rd, rs, off, imm, - BPF_SIZE(code) == BPF_DW, ctx); + if (bpf_atomic_is_load_store(insn)) + ret = emit_atomic_ld_st(rd, rs, off, imm, code, ctx); + else + ret = emit_atomic_rmw(rd, rs, off, imm, code, ctx); + if (ret) + return ret; break; case BPF_STX | BPF_PROBE_MEM32 | BPF_B: From db7a3822b5f474b09db0a776e91f17c8b7d32137 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Wed, 7 May 2025 03:43:07 +0000 Subject: [PATCH 068/135] bpf, riscv64: Skip redundant zext instruction after load-acquire MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, the verifier inserts a zext instruction right after every 8-, 16- or 32-bit load-acquire, which is already zero-extending. Skip such redundant zext instructions. While we are here, update that already-obsolete comment about "skip the next instruction" in build_body(). Also change emit_atomic_rmw()'s parameters to keep it consistent with emit_atomic_ld_st(). Note that checking 'insn[1]' relies on 'insn' not being the last instruction, which should have been guaranteed by the verifier; we already use 'insn[1]' elsewhere in the file for similar purposes. Additionally, we don't check if 'insn[1]' is actually a zext for our load-acquire's dst_reg, or some other registers - in other words, here we are relying on the verifier to always insert a redundant zext right after a 8/16/32-bit load-acquire, for its dst_reg. Acked-by: Björn Töpel Reviewed-by: Pu Lehui Tested-by: Björn Töpel # QEMU/RVA23 Signed-off-by: Peilin Ye Link: https://lore.kernel.org/r/10e90e0eab042f924d35ad0d1c1f7ca29f673152.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- arch/riscv/net/bpf_jit_comp64.c | 23 ++++++++++++++++++----- arch/riscv/net/bpf_jit_core.c | 3 +-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 8767f032f2de..10e01ff06312 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -607,8 +607,13 @@ static void emit_store_64(u8 rd, s32 off, u8 rs, struct rv_jit_context *ctx) emit_sd(RV_REG_T1, 0, rs, ctx); } -static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_jit_context *ctx) +static int emit_atomic_ld_st(u8 rd, u8 rs, const struct bpf_insn *insn, + struct rv_jit_context *ctx) { + u8 code = insn->code; + s32 imm = insn->imm; + s16 off = insn->off; + switch (imm) { /* dst_reg = load_acquire(src_reg + off16) */ case BPF_LOAD_ACQ: @@ -627,6 +632,12 @@ static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_ break; } emit_fence_r_rw(ctx); + + /* If our next insn is a redundant zext, return 1 to tell + * build_body() to skip it. 
+ */ + if (BPF_SIZE(code) != BPF_DW && insn_is_zext(&insn[1])) + return 1; break; /* store_release(dst_reg + off16, src_reg) */ case BPF_STORE_REL: @@ -654,10 +665,12 @@ static int emit_atomic_ld_st(u8 rd, u8 rs, s16 off, s32 imm, u8 code, struct rv_ return 0; } -static int emit_atomic_rmw(u8 rd, u8 rs, s16 off, s32 imm, u8 code, +static int emit_atomic_rmw(u8 rd, u8 rs, const struct bpf_insn *insn, struct rv_jit_context *ctx) { - u8 r0; + u8 r0, code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; int jmp_offset; bool is64; @@ -2026,9 +2039,9 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_DW: if (bpf_atomic_is_load_store(insn)) - ret = emit_atomic_ld_st(rd, rs, off, imm, code, ctx); + ret = emit_atomic_ld_st(rd, rs, insn, ctx); else - ret = emit_atomic_rmw(rd, rs, off, imm, code, ctx); + ret = emit_atomic_rmw(rd, rs, insn, ctx); if (ret) return ret; break; diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index f8cd2f70a7fb..f6ca5cfa6b2f 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -26,9 +26,8 @@ static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset) int ret; ret = bpf_jit_emit_insn(insn, ctx, extra_pass); - /* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */ if (ret > 0) - i++; + i++; /* skip the next instruction */ if (offset) offset[i] = ctx->ninsns; if (ret < 0) From 13fdecf3456e21f91a4403f989464edcc5d61c22 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Wed, 7 May 2025 03:43:13 +0000 Subject: [PATCH 069/135] selftests/bpf: Use CAN_USE_LOAD_ACQ_STORE_REL when appropriate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of open-coding the conditions, use '#ifdef CAN_USE_LOAD_ACQ_STORE_REL' to guard the following tests: verifier_precision/bpf_load_acquire verifier_precision/bpf_store_release verifier_store_release/* Note that, for the first two tests in verifier_precision.c, switching to '#ifdef CAN_USE_LOAD_ACQ_STORE_REL' means also checking if '__clang_major__ >= 18', which has already been guaranteed by the outer '#if' check. 
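For reference, each guarded file ends up with the following shape
(sketch only; the macro itself is defined in bpf_misc.h and is extended
to riscv64 by a later patch in this series):

  #ifdef CAN_USE_LOAD_ACQ_STORE_REL
  /* tests that emit BPF_LOAD_ACQ / BPF_STORE_REL instructions */
  #else
  /* dummy test so the object still builds and loads */
  #endif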
Acked-by: Björn Töpel Reviewed-by: Pu Lehui Tested-by: Björn Töpel # QEMU/RVA23 Signed-off-by: Peilin Ye Link: https://lore.kernel.org/r/45d7e025f6e390a8ff36f08fc51e31705ac896bd.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/verifier_precision.c | 5 ++--- tools/testing/selftests/bpf/progs/verifier_store_release.c | 7 +++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c index 6662d4b39969..2dd0d15c2678 100644 --- a/tools/testing/selftests/bpf/progs/verifier_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_precision.c @@ -91,8 +91,7 @@ __naked int bpf_end_bswap(void) ::: __clobber_all); } -#if defined(ENABLE_ATOMICS_TESTS) && \ - (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) +#ifdef CAN_USE_LOAD_ACQ_STORE_REL SEC("?raw_tp") __success __log_level(2) @@ -138,7 +137,7 @@ __naked int bpf_store_release(void) : __clobber_all); } -#endif /* load-acquire, store-release */ +#endif /* CAN_USE_LOAD_ACQ_STORE_REL */ #endif /* v4 instruction */ SEC("?raw_tp") diff --git a/tools/testing/selftests/bpf/progs/verifier_store_release.c b/tools/testing/selftests/bpf/progs/verifier_store_release.c index c0442d5bb049..7e456e2861b4 100644 --- a/tools/testing/selftests/bpf/progs/verifier_store_release.c +++ b/tools/testing/selftests/bpf/progs/verifier_store_release.c @@ -6,8 +6,7 @@ #include "../../../include/linux/filter.h" #include "bpf_misc.h" -#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \ - (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) +#ifdef CAN_USE_LOAD_ACQ_STORE_REL SEC("socket") __description("store-release, 8-bit") @@ -271,7 +270,7 @@ __naked void store_release_with_invalid_reg(void) : __clobber_all); } -#else +#else /* CAN_USE_LOAD_ACQ_STORE_REL */ SEC("socket") __description("Clang version < 18, ENABLE_ATOMICS_TESTS not defined, and/or JIT doesn't support store-release, use a dummy test") @@ -281,6 +280,6 @@ int dummy_test(void) return 0; } -#endif +#endif /* CAN_USE_LOAD_ACQ_STORE_REL */ char _license[] SEC("license") = "GPL"; From 6e492ffcab6064cd7b317dc1f49fb9f92407aaa7 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Wed, 7 May 2025 03:43:19 +0000 Subject: [PATCH 070/135] selftests/bpf: Avoid passing out-of-range values to __retval() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, we pass 0x1234567890abcdef to __retval() for the following two tests: verifier_load_acquire/load_acquire_64 verifier_store_release/store_release_64 However, the upper 32 bits of that value are being ignored, since __retval() expects an int. Actually, the tests would still pass even if I change '__retval(0x1234567890abcdef)' to e.g. '__retval(0x90abcdef)'. Restructure the tests a bit to test the entire 64-bit values properly. Do the same to their 8-, 16- and 32-bit variants as well to keep the style consistent. 
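A minimal illustration of the truncation being fixed here, assuming the
expected value is carried as an int (as __retval() does):

  long long full = 0x1234567890abcdefLL;
  int seen = (int)full;   /* 0x90abcdef - upper 32 bits silently dropped */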
Fixes: ff3afe5da998 ("selftests/bpf: Add selftests for load-acquire and store-release instructions") Acked-by: Björn Töpel Reviewed-by: Pu Lehui Tested-by: Björn Töpel # QEMU/RVA23 Signed-off-by: Peilin Ye Link: https://lore.kernel.org/r/d67f4c6f6ee0d0388cbce1f4892ec4176ee2d604.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- .../bpf/progs/verifier_load_acquire.c | 40 +++++++++++++------ .../bpf/progs/verifier_store_release.c | 32 +++++++++++---- 2 files changed, 52 insertions(+), 20 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c index 77698d5a19e4..a696ab84bfd6 100644 --- a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c +++ b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c @@ -10,65 +10,81 @@ SEC("socket") __description("load-acquire, 8-bit") -__success __success_unpriv __retval(0x12) +__success __success_unpriv __retval(0) __naked void load_acquire_8(void) { asm volatile ( + "r0 = 0;" "w1 = 0x12;" "*(u8 *)(r10 - 1) = w1;" - ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r10 - 1)); + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u8 *)(r10 - 1)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(load_acquire_insn, - BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -1)) + BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -1)) : __clobber_all); } SEC("socket") __description("load-acquire, 16-bit") -__success __success_unpriv __retval(0x1234) +__success __success_unpriv __retval(0) __naked void load_acquire_16(void) { asm volatile ( + "r0 = 0;" "w1 = 0x1234;" "*(u16 *)(r10 - 2) = w1;" - ".8byte %[load_acquire_insn];" // w0 = load_acquire((u16 *)(r10 - 2)); + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u16 *)(r10 - 2)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(load_acquire_insn, - BPF_ATOMIC_OP(BPF_H, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -2)) + BPF_ATOMIC_OP(BPF_H, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -2)) : __clobber_all); } SEC("socket") __description("load-acquire, 32-bit") -__success __success_unpriv __retval(0x12345678) +__success __success_unpriv __retval(0) __naked void load_acquire_32(void) { asm volatile ( + "r0 = 0;" "w1 = 0x12345678;" "*(u32 *)(r10 - 4) = w1;" - ".8byte %[load_acquire_insn];" // w0 = load_acquire((u32 *)(r10 - 4)); + ".8byte %[load_acquire_insn];" // w2 = load_acquire((u32 *)(r10 - 4)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(load_acquire_insn, - BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -4)) + BPF_ATOMIC_OP(BPF_W, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -4)) : __clobber_all); } SEC("socket") __description("load-acquire, 64-bit") -__success __success_unpriv __retval(0x1234567890abcdef) +__success __success_unpriv __retval(0) __naked void load_acquire_64(void) { asm volatile ( + "r0 = 0;" "r1 = 0x1234567890abcdef ll;" "*(u64 *)(r10 - 8) = r1;" - ".8byte %[load_acquire_insn];" // r0 = load_acquire((u64 *)(r10 - 8)); + ".8byte %[load_acquire_insn];" // r2 = load_acquire((u64 *)(r10 - 8)); + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(load_acquire_insn, - BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_10, -8)) + BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_2, BPF_REG_10, -8)) : __clobber_all); } diff --git a/tools/testing/selftests/bpf/progs/verifier_store_release.c b/tools/testing/selftests/bpf/progs/verifier_store_release.c index 7e456e2861b4..72f1eb006074 100644 --- 
a/tools/testing/selftests/bpf/progs/verifier_store_release.c +++ b/tools/testing/selftests/bpf/progs/verifier_store_release.c @@ -10,13 +10,17 @@ SEC("socket") __description("store-release, 8-bit") -__success __success_unpriv __retval(0x12) +__success __success_unpriv __retval(0) __naked void store_release_8(void) { asm volatile ( + "r0 = 0;" "w1 = 0x12;" ".8byte %[store_release_insn];" // store_release((u8 *)(r10 - 1), w1); - "w0 = *(u8 *)(r10 - 1);" + "w2 = *(u8 *)(r10 - 1);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(store_release_insn, @@ -26,13 +30,17 @@ __naked void store_release_8(void) SEC("socket") __description("store-release, 16-bit") -__success __success_unpriv __retval(0x1234) +__success __success_unpriv __retval(0) __naked void store_release_16(void) { asm volatile ( + "r0 = 0;" "w1 = 0x1234;" ".8byte %[store_release_insn];" // store_release((u16 *)(r10 - 2), w1); - "w0 = *(u16 *)(r10 - 2);" + "w2 = *(u16 *)(r10 - 2);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(store_release_insn, @@ -42,13 +50,17 @@ __naked void store_release_16(void) SEC("socket") __description("store-release, 32-bit") -__success __success_unpriv __retval(0x12345678) +__success __success_unpriv __retval(0) __naked void store_release_32(void) { asm volatile ( + "r0 = 0;" "w1 = 0x12345678;" ".8byte %[store_release_insn];" // store_release((u32 *)(r10 - 4), w1); - "w0 = *(u32 *)(r10 - 4);" + "w2 = *(u32 *)(r10 - 4);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(store_release_insn, @@ -58,13 +70,17 @@ __naked void store_release_32(void) SEC("socket") __description("store-release, 64-bit") -__success __success_unpriv __retval(0x1234567890abcdef) +__success __success_unpriv __retval(0) __naked void store_release_64(void) { asm volatile ( + "r0 = 0;" "r1 = 0x1234567890abcdef ll;" ".8byte %[store_release_insn];" // store_release((u64 *)(r10 - 8), r1); - "r0 = *(u64 *)(r10 - 8);" + "r2 = *(u64 *)(r10 - 8);" + "if r2 == r1 goto 1f;" + "r0 = 1;" +"1:" "exit;" : : __imm_insn(store_release_insn, From 0357f29de80983992c2d3e9a5ccb4fcd03a79d76 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Wed, 7 May 2025 03:43:25 +0000 Subject: [PATCH 071/135] selftests/bpf: Verify zero-extension behavior in load-acquire tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Verify that 8-, 16- and 32-bit load-acquires are zero-extending by using immediate values with their highest bit set. Do the same for the 64-bit variant to keep the style consistent. 
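Sketch of why a high-bit immediate catches a sign-extension bug that a
value like 0x12 cannot:

  unsigned char v = 0xfe;
  unsigned long long zext = v;        /* 0xfe: LBU-like, expected */
  long long sext = (signed char)v;    /* 0xfffffffffffffffe: LB-like, now caught */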
Acked-by: Björn Töpel Reviewed-by: Pu Lehui Tested-by: Björn Töpel # QEMU/RVA23 Signed-off-by: Peilin Ye Link: https://lore.kernel.org/r/11097fd515f10308b3941469ee4c86cb8872db3f.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/verifier_load_acquire.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c index a696ab84bfd6..74f4f19c10b8 100644 --- a/tools/testing/selftests/bpf/progs/verifier_load_acquire.c +++ b/tools/testing/selftests/bpf/progs/verifier_load_acquire.c @@ -15,7 +15,7 @@ __naked void load_acquire_8(void) { asm volatile ( "r0 = 0;" - "w1 = 0x12;" + "w1 = 0xfe;" "*(u8 *)(r10 - 1) = w1;" ".8byte %[load_acquire_insn];" // w2 = load_acquire((u8 *)(r10 - 1)); "if r2 == r1 goto 1f;" @@ -35,7 +35,7 @@ __naked void load_acquire_16(void) { asm volatile ( "r0 = 0;" - "w1 = 0x1234;" + "w1 = 0xfedc;" "*(u16 *)(r10 - 2) = w1;" ".8byte %[load_acquire_insn];" // w2 = load_acquire((u16 *)(r10 - 2)); "if r2 == r1 goto 1f;" @@ -55,7 +55,7 @@ __naked void load_acquire_32(void) { asm volatile ( "r0 = 0;" - "w1 = 0x12345678;" + "w1 = 0xfedcba09;" "*(u32 *)(r10 - 4) = w1;" ".8byte %[load_acquire_insn];" // w2 = load_acquire((u32 *)(r10 - 4)); "if r2 == r1 goto 1f;" @@ -75,7 +75,7 @@ __naked void load_acquire_64(void) { asm volatile ( "r0 = 0;" - "r1 = 0x1234567890abcdef ll;" + "r1 = 0xfedcba0987654321 ll;" "*(u64 *)(r10 - 8) = r1;" ".8byte %[load_acquire_insn];" // r2 = load_acquire((u64 *)(r10 - 8)); "if r2 == r1 goto 1f;" From d3131466b4f8d2b3a8eea53daf524aaeeba03d4c Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Wed, 7 May 2025 03:43:31 +0000 Subject: [PATCH 072/135] selftests/bpf: Enable non-arena load-acquire/store-release selftests for riscv64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For riscv64, enable all BPF_{LOAD_ACQ,STORE_REL} selftests except the arena_atomics/* ones (not guarded behind CAN_USE_LOAD_ACQ_STORE_REL), since arena access is not yet supported. Acked-by: Björn Töpel Reviewed-by: Pu Lehui Tested-by: Björn Töpel # QEMU/RVA23 Signed-off-by: Peilin Ye Link: https://lore.kernel.org/r/9d878fa99a72626208a8eed3c04c4140caf77fda.1746588351.git.yepeilin@google.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_misc.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 863df7c0fdd0..6e208e24ba3b 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -225,8 +225,9 @@ #define CAN_USE_BPF_ST #endif -#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \ - (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86)) +#if __clang_major__ >= 18 && defined(ENABLE_ATOMICS_TESTS) && \ + (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) #define CAN_USE_LOAD_ACQ_STORE_REL #endif From ee971630f20fd421fffcdc4543731ebcb54ed6d0 Mon Sep 17 00:00:00 2001 From: Feng Yang Date: Tue, 6 May 2025 14:14:33 +0800 Subject: [PATCH 073/135] bpf: Allow some trace helpers for all prog types if it works under NMI and doesn't use any context-dependent things, should be fine for any program type. The detailed discussion is in [1]. 
[1] https://lore.kernel.org/all/CAEf4Bza6gK3dsrTosk6k3oZgtHesNDSrDd8sdeQ-GiS6oJixQg@mail.gmail.com/ Suggested-by: Andrii Nakryiko Signed-off-by: Feng Yang Signed-off-by: Andrii Nakryiko Acked-by: Tejun Heo Link: https://lore.kernel.org/bpf/20250506061434.94277-2-yangfeng59949@163.com --- include/linux/bpf-cgroup.h | 8 -------- kernel/bpf/cgroup.c | 32 ----------------------------- kernel/bpf/helpers.c | 42 ++++++++++++++++++++++++++++++++++++++ kernel/trace/bpf_trace.c | 41 ++++--------------------------------- net/core/filter.c | 14 ------------- 5 files changed, 46 insertions(+), 91 deletions(-) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 9de7adb68294..4847dcade917 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -427,8 +427,6 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, const struct bpf_func_proto * cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); -const struct bpf_func_proto * -cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); #else static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } @@ -465,12 +463,6 @@ cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return NULL; } -static inline const struct bpf_func_proto * -cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) -{ - return NULL; -} - static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 84f58f3d028a..62a1d8deb3dc 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -1653,10 +1653,6 @@ cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) if (func_proto) return func_proto; - func_proto = cgroup_current_func_proto(func_id, prog); - if (func_proto) - return func_proto; - switch (func_id) { case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; @@ -2204,10 +2200,6 @@ sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) if (func_proto) return func_proto; - func_proto = cgroup_current_func_proto(func_id, prog); - if (func_proto) - return func_proto; - switch (func_id) { case BPF_FUNC_sysctl_get_name: return &bpf_sysctl_get_name_proto; @@ -2351,10 +2343,6 @@ cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) if (func_proto) return func_proto; - func_proto = cgroup_current_func_proto(func_id, prog); - if (func_proto) - return func_proto; - switch (func_id) { #ifdef CONFIG_NET case BPF_FUNC_get_netns_cookie: @@ -2601,23 +2589,3 @@ cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return NULL; } } - -/* Common helpers for cgroup hooks with valid process context. 
*/ -const struct bpf_func_proto * -cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) -{ - switch (func_id) { - case BPF_FUNC_get_current_uid_gid: - return &bpf_get_current_uid_gid_proto; - case BPF_FUNC_get_current_comm: - return &bpf_get_current_comm_proto; -#ifdef CONFIG_CGROUP_NET_CLASSID - case BPF_FUNC_get_cgroup_classid: - return &bpf_get_cgroup_classid_curr_proto; -#endif - case BPF_FUNC_current_task_under_cgroup: - return &bpf_current_task_under_cgroup_proto; - default: - return NULL; - } -} diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 78cefb41266a..fed53da75025 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "../../lib/kstrtox.h" @@ -1912,6 +1913,12 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; const struct bpf_func_proto bpf_probe_read_kernel_proto __weak; const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak; const struct bpf_func_proto bpf_task_pt_regs_proto __weak; +const struct bpf_func_proto bpf_perf_event_read_proto __weak; +const struct bpf_func_proto bpf_send_signal_proto __weak; +const struct bpf_func_proto bpf_send_signal_thread_proto __weak; +const struct bpf_func_proto bpf_get_task_stack_sleepable_proto __weak; +const struct bpf_func_proto bpf_get_task_stack_proto __weak; +const struct bpf_func_proto bpf_get_branch_snapshot_proto __weak; const struct bpf_func_proto * bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) @@ -1965,6 +1972,8 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_pid_tgid_proto; case BPF_FUNC_get_ns_current_pid_tgid: return &bpf_get_ns_current_pid_tgid_proto; + case BPF_FUNC_get_current_uid_gid: + return &bpf_get_current_uid_gid_proto; default: break; } @@ -2022,7 +2031,21 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_cgroup_id_proto; case BPF_FUNC_get_current_ancestor_cgroup_id: return &bpf_get_current_ancestor_cgroup_id_proto; + case BPF_FUNC_current_task_under_cgroup: + return &bpf_current_task_under_cgroup_proto; #endif +#ifdef CONFIG_CGROUP_NET_CLASSID + case BPF_FUNC_get_cgroup_classid: + return &bpf_get_cgroup_classid_curr_proto; +#endif + case BPF_FUNC_task_storage_get: + if (bpf_prog_check_recur(prog)) + return &bpf_task_storage_get_recur_proto; + return &bpf_task_storage_get_proto; + case BPF_FUNC_task_storage_delete: + if (bpf_prog_check_recur(prog)) + return &bpf_task_storage_delete_recur_proto; + return &bpf_task_storage_delete_proto; default: break; } @@ -2037,6 +2060,8 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_current_task_proto; case BPF_FUNC_get_current_task_btf: return &bpf_get_current_task_btf_proto; + case BPF_FUNC_get_current_comm: + return &bpf_get_current_comm_proto; case BPF_FUNC_probe_read_user: return &bpf_probe_read_user_proto; case BPF_FUNC_probe_read_kernel: @@ -2047,6 +2072,10 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_probe_read_kernel_str: return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? 
NULL : &bpf_probe_read_kernel_str_proto; + case BPF_FUNC_copy_from_user: + return &bpf_copy_from_user_proto; + case BPF_FUNC_copy_from_user_task: + return &bpf_copy_from_user_task_proto; case BPF_FUNC_snprintf_btf: return &bpf_snprintf_btf_proto; case BPF_FUNC_snprintf: @@ -2057,6 +2086,19 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return bpf_get_trace_vprintk_proto(); case BPF_FUNC_perf_event_read_value: return bpf_get_perf_event_read_value_proto(); + case BPF_FUNC_perf_event_read: + return &bpf_perf_event_read_proto; + case BPF_FUNC_send_signal: + return &bpf_send_signal_proto; + case BPF_FUNC_send_signal_thread: + return &bpf_send_signal_thread_proto; + case BPF_FUNC_get_task_stack: + return prog->sleepable ? &bpf_get_task_stack_sleepable_proto + : &bpf_get_task_stack_proto; + case BPF_FUNC_get_branch_snapshot: + return &bpf_get_branch_snapshot_proto; + case BPF_FUNC_find_vma: + return &bpf_find_vma_proto; default: return NULL; } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 52c432a44aeb..868920994517 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -572,7 +572,7 @@ BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) return value; } -static const struct bpf_func_proto bpf_perf_event_read_proto = { +const struct bpf_func_proto bpf_perf_event_read_proto = { .func = bpf_perf_event_read, .gpl_only = true, .ret_type = RET_INTEGER, @@ -882,7 +882,7 @@ BPF_CALL_1(bpf_send_signal, u32, sig) return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0); } -static const struct bpf_func_proto bpf_send_signal_proto = { +const struct bpf_func_proto bpf_send_signal_proto = { .func = bpf_send_signal, .gpl_only = false, .ret_type = RET_INTEGER, @@ -894,7 +894,7 @@ BPF_CALL_1(bpf_send_signal_thread, u32, sig) return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0); } -static const struct bpf_func_proto bpf_send_signal_thread_proto = { +const struct bpf_func_proto bpf_send_signal_thread_proto = { .func = bpf_send_signal_thread, .gpl_only = false, .ret_type = RET_INTEGER, @@ -1185,7 +1185,7 @@ BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) return entry_cnt * br_entry_size; } -static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { +const struct bpf_func_proto bpf_get_branch_snapshot_proto = { .func = bpf_get_branch_snapshot, .gpl_only = true, .ret_type = RET_INTEGER, @@ -1430,14 +1430,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) const struct bpf_func_proto *func_proto; switch (func_id) { - case BPF_FUNC_get_current_uid_gid: - return &bpf_get_current_uid_gid_proto; - case BPF_FUNC_get_current_comm: - return &bpf_get_current_comm_proto; case BPF_FUNC_get_smp_processor_id: return &bpf_get_smp_processor_id_proto; - case BPF_FUNC_perf_event_read: - return &bpf_perf_event_read_proto; #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE case BPF_FUNC_probe_read: return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? @@ -1446,35 +1440,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? NULL : &bpf_probe_read_compat_str_proto; #endif -#ifdef CONFIG_CGROUPS - case BPF_FUNC_current_task_under_cgroup: - return &bpf_current_task_under_cgroup_proto; -#endif - case BPF_FUNC_send_signal: - return &bpf_send_signal_proto; - case BPF_FUNC_send_signal_thread: - return &bpf_send_signal_thread_proto; - case BPF_FUNC_get_task_stack: - return prog->sleepable ? 
&bpf_get_task_stack_sleepable_proto - : &bpf_get_task_stack_proto; - case BPF_FUNC_copy_from_user: - return &bpf_copy_from_user_proto; - case BPF_FUNC_copy_from_user_task: - return &bpf_copy_from_user_task_proto; - case BPF_FUNC_task_storage_get: - if (bpf_prog_check_recur(prog)) - return &bpf_task_storage_get_recur_proto; - return &bpf_task_storage_get_proto; - case BPF_FUNC_task_storage_delete: - if (bpf_prog_check_recur(prog)) - return &bpf_task_storage_delete_recur_proto; - return &bpf_task_storage_delete_proto; case BPF_FUNC_get_func_ip: return &bpf_get_func_ip_proto_tracing; - case BPF_FUNC_get_branch_snapshot: - return &bpf_get_branch_snapshot_proto; - case BPF_FUNC_find_vma: - return &bpf_find_vma_proto; default: break; } diff --git a/net/core/filter.c b/net/core/filter.c index 79cab4d78dc3..30e7d3679088 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -8022,10 +8022,6 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) if (func_proto) return func_proto; - func_proto = cgroup_current_func_proto(func_id, prog); - if (func_proto) - return func_proto; - switch (func_id) { case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_sock_proto; @@ -8051,10 +8047,6 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) if (func_proto) return func_proto; - func_proto = cgroup_current_func_proto(func_id, prog); - if (func_proto) - return func_proto; - switch (func_id) { case BPF_FUNC_bind: switch (prog->expected_attach_type) { @@ -8488,18 +8480,12 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_msg_pop_data_proto; case BPF_FUNC_perf_event_output: return &bpf_event_output_data_proto; - case BPF_FUNC_get_current_uid_gid: - return &bpf_get_current_uid_gid_proto; case BPF_FUNC_sk_storage_get: return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; case BPF_FUNC_get_netns_cookie: return &bpf_get_netns_cookie_sk_msg_proto; -#ifdef CONFIG_CGROUP_NET_CLASSID - case BPF_FUNC_get_cgroup_classid: - return &bpf_get_cgroup_classid_curr_proto; -#endif default: return bpf_sk_base_func_proto(func_id, prog); } From 8c112a428b94eabb6efde39715349a0b8dec880e Mon Sep 17 00:00:00 2001 From: Feng Yang Date: Tue, 6 May 2025 14:14:34 +0800 Subject: [PATCH 074/135] sched_ext: Remove bpf_scx_get_func_proto task_storage_{get,delete} has been moved to bpf_base_func_proto. 
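Task storage keeps working unchanged for sched_ext programs; a minimal
BPF-side sketch (map name and value layout are illustrative, not taken
from this patch):

  struct {
          __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
          __uint(map_flags, BPF_F_NO_PREALLOC);
          __type(key, int);
          __type(value, __u64);
  } stats SEC(".maps");

  /* in an ops callback, for a struct task_struct *p: */
  __u64 *val = bpf_task_storage_get(&stats, p, 0,
                                    BPF_LOCAL_STORAGE_GET_F_CREATE);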
Suggested-by: Andrii Nakryiko Signed-off-by: Feng Yang Signed-off-by: Andrii Nakryiko Acked-by: Tejun Heo Link: https://lore.kernel.org/bpf/20250506061434.94277-3-yangfeng59949@163.com --- kernel/sched/ext.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index fdbf249d1c68..cc628b009e11 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5586,21 +5586,8 @@ static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, return -EACCES; } -static const struct bpf_func_proto * -bpf_scx_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) -{ - switch (func_id) { - case BPF_FUNC_task_storage_get: - return &bpf_task_storage_get_proto; - case BPF_FUNC_task_storage_delete: - return &bpf_task_storage_delete_proto; - default: - return bpf_base_func_proto(func_id, prog); - } -} - static const struct bpf_verifier_ops bpf_scx_verifier_ops = { - .get_func_proto = bpf_scx_get_func_proto, + .get_func_proto = bpf_base_func_proto, .is_valid_access = bpf_scx_is_valid_access, .btf_struct_access = bpf_scx_btf_struct_access, }; From cf15cdc0f0f39a5c6315200808ec3e3995b0c2d2 Mon Sep 17 00:00:00 2001 From: Luis Gerhorst Date: Thu, 1 May 2025 09:35:52 +0200 Subject: [PATCH 075/135] selftests/bpf: Fix caps for __xlated/jited_unpriv Currently, __xlated_unpriv and __jited_unpriv do not work because the BPF syscall will overwrite info.jited_prog_len and info.xlated_prog_len with 0 if the process is not bpf_capable(). This bug was not noticed before, because there is no test that actually uses __xlated_unpriv/__jited_unpriv. To resolve this, simply restore the capabilities earlier (but still after loading the program). Adding this here unconditionally is fine because the function first checks that the capabilities were initialized before attempting to restore them. This will be important later when we add tests that check whether a speculation barrier was inserted in the correct location. Signed-off-by: Luis Gerhorst Fixes: 9c9f73391310 ("selftests/bpf: allow checking xlated programs in verifier_* tests") Fixes: 7d743e4c759c ("selftests/bpf: __jited test tag to check disassembly after jit") Acked-by: Kumar Kartikeya Dwivedi Tested-by: Eduard Zingerman Link: https://lore.kernel.org/r/20250501073603.1402960-2-luis.gerhorst@fau.de Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_loader.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 49f2fc61061f..9551d8d5f8f9 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -1042,6 +1042,14 @@ void run_subtest(struct test_loader *tester, emit_verifier_log(tester->log_buf, false /*force*/); validate_msgs(tester->log_buf, &subspec->expect_msgs, emit_verifier_log); + /* Restore capabilities because the kernel will silently ignore requests + * for program info (such as xlated program text) if we are not + * bpf-capable. Also, for some reason test_verifier executes programs + * with all capabilities restored. Do the same here. 
+ */ + if (restore_capabilities(&caps)) + goto tobj_cleanup; + if (subspec->expect_xlated.cnt) { err = get_xlated_program_text(bpf_program__fd(tprog), tester->log_buf, tester->log_buf_sz); @@ -1067,12 +1075,6 @@ void run_subtest(struct test_loader *tester, } if (should_do_test_run(spec, subspec)) { - /* For some reason test_verifier executes programs - * with all capabilities restored. Do the same here. - */ - if (restore_capabilities(&caps)) - goto tobj_cleanup; - /* Do bpf_map__attach_struct_ops() for each struct_ops map. * This should trigger bpf_struct_ops->reg callback on kernel side. */ From cb4a11925268b13ebcac322775d78bdd4e1b26d3 Mon Sep 17 00:00:00 2001 From: Ihor Solodrai Date: Thu, 8 May 2025 13:37:08 -0700 Subject: [PATCH 076/135] scripts/bpf_doc.py: implement json output format bpf_doc.py parses bpf.h header to collect information about various API elements (such as BPF helpers) and then dump them in one of the supported formats: rst docs and a C header. It's useful for external tools to be able to consume this information in an easy-to-parse format such as JSON. Implement JSON printers and add --json command line argument. v3->v4: refactor attrs to only be a helper's field v2->v3: nit cleanup v1->v2: add json printer for syscall target v3: https://lore.kernel.org/bpf/20250507203034.270428-1-isolodrai@meta.com/ v2: https://lore.kernel.org/bpf/20250507182802.3833349-1-isolodrai@meta.com/ v1: https://lore.kernel.org/bpf/20250506000605.497296-1-isolodrai@meta.com/ Signed-off-by: Ihor Solodrai Tested-by: Quentin Monnet Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/r/20250508203708.2520847-1-isolodrai@meta.com Signed-off-by: Alexei Starovoitov --- scripts/bpf_doc.py | 117 +++++++++++++++++++++++++++++++++++++-------- 1 file changed, 98 insertions(+), 19 deletions(-) diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py index e74a01a85070..c77dc40f7689 100755 --- a/scripts/bpf_doc.py +++ b/scripts/bpf_doc.py @@ -8,6 +8,7 @@ from __future__ import print_function import argparse +import json import re import sys, os import subprocess @@ -37,11 +38,17 @@ class APIElement(object): @desc: textual description of the symbol @ret: (optional) description of any associated return value """ - def __init__(self, proto='', desc='', ret='', attrs=[]): + def __init__(self, proto='', desc='', ret=''): self.proto = proto self.desc = desc self.ret = ret - self.attrs = attrs + + def to_dict(self): + return { + 'proto': self.proto, + 'desc': self.desc, + 'ret': self.ret + } class Helper(APIElement): @@ -51,8 +58,9 @@ class Helper(APIElement): @desc: textual description of the helper function @ret: description of the return value of the helper function """ - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) + def __init__(self, proto='', desc='', ret='', attrs=[]): + super().__init__(proto, desc, ret) + self.attrs = attrs self.enum_val = None def proto_break_down(self): @@ -81,6 +89,12 @@ class Helper(APIElement): return res + def to_dict(self): + d = super().to_dict() + d["attrs"] = self.attrs + d.update(self.proto_break_down()) + return d + ATTRS = { '__bpf_fastcall': 'bpf_fastcall' @@ -675,7 +689,7 @@ COMMANDS self.print_elem(command) -class PrinterHelpers(Printer): +class PrinterHelpersHeader(Printer): """ A printer for dumping collected information about helpers as C header to be included from BPF program. 
@@ -896,6 +910,43 @@ class PrinterHelpers(Printer): print(') = (void *) %d;' % helper.enum_val) print('') + +class PrinterHelpersJSON(Printer): + """ + A printer for dumping collected information about helpers as a JSON file. + @parser: A HeaderParser with Helper objects + """ + + def __init__(self, parser): + self.elements = parser.helpers + self.elem_number_check( + parser.desc_unique_helpers, + parser.define_unique_helpers, + "helper", + "___BPF_FUNC_MAPPER", + ) + + def print_all(self): + helper_dicts = [helper.to_dict() for helper in self.elements] + out_dict = {'helpers': helper_dicts} + print(json.dumps(out_dict, indent=4)) + + +class PrinterSyscallJSON(Printer): + """ + A printer for dumping collected syscall information as a JSON file. + @parser: A HeaderParser with APIElement objects + """ + + def __init__(self, parser): + self.elements = parser.commands + self.elem_number_check(parser.desc_syscalls, parser.enum_syscalls, 'syscall', 'bpf_cmd') + + def print_all(self): + syscall_dicts = [syscall.to_dict() for syscall in self.elements] + out_dict = {'syscall': syscall_dicts} + print(json.dumps(out_dict, indent=4)) + ############################################################################### # If script is launched from scripts/ from kernel tree and can access @@ -905,9 +956,17 @@ script = os.path.abspath(sys.argv[0]) linuxRoot = os.path.dirname(os.path.dirname(script)) bpfh = os.path.join(linuxRoot, 'include/uapi/linux/bpf.h') +# target -> output format -> printer printers = { - 'helpers': PrinterHelpersRST, - 'syscall': PrinterSyscallRST, + 'helpers': { + 'rst': PrinterHelpersRST, + 'json': PrinterHelpersJSON, + 'header': PrinterHelpersHeader, + }, + 'syscall': { + 'rst': PrinterSyscallRST, + 'json': PrinterSyscallJSON + }, } argParser = argparse.ArgumentParser(description=""" @@ -917,6 +976,8 @@ rst2man utility. """) argParser.add_argument('--header', action='store_true', help='generate C header file') +argParser.add_argument('--json', action='store_true', + help='generate a JSON') if (os.path.isfile(bpfh)): argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h', default=bpfh) @@ -924,17 +985,35 @@ else: argParser.add_argument('--filename', help='path to include/uapi/linux/bpf.h') argParser.add_argument('target', nargs='?', default='helpers', choices=printers.keys(), help='eBPF API target') -args = argParser.parse_args() -# Parse file. -headerParser = HeaderParser(args.filename) -headerParser.run() +def error_die(message: str): + argParser.print_usage(file=sys.stderr) + print('Error: {}'.format(message), file=sys.stderr) + exit(1) -# Print formatted output to standard output. -if args.header: - if args.target != 'helpers': - raise NotImplementedError('Only helpers header generation is supported') - printer = PrinterHelpers(headerParser) -else: - printer = printers[args.target](headerParser) -printer.print_all() +def parse_and_dump(): + args = argParser.parse_args() + + # Parse file. + headerParser = HeaderParser(args.filename) + headerParser.run() + + if args.header and args.json: + error_die('Use either --header or --json, not both') + + output_format = 'rst' + if args.header: + output_format = 'header' + elif args.json: + output_format = 'json' + + try: + printer = printers[args.target][output_format](headerParser) + # Print formatted output to standard output. 
+ printer.print_all() + except KeyError: + error_die('Unsupported target/format combination: "{}", "{}"' + .format(args.target, output_format)) + +if __name__ == "__main__": + parse_and_dump() From 823153334042746604fdb416ea358a90940c1d83 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 9 May 2025 17:35:37 +0200 Subject: [PATCH 077/135] bpf: Add support to retrieve ref_ctr_offset for uprobe perf link Adding support to retrieve ref_ctr_offset for uprobe perf link, which got somehow omitted from the initial uprobe link info changes. Signed-off-by: Jiri Olsa Signed-off-by: Andrii Nakryiko Acked-by: Yafang Shao Link: https://lore.kernel.org/bpf/20250509153539.779599-2-jolsa@kernel.org --- include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 5 +++-- kernel/trace/trace_uprobe.c | 2 +- tools/include/uapi/linux/bpf.h | 1 + 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 71d5ac83cf5d..16e95398c91c 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -6724,6 +6724,7 @@ struct bpf_link_info { __u32 name_len; __u32 offset; /* offset from file_name */ __u64 cookie; + __u64 ref_ctr_offset; } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */ struct { __aligned_u64 func_name; /* in/out */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index df33d19c5c3b..4b5f29168618 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3800,14 +3800,14 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event, static int bpf_perf_link_fill_uprobe(const struct perf_event *event, struct bpf_link_info *info) { + u64 ref_ctr_offset, offset; char __user *uname; - u64 addr, offset; u32 ulen, type; int err; uname = u64_to_user_ptr(info->perf_event.uprobe.file_name); ulen = info->perf_event.uprobe.name_len; - err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr, + err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &ref_ctr_offset, &type, NULL); if (err) return err; @@ -3819,6 +3819,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event, info->perf_event.uprobe.name_len = ulen; info->perf_event.uprobe.offset = offset; info->perf_event.uprobe.cookie = event->bpf_cookie; + info->perf_event.uprobe.ref_ctr_offset = ref_ctr_offset; return 0; } #endif diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 3386439ec9f6..d9cf6ed2c106 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -1489,7 +1489,7 @@ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type, : BPF_FD_TYPE_UPROBE; *filename = tu->filename; *probe_offset = tu->offset; - *probe_addr = 0; + *probe_addr = tu->ref_ctr_offset; return 0; } #endif /* CONFIG_PERF_EVENTS */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 71d5ac83cf5d..16e95398c91c 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -6724,6 +6724,7 @@ struct bpf_link_info { __u32 name_len; __u32 offset; /* offset from file_name */ __u64 cookie; + __u64 ref_ctr_offset; } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */ struct { __aligned_u64 func_name; /* in/out */ From d57293db64f57ae689bd840bb7db421dd9816faf Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 9 May 2025 17:35:38 +0200 Subject: [PATCH 078/135] selftests/bpf: Add link info test for ref_ctr_offset retrieval Adding link info test for ref_ctr_offset retrieval for both uprobe and uretprobe probes. 
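The test resolves the ELF offset of a global "semaphore" object in the
test binary and passes it as ref_ctr_offset when attaching; the
user-space side amounts to the following (the symbol name matches the
selftest, any STT_OBJECT symbol would do):

  /* incremented by the kernel while the uprobe is attached */
  unsigned short uprobe_link_info_sema_1;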
Signed-off-by: Jiri Olsa Signed-off-by: Andrii Nakryiko Acked-by: Yafang Shao Link: https://lore.kernel.org/bpf/20250509153539.779599-3-jolsa@kernel.org --- .../selftests/bpf/prog_tests/fill_link_info.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c index e59af2aa6601..e40114620751 100644 --- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c +++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c @@ -37,6 +37,7 @@ static noinline void uprobe_func(void) static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr, ssize_t offset, ssize_t entry_offset) { + ssize_t ref_ctr_offset = entry_offset /* ref_ctr_offset for uprobes */; struct bpf_link_info info; __u32 len = sizeof(info); char buf[PATH_MAX]; @@ -97,6 +98,7 @@ again: case BPF_PERF_EVENT_UPROBE: case BPF_PERF_EVENT_URETPROBE: ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset"); + ASSERT_EQ(info.perf_event.uprobe.ref_ctr_offset, ref_ctr_offset, "uprobe_ref_ctr_offset"); ASSERT_EQ(info.perf_event.uprobe.name_len, strlen(UPROBE_FILE) + 1, "name_len"); @@ -241,20 +243,32 @@ static void test_uprobe_fill_link_info(struct test_fill_link_info *skel, .retprobe = type == BPF_PERF_EVENT_URETPROBE, .bpf_cookie = PERF_EVENT_COOKIE, ); + const char *sema[1] = { + "uprobe_link_info_sema_1", + }; + __u64 *ref_ctr_offset; struct bpf_link *link; int link_fd, err; + err = elf_resolve_syms_offsets("/proc/self/exe", 1, sema, + (unsigned long **) &ref_ctr_offset, STT_OBJECT); + if (!ASSERT_OK(err, "elf_resolve_syms_offsets_object")) + return; + + opts.ref_ctr_offset = *ref_ctr_offset; link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run, 0, /* self pid */ UPROBE_FILE, uprobe_offset, &opts); if (!ASSERT_OK_PTR(link, "attach_uprobe")) - return; + goto out; link_fd = bpf_link__fd(link); - err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0); + err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, *ref_ctr_offset); ASSERT_OK(err, "verify_perf_link_info"); bpf_link__destroy(link); +out: + free(ref_ctr_offset); } static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies) From 97596edfec0125db3d6520193cd10be69e24010c Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 9 May 2025 17:35:39 +0200 Subject: [PATCH 079/135] bpftool: Display ref_ctr_offset for uprobe link info Adding support to display ref_ctr_offset in link output, like: # bpftool link ... 42: perf_event prog 174 uprobe /proc/self/exe+0x102f13 cookie 3735928559 ref_ctr_offset 0x303a3fa bpf_cookie 3735928559 pids test_progs(1820) # bpftool link -j | jq [ ... { "id": 42, ... 
"ref_ctr_offset": 50500538, } ] Signed-off-by: Jiri Olsa Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250509153539.779599-4-jolsa@kernel.org --- tools/bpf/bpftool/link.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 52fd2c9fac56..3535afc80a49 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -380,6 +380,7 @@ show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr) u64_to_ptr(info->perf_event.uprobe.file_name)); jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset); jsonw_uint_field(wtr, "cookie", info->perf_event.uprobe.cookie); + jsonw_uint_field(wtr, "ref_ctr_offset", info->perf_event.uprobe.ref_ctr_offset); } static void @@ -823,6 +824,8 @@ static void show_perf_event_uprobe_plain(struct bpf_link_info *info) printf("%s+%#x ", buf, info->perf_event.uprobe.offset); if (info->perf_event.uprobe.cookie) printf("cookie %llu ", info->perf_event.uprobe.cookie); + if (info->perf_event.uprobe.ref_ctr_offset) + printf("ref_ctr_offset 0x%llx ", info->perf_event.uprobe.ref_ctr_offset); } static void show_perf_event_tracepoint_plain(struct bpf_link_info *info) From 79f0c39ae7d3dc628c01b02f23ca5d01f9875040 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Fri, 25 Apr 2025 13:59:57 +0800 Subject: [PATCH 080/135] ktls, sockmap: Fix missing uncharge operation When we specify apply_bytes, we divide the msg into multiple segments, each with a length of 'send', and every time we send this part of the data using tcp_bpf_sendmsg_redir(), we use sk_msg_return_zero() to uncharge the memory of the specified 'send' size. However, if the first segment of data fails to send, for example, the peer's buffer is full, we need to release all of the msg. When releasing the msg, we haven't uncharged the memory of the subsequent segments. This modification does not make significant logical changes, but only fills in the missing uncharge places. This issue has existed all along, until it was exposed after we added the apply test in test_sockmap: commit 3448ad23b34e ("selftests/bpf: Add apply_bytes test to test_txmsg_redir_wait_sndmem in test_sockmap") Fixes: d3b18ad31f93 ("tls: add bpf support to sk_msg handling") Reported-by: Cong Wang Closes: https://lore.kernel.org/bpf/aAmIi0vlycHtbXeb@pop-os.localdomain/T/#t Signed-off-by: Jiayuan Chen Signed-off-by: Martin KaFai Lau Acked-by: John Fastabend Reviewed-by: Cong Wang Link: https://lore.kernel.org/r/20250425060015.6968-2-jiayuan.chen@linux.dev --- net/tls/tls_sw.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index f3d7d19482da..fc88e34b7f33 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -908,6 +908,13 @@ more_data: &msg_redir, send, flags); lock_sock(sk); if (err < 0) { + /* Regardless of whether the data represented by + * msg_redir is sent successfully, we have already + * uncharged it via sk_msg_return_zero(). The + * msg->sg.size represents the remaining unprocessed + * data, which needs to be uncharged here. + */ + sk_mem_uncharge(sk, msg->sg.size); *copied -= sk_msg_free_nocharge(sk, &msg_redir); msg->sg.size = 0; } From be48b8790a0b921c0957dacf958f320cae477dec Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Fri, 25 Apr 2025 13:59:58 +0800 Subject: [PATCH 081/135] selftests/bpf: Add test to cover sockmap with ktls The selftest can reproduce an issue where we miss the uncharge operation when freeing msg, which will cause the following warning. 
We fixed the issue and added this reproducer to selftest to ensure it will not happen again. ------------[ cut here ]------------ WARNING: CPU: 1 PID: 40 at net/ipv4/af_inet.c inet_sock_destruct+0x173/0x1d5 Tainted: [W]=WARN Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014 Workqueue: events sk_psock_destroy RIP: 0010:inet_sock_destruct+0x173/0x1d5 RSP: 0018:ffff8880085cfc18 EFLAGS: 00010202 RAX: 1ffff11003dbfc00 RBX: ffff88801edfe3e8 RCX: ffffffff822f5af4 RDX: 0000000000000007 RSI: dffffc0000000000 RDI: ffff88801edfe16c RBP: ffff88801edfe184 R08: ffffed1003dbfc31 R09: 0000000000000000 R10: ffffffff822f5ab7 R11: ffff88801edfe187 R12: ffff88801edfdec0 R13: ffff888020376ac0 R14: ffff888020376ac0 R15: ffff888020376a60 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000556365155830 CR3: 000000001d6aa000 CR4: 0000000000350ef0 Call Trace: __sk_destruct+0x46/0x222 sk_psock_destroy+0x22f/0x242 process_one_work+0x504/0x8a8 ? process_one_work+0x39d/0x8a8 ? __pfx_process_one_work+0x10/0x10 ? worker_thread+0x44/0x2ae ? __list_add_valid_or_report+0x83/0xea ? srso_return_thunk+0x5/0x5f ? __list_add+0x45/0x52 process_scheduled_works+0x73/0x82 worker_thread+0x1ce/0x2ae Acked-by: John Fastabend Signed-off-by: Jiayuan Chen Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20250425060015.6968-3-jiayuan.chen@linux.dev --- .../selftests/bpf/prog_tests/sockmap_ktls.c | 76 +++++++++++++++++++ .../selftests/bpf/progs/test_sockmap_ktls.c | 10 +++ 2 files changed, 86 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index 3044f54b16d6..b6c471da5c28 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -240,6 +240,80 @@ out: test_sockmap_ktls__destroy(skel); } +static void test_sockmap_ktls_tx_no_buf(int family, int sotype, bool push) +{ + int c = -1, p = -1, one = 1, two = 2; + struct test_sockmap_ktls *skel; + unsigned char *data = NULL; + struct msghdr msg = {0}; + struct iovec iov[2]; + int prog_fd, map_fd; + int txrx_buf = 1024; + int iov_length = 8192; + int err; + + skel = test_sockmap_ktls__open_and_load(); + if (!ASSERT_TRUE(skel, "open ktls skel")) + return; + + err = create_pair(family, sotype, &c, &p); + if (!ASSERT_OK(err, "create_pair()")) + goto out; + + err = setsockopt(c, SOL_SOCKET, SO_RCVBUFFORCE, &txrx_buf, sizeof(int)); + err |= setsockopt(p, SOL_SOCKET, SO_SNDBUFFORCE, &txrx_buf, sizeof(int)); + if (!ASSERT_OK(err, "set buf limit")) + goto out; + + prog_fd = bpf_program__fd(skel->progs.prog_sk_policy_redir); + map_fd = bpf_map__fd(skel->maps.sock_map); + + err = bpf_prog_attach(prog_fd, map_fd, BPF_SK_MSG_VERDICT, 0); + if (!ASSERT_OK(err, "bpf_prog_attach sk msg")) + goto out; + + err = bpf_map_update_elem(map_fd, &one, &c, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(c)")) + goto out; + + err = bpf_map_update_elem(map_fd, &two, &p, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(p)")) + goto out; + + skel->bss->apply_bytes = 1024; + + err = init_ktls_pairs(c, p); + if (!ASSERT_OK(err, "init_ktls_pairs(c, p)")) + goto out; + + data = calloc(iov_length, sizeof(char)); + if (!data) + goto out; + + iov[0].iov_base = data; + iov[0].iov_len = iov_length; + iov[1].iov_base = data; + iov[1].iov_len = iov_length; + msg.msg_iov = iov; + msg.msg_iovlen = 2; + + for (;;) { + err = sendmsg(c, &msg, MSG_DONTWAIT); + if (err <= 0) + break; + } + +out: + if (data) + free(data); + 
if (c != -1) + close(c); + if (p != -1) + close(p); + + test_sockmap_ktls__destroy(skel); +} + static void run_tests(int family, enum bpf_map_type map_type) { int map; @@ -262,6 +336,8 @@ static void run_ktls_test(int family, int sotype) test_sockmap_ktls_tx_cork(family, sotype, false); if (test__start_subtest("tls tx cork with push")) test_sockmap_ktls_tx_cork(family, sotype, true); + if (test__start_subtest("tls tx egress with no buf")) + test_sockmap_ktls_tx_no_buf(family, sotype, true); } void test_sockmap_ktls(void) diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c index e0f757929ef4..8bdb9987c0c7 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_ktls.c @@ -6,6 +6,7 @@ int cork_byte; int push_start; int push_end; +int apply_bytes; struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); @@ -24,3 +25,12 @@ int prog_sk_policy(struct sk_msg_md *msg) return SK_PASS; } + +SEC("sk_msg") +int prog_sk_policy_redir(struct sk_msg_md *msg) +{ + int two = 2; + + bpf_msg_apply_bytes(msg, apply_bytes); + return bpf_msg_redirect_map(msg, &sock_map, two, 0); +} From 7220eabff8cb4af3b93cd021aa853b9f5df2923f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 9 May 2025 11:03:50 -0700 Subject: [PATCH 082/135] bpf, docs: document open-coded BPF iterators Extract BPF open-coded iterators documentation spread out across a few original commit messages ([0], [1]) into a dedicated doc section under Documentation/bpf/bpf_iterators.rst. Also make explicit expectation that BPF iterator program type should be accompanied by a corresponding open-coded BPF iterator implementation, going forward. [0] https://lore.kernel.org/all/20230308184121.1165081-3-andrii@kernel.org/ [1] https://lore.kernel.org/all/20230308184121.1165081-4-andrii@kernel.org/ Acked-by: Kumar Kartikeya Dwivedi Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20250509180350.2604946-1-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- Documentation/bpf/bpf_iterators.rst | 113 +++++++++++++++++++++++++++- 1 file changed, 110 insertions(+), 3 deletions(-) diff --git a/Documentation/bpf/bpf_iterators.rst b/Documentation/bpf/bpf_iterators.rst index 385cd05aabf5..8f0a4a91b77a 100644 --- a/Documentation/bpf/bpf_iterators.rst +++ b/Documentation/bpf/bpf_iterators.rst @@ -2,10 +2,117 @@ BPF Iterators ============= +-------- +Overview +-------- ----------- -Motivation ----------- +BPF supports two separate entities collectively known as "BPF iterators": BPF +iterator *program type* and *open-coded* BPF iterators. The former is +a stand-alone BPF program type which, when attached and activated by user, +will be called once for each entity (task_struct, cgroup, etc) that is being +iterated. The latter is a set of BPF-side APIs implementing iterator +functionality and available across multiple BPF program types. Open-coded +iterators provide similar functionality to BPF iterator programs, but gives +more flexibility and control to all other BPF program types. BPF iterator +programs, on the other hand, can be used to implement anonymous or BPF +FS-mounted special files, whose contents are generated by attached BPF iterator +program, backed by seq_file functionality. Both are useful depending on +specific needs. + +When adding a new BPF iterator program, it is expected that similar +functionality will be added as open-coded iterator for maximum flexibility. 
+It's also expected that iteration logic and code will be maximally shared and
+reused between two iterator API surfaces.
+
+------------------------
+Open-coded BPF Iterators
+------------------------
+
+Open-coded BPF iterators are implemented as tightly-coupled trios of kfuncs
+(constructor, next element fetch, destructor) and iterator-specific type
+describing on-the-stack iterator state, which is guaranteed by the BPF
+verifier to not be tampered with outside of the corresponding
+constructor/destructor/next APIs.
+
+Each kind of open-coded BPF iterator has its own associated
+struct bpf_iter_<type>, where <type> denotes a specific type of iterator.
+bpf_iter_<type> state needs to live on BPF program stack, so make sure it's
+small enough to fit on BPF stack. For performance reasons it's best to avoid
+dynamic memory allocation for iterator state and size the state struct big
+enough to fit everything necessary. But if necessary, dynamic memory
+allocation is a way to bypass BPF stack limitations. Note, state struct size
+is part of iterator's user-visible API, so changing it will break backwards
+compatibility, so be deliberate about designing it.
+
+All kfuncs (constructor, next, destructor) have to be named consistently as
+bpf_iter_<type>_{new,next,destroy}(), respectively. <type> represents iterator
+type, and iterator state should be represented as a matching
+`struct bpf_iter_<type>` state type. Also, all iter kfuncs should have
+a pointer to this `struct bpf_iter_<type>` as the very first argument.
+
+Additionally:
+ - Constructor, i.e., `bpf_iter_<type>_new()`, can have arbitrary extra
+ number of arguments. Return type is not enforced either.
+ - Next method, i.e., `bpf_iter_<type>_next()`, has to return a pointer
+ type and should have exactly one argument: `struct bpf_iter_<type> *`
+ (const/volatile/restrict and typedefs are ignored).
+ - Destructor, i.e., `bpf_iter_<type>_destroy()`, should return void and
+ should have exactly one argument, similar to the next method.
+ - `struct bpf_iter_<type>` size is enforced to be positive and
+ a multiple of 8 bytes (to fit stack slots correctly).
+
+Such strictness and consistency allows to build generic helpers abstracting
+important, but boilerplate, details to be able to use open-coded iterators
+effectively and ergonomically (see libbpf's bpf_for_each() macro). This is
+enforced at kfunc registration point by the kernel.
+
+Constructor/next/destructor implementation contract is as follows:
+ - constructor, `bpf_iter_<type>_new()`, always initializes iterator state on
+ the stack. If any of the input arguments are invalid, constructor should
+ make sure to still initialize it such that subsequent next() calls will
+ return NULL. I.e., on error, *return error and construct empty iterator*.
+ Constructor kfunc is marked with KF_ITER_NEW flag.
+
+ - next method, `bpf_iter_<type>_next()`, accepts pointer to iterator state
+ and produces an element. Next method should always return a pointer. The
+ contract with the BPF verifier is that next method *guarantees* that it
+ will eventually return NULL when elements are exhausted. Once NULL is
+ returned, subsequent next calls *should keep returning NULL*. Next method
+ is marked with KF_ITER_NEXT (and should also have KF_RET_NULL as
+ NULL-returning kfunc, of course).
+
+ - destructor, `bpf_iter_<type>_destroy()`, is always called once. Even if
+ constructor failed or next returned nothing. Destructor frees up any
+ resources and marks stack space used by `struct bpf_iter_<type>` as usable
+ for something else. Destructor is marked with KF_ITER_DESTROY flag.
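+
+For example, the in-tree numbers iterator (``bpf_iter_num``) follows this
+contract with the following kfunc trio (a sketch of the actual
+declarations)::
+
+    __bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end);
+    __bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num *it);
+    __bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it);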
+
+Any open-coded BPF iterator implementation has to implement at least these
+three methods. It is enforced that for any given type of iterator only
+applicable constructor/destructor/next are callable. I.e., verifier ensures
+you can't pass number iterator state into, say, cgroup iterator's next method.
+
+From a 10,000-foot BPF verification point of view, next methods are the points
+of forking a verification state, which are conceptually similar to what
+verifier is doing when validating conditional jumps. Verifier is branching out
+`call bpf_iter_<type>_next` instruction and simulates two outcomes: NULL
+(iteration is done) and non-NULL (new element is returned). NULL is simulated
+first and is supposed to reach exit without looping. After that non-NULL case
+is validated and it either reaches exit (for trivial examples with no real
+loop), or reaches another `call bpf_iter_<type>_next` instruction with the
+state equivalent to an already (partially) validated one. State equivalency at
+that point means we technically are going to be looping forever without
+"breaking out" of the established "state envelope" (i.e., subsequent
+iterations don't add any new knowledge or constraints to the verifier state,
+so running 1, 2, 10, or a million of them doesn't matter). But taking into
+account the contract stating that iterator next method *has to* return NULL
+eventually, we can conclude that loop body is safe and will eventually
+terminate. Given we validated logic outside of the loop (NULL case), and
+concluded that loop body is safe (though potentially looping many times),
+verifier can claim safety of the overall program logic.
+
+------------------------
+BPF Iterators Motivation
+------------------------

 There are a few existing ways to dump kernel data into user space. The most
 popular one is the ``/proc`` system. For example, ``cat /proc/net/tcp6`` dumps

From c5bcc8c781277eeffe84fe75144af1da76b7ec3e Mon Sep 17 00:00:00 2001
From: Gregory Bell
Date: Mon, 12 May 2025 10:04:12 -0400
Subject: [PATCH 083/135] selftests/bpf: test_verifier verbose causes
 erroneous failures

When running test_verifier with the -v flag and a test with
`expected_ret==VERBOSE_ACCEPT`, the opts.log_level is unintentionally
overwritten because the verbose flag takes precedence. This leads to a
mismatch in the expected and actual contents of bpf_vlog, causing tests
to fail incorrectly.

Reorder the conditional logic that sets opts.log_level to preserve the
expected log level and prevent it from being overridden by -v.
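For context, a VERBOSE_ACCEPT test passes only when the strings in its
.errstr field appear in the captured verifier log, along the lines of this
illustrative (not in-tree) entry, which is why forcing a different log
level breaks the comparison:

  {
          "sketch: verbose accept",
          .insns = {
          BPF_MOV64_IMM(BPF_REG_0, 0),
          BPF_EXIT_INSN(),
          },
          .errstr = "0: (b7) r0 = 0", /* expected to appear in the level-2 log */
          .result = VERBOSE_ACCEPT,
  },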
Signed-off-by: Gregory Bell Link: https://lore.kernel.org/r/182bf00474f817c99f968a9edb119882f62be0f8.1747058195.git.grbell@redhat.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_verifier.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 447b68509d76..2d13e862b078 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -1559,10 +1559,10 @@ static void do_test_single(struct bpf_test *test, bool unpriv, test->errstr_unpriv : test->errstr; opts.expected_attach_type = test->expected_attach_type; - if (verbose) - opts.log_level = verif_log_level | 4; /* force stats */ - else if (expected_ret == VERBOSE_ACCEPT) + if (expected_ret == VERBOSE_ACCEPT) opts.log_level = 2; + else if (verbose) + opts.log_level = verif_log_level | 4; /* force stats */ else opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL; opts.prog_flags = pflags; From af8a5125a04c128cbcc1daa21d9ba3e5d95a2fc4 Mon Sep 17 00:00:00 2001 From: Gregory Bell Date: Mon, 12 May 2025 10:04:13 -0400 Subject: [PATCH 084/135] selftests/bpf: test_verifier verbose log overflows Tests: - 458/p ld_dw: xor semi-random 64-bit imms, test 5 - 501/p scale: scale test 1 - 502/p scale: scale test 2 fail in verbose mode due to bpf_vlog[] overflowing. These tests generate large verifier logs that exceed the current buffer size, causing them to fail to load. Increase the size of the bpf_vlog[] buffer to accommodate larger logs and prevent false failures during test runs with verbose output. Signed-off-by: Gregory Bell Link: https://lore.kernel.org/r/e49267100f07f099a5877a3a5fc797b702bbaf0c.1747058195.git.grbell@redhat.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 2d13e862b078..27db34ecf3f5 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -734,7 +734,7 @@ static __u32 btf_raw_types[] = { BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr *ptr; */ }; -static char bpf_vlog[UINT_MAX >> 8]; +static char bpf_vlog[UINT_MAX >> 5]; static int load_btf_spec(__u32 *types, int types_len, const char *strings, int strings_len) From 3a320ed325488d07081461663ac9243293006080 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Sat, 10 May 2025 01:24:50 +0100 Subject: [PATCH 085/135] selftests/bpf: Allow skipping docs compilation Currently rst2man is required to build bpf selftests, as the tool is used by Makefile.docs. rst2man may be missing in some build environments and is not essential for selftests. It makes sense to allow user to skip building docs. 
This patch adds a SKIP_DOCS variable to the BPF selftests Makefile that,
when set to 1, allows skipping building docs, for example:

make -C tools/testing/selftests TARGETS=bpf SKIP_DOCS=1

Signed-off-by: Mykyta Yatsenko
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20250510002450.365613-1-mykyta.yatsenko5@gmail.com
---
 tools/testing/selftests/bpf/Makefile | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 03663222a0a5..0d04cf54068e 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -358,7 +358,9 @@ $(CROSS_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
 		prefix= DESTDIR=$(SCRATCH_DIR)/ install-bin
 endif

+ifneq ($(SKIP_DOCS),1)
 all: docs
+endif

 docs:
 	$(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \

From fd5fd538a1f4b34cee6823ba0ddda2f7a55aca96 Mon Sep 17 00:00:00 2001
From: Anton Protopopov
Date: Sat, 10 May 2025 18:20:11 +0000
Subject: [PATCH 086/135] libbpf: Use proper errno value in nlattr

The return value of the validate_nla() function can be propagated all
the way up to users of the libbpf API. In case of error this libbpf
version of validate_nla returns -1, which will be seen as -EPERM from
the user's point of view. Instead, return a more reasonable -EINVAL.

Fixes: bbf48c18ee0c ("libbpf: add error reporting in XDP")
Suggested-by: Andrii Nakryiko
Signed-off-by: Anton Protopopov
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20250510182011.2246631-1-a.s.protopopov@gmail.com
---
 tools/lib/bpf/nlattr.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 975e265eab3b..06663f9ea581 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -63,16 +63,16 @@ static int validate_nla(struct nlattr *nla, int maxtype,
 		minlen = nla_attr_minlen[pt->type];

 	if (libbpf_nla_len(nla) < minlen)
-		return -1;
+		return -EINVAL;

 	if (pt->maxlen && libbpf_nla_len(nla) > pt->maxlen)
-		return -1;
+		return -EINVAL;

 	if (pt->type == LIBBPF_NLA_STRING) {
 		char *data = libbpf_nla_data(nla);

 		if (data[libbpf_nla_len(nla) - 1] != '\0')
-			return -1;
+			return -EINVAL;
 	}

 	return 0;
@@ -118,19 +118,18 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
 		if (policy) {
 			err = validate_nla(nla, maxtype, policy);
 			if (err < 0)
-				goto errout;
+				return err;
 		}

-		if (tb[type])
+		if (tb[type]) {
 			pr_warn("Attribute of type %#x found multiple times in message, "
 				"previous attribute is being ignored.\n", type);
+		}

 		tb[type] = nla;
 	}

-	err = 0;
-errout:
-	return err;
+	return 0;
 }

 /**

From d060b6aab031b6113f78cd3d1585115f13386eec Mon Sep 17 00:00:00 2001
From: Mykyta Yatsenko
Date: Mon, 12 May 2025 21:53:46 +0100
Subject: [PATCH 087/135] helpers: make few bpf helpers public

Make bpf_dynptr_slice_rdwr, bpf_dynptr_check_off_len and
__bpf_dynptr_write available outside of helpers.c by adding their
prototypes to include/linux/bpf.h. The bpf_dynptr_check_off_len()
implementation is moved to the header and made explicitly inline, as
small functions should typically be inlined.

These functions are going to be used from bpf_trace.c in the next patch
of this series.
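For reference, the next patch in this series consumes the now-public
helper roughly like this (a sketch of the call-site pattern from its
__bpf_dynptr_copy() functions, not a new API):

  struct bpf_dynptr_kern *dst = (struct bpf_dynptr_kern *)dptr;

  /* reject copies that would run past the end of the dynptr */
  if (bpf_dynptr_check_off_len(dst, doff, size))
          return -E2BIG;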
Acked-by: Andrii Nakryiko Signed-off-by: Mykyta Yatsenko Link: https://lore.kernel.org/r/20250512205348.191079-2-mykyta.yatsenko5@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 14 ++++++++++++++ kernel/bpf/helpers.c | 14 ++------------ 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3f0cc89c0622..83c56f40842b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1349,6 +1349,20 @@ u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); +int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, + void *src, u32 len, u64 flags); +void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, + void *buffer__opt, u32 buffer__szk); + +static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) +{ + u32 size = __bpf_dynptr_size(ptr); + + if (len > size || offset > size - len) + return -E2BIG; + + return 0; +} #ifdef CONFIG_BPF_JIT int bpf_trampoline_link_prog(struct bpf_tramp_link *link, diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index fed53da75025..c96cc6aeae99 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1714,16 +1714,6 @@ void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) memset(ptr, 0, sizeof(*ptr)); } -static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) -{ - u32 size = __bpf_dynptr_size(ptr); - - if (len > size || offset > size - len) - return -E2BIG; - - return 0; -} - BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) { int err; @@ -1810,8 +1800,8 @@ static const struct bpf_func_proto bpf_dynptr_read_proto = { .arg5_type = ARG_ANYTHING, }; -static int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src, - u32 len, u64 flags) +int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, void *src, + u32 len, u64 flags) { enum bpf_dynptr_type type; int err; From a498ee7576de24b4b0916ce56cf2686e261a29f7 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Mon, 12 May 2025 21:53:47 +0100 Subject: [PATCH 088/135] bpf: Implement dynptr copy kfuncs This patch introduces a new set of kfuncs for working with dynptrs in BPF programs, enabling reading variable-length user or kernel data into dynptr directly. To enable memory-safety, verifier allows only constant-sized reads via existing bpf_probe_read_{user|kernel} etc. kfuncs, dynptr-based kfuncs allow dynamically-sized reads without memory safety shortcomings. 
The following kfuncs are introduced: * `bpf_probe_read_kernel_dynptr()`: probes kernel-space data into a dynptr * `bpf_probe_read_user_dynptr()`: probes user-space data into a dynptr * `bpf_probe_read_kernel_str_dynptr()`: probes kernel-space string into a dynptr * `bpf_probe_read_user_str_dynptr()`: probes user-space string into a dynptr * `bpf_copy_from_user_dynptr()`: sleepable, copies user-space data into a dynptr for the current task * `bpf_copy_from_user_str_dynptr()`: sleepable, copies user-space string into a dynptr for the current task * `bpf_copy_from_user_task_dynptr()`: sleepable, copies user-space data of the task into a dynptr * `bpf_copy_from_user_task_str_dynptr()`: sleepable, copies user-space string of the task into a dynptr The implementation is built on two generic functions: * __bpf_dynptr_copy * __bpf_dynptr_copy_str These functions take function pointers as arguments, enabling the copying of data from various sources, including both kernel and user space. Use __always_inline for generic functions and callbacks to make sure the compiler doesn't generate indirect calls into callbacks, which is more expensive, especially on some kernel configurations. Inlining allows compiler to put direct calls into all the specific callback implementations (copy_user_data_sleepable, copy_user_data_nofault, and so on). Reviewed-by: Andrii Nakryiko Signed-off-by: Mykyta Yatsenko Link: https://lore.kernel.org/r/20250512205348.191079-3-mykyta.yatsenko5@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 8 ++ kernel/trace/bpf_trace.c | 194 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 202 insertions(+) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index c96cc6aeae99..ebb604e39eb1 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -3378,6 +3378,14 @@ BTF_ID_FLAGS(func, bpf_iter_kmem_cache_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLE BTF_ID_FLAGS(func, bpf_iter_kmem_cache_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_local_irq_save) BTF_ID_FLAGS(func, bpf_local_irq_restore) +BTF_ID_FLAGS(func, bpf_probe_read_user_dynptr) +BTF_ID_FLAGS(func, bpf_probe_read_kernel_dynptr) +BTF_ID_FLAGS(func, bpf_probe_read_user_str_dynptr) +BTF_ID_FLAGS(func, bpf_probe_read_kernel_str_dynptr) +BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 868920994517..985ce3854af8 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3466,6 +3466,142 @@ static int __init bpf_kprobe_multi_kfuncs_init(void) late_initcall(bpf_kprobe_multi_kfuncs_init); +typedef int (*copy_fn_t)(void *dst, const void *src, u32 size, struct task_struct *tsk); + +/* + * The __always_inline is to make sure the compiler doesn't + * generate indirect calls into callbacks, which is expensive, + * on some kernel configurations. 
This allows compiler to put + * direct calls into all the specific callback implementations + * (copy_user_data_sleepable, copy_user_data_nofault, and so on) + */ +static __always_inline int __bpf_dynptr_copy_str(struct bpf_dynptr *dptr, u32 doff, u32 size, + const void *unsafe_src, + copy_fn_t str_copy_fn, + struct task_struct *tsk) +{ + struct bpf_dynptr_kern *dst; + u32 chunk_sz, off; + void *dst_slice; + int cnt, err; + char buf[256]; + + dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size); + if (likely(dst_slice)) + return str_copy_fn(dst_slice, unsafe_src, size, tsk); + + dst = (struct bpf_dynptr_kern *)dptr; + if (bpf_dynptr_check_off_len(dst, doff, size)) + return -E2BIG; + + for (off = 0; off < size; off += chunk_sz - 1) { + chunk_sz = min_t(u32, sizeof(buf), size - off); + /* Expect str_copy_fn to return count of copied bytes, including + * zero terminator. Next iteration increment off by chunk_sz - 1 to + * overwrite NUL. + */ + cnt = str_copy_fn(buf, unsafe_src + off, chunk_sz, tsk); + if (cnt < 0) + return cnt; + err = __bpf_dynptr_write(dst, doff + off, buf, cnt, 0); + if (err) + return err; + if (cnt < chunk_sz || chunk_sz == 1) /* we are done */ + return off + cnt; + } + return off; +} + +static __always_inline int __bpf_dynptr_copy(const struct bpf_dynptr *dptr, u32 doff, + u32 size, const void *unsafe_src, + copy_fn_t copy_fn, struct task_struct *tsk) +{ + struct bpf_dynptr_kern *dst; + void *dst_slice; + char buf[256]; + u32 off, chunk_sz; + int err; + + dst_slice = bpf_dynptr_slice_rdwr(dptr, doff, NULL, size); + if (likely(dst_slice)) + return copy_fn(dst_slice, unsafe_src, size, tsk); + + dst = (struct bpf_dynptr_kern *)dptr; + if (bpf_dynptr_check_off_len(dst, doff, size)) + return -E2BIG; + + for (off = 0; off < size; off += chunk_sz) { + chunk_sz = min_t(u32, sizeof(buf), size - off); + err = copy_fn(buf, unsafe_src + off, chunk_sz, tsk); + if (err) + return err; + err = __bpf_dynptr_write(dst, doff + off, buf, chunk_sz, 0); + if (err) + return err; + } + return 0; +} + +static __always_inline int copy_user_data_nofault(void *dst, const void *unsafe_src, + u32 size, struct task_struct *tsk) +{ + return copy_from_user_nofault(dst, (const void __user *)unsafe_src, size); +} + +static __always_inline int copy_user_data_sleepable(void *dst, const void *unsafe_src, + u32 size, struct task_struct *tsk) +{ + int ret; + + if (!tsk) /* Read from the current task */ + return copy_from_user(dst, (const void __user *)unsafe_src, size); + + ret = access_process_vm(tsk, (unsigned long)unsafe_src, dst, size, 0); + if (ret != size) + return -EFAULT; + return 0; +} + +static __always_inline int copy_kernel_data_nofault(void *dst, const void *unsafe_src, + u32 size, struct task_struct *tsk) +{ + return copy_from_kernel_nofault(dst, unsafe_src, size); +} + +static __always_inline int copy_user_str_nofault(void *dst, const void *unsafe_src, + u32 size, struct task_struct *tsk) +{ + return strncpy_from_user_nofault(dst, (const void __user *)unsafe_src, size); +} + +static __always_inline int copy_user_str_sleepable(void *dst, const void *unsafe_src, + u32 size, struct task_struct *tsk) +{ + int ret; + + if (unlikely(size == 0)) + return 0; + + if (tsk) { + ret = copy_remote_vm_str(tsk, (unsigned long)unsafe_src, dst, size, 0); + } else { + ret = strncpy_from_user(dst, (const void __user *)unsafe_src, size - 1); + /* strncpy_from_user does not guarantee NUL termination */ + if (ret >= 0) + ((char *)dst)[ret] = '\0'; + } + + if (ret < 0) + return ret; + return ret + 1; +} + +static 
__always_inline int copy_kernel_str_nofault(void *dst, const void *unsafe_src, + u32 size, struct task_struct *tsk) +{ + return strncpy_from_kernel_nofault(dst, unsafe_src, size); +} + __bpf_kfunc_start_defs(); __bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type, @@ -3477,4 +3613,62 @@ __bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid return bpf_send_signal_common(sig, type, task, value); } +__bpf_kfunc int bpf_probe_read_user_dynptr(struct bpf_dynptr *dptr, u32 off, + u32 size, const void __user *unsafe_ptr__ign) +{ + return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign, + copy_user_data_nofault, NULL); +} + +__bpf_kfunc int bpf_probe_read_kernel_dynptr(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr__ign) +{ + return __bpf_dynptr_copy(dptr, off, size, unsafe_ptr__ign, + copy_kernel_data_nofault, NULL); +} + +__bpf_kfunc int bpf_probe_read_user_str_dynptr(struct bpf_dynptr *dptr, u32 off, + u32 size, const void __user *unsafe_ptr__ign) +{ + return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign, + copy_user_str_nofault, NULL); +} + +__bpf_kfunc int bpf_probe_read_kernel_str_dynptr(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr__ign) +{ + return __bpf_dynptr_copy_str(dptr, off, size, unsafe_ptr__ign, + copy_kernel_str_nofault, NULL); +} + +__bpf_kfunc int bpf_copy_from_user_dynptr(struct bpf_dynptr *dptr, u32 off, + u32 size, const void __user *unsafe_ptr__ign) +{ + return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign, + copy_user_data_sleepable, NULL); +} + +__bpf_kfunc int bpf_copy_from_user_str_dynptr(struct bpf_dynptr *dptr, u32 off, + u32 size, const void __user *unsafe_ptr__ign) +{ + return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign, + copy_user_str_sleepable, NULL); +} + +__bpf_kfunc int bpf_copy_from_user_task_dynptr(struct bpf_dynptr *dptr, u32 off, + u32 size, const void __user *unsafe_ptr__ign, + struct task_struct *tsk) +{ + return __bpf_dynptr_copy(dptr, off, size, (const void *)unsafe_ptr__ign, + copy_user_data_sleepable, tsk); +} + +__bpf_kfunc int bpf_copy_from_user_task_str_dynptr(struct bpf_dynptr *dptr, u32 off, + u32 size, const void __user *unsafe_ptr__ign, + struct task_struct *tsk) +{ + return __bpf_dynptr_copy_str(dptr, off, size, (const void *)unsafe_ptr__ign, + copy_user_str_sleepable, tsk); +} + __bpf_kfunc_end_defs(); From c61bcd29eda9ea8db753ad5217fd24d9ee42a96b Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Mon, 12 May 2025 21:53:48 +0100 Subject: [PATCH 089/135] selftests/bpf: introduce tests for dynptr copy kfuncs Introduce selftests verifying newly-added dynptr copy kfuncs. Covering contiguous and non-contiguous memory backed dynptrs. Disable test_probe_read_user_str_dynptr that triggers bug in strncpy_from_user_nofault. Patch to fix the issue [1]. 
[1] https://patchwork.kernel.org/project/linux-mm/patch/20250422131449.57177-1-mykyta.yatsenko5@gmail.com/ Acked-by: Andrii Nakryiko Signed-off-by: Mykyta Yatsenko Link: https://lore.kernel.org/r/20250512205348.191079-4-mykyta.yatsenko5@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/DENYLIST | 1 + .../testing/selftests/bpf/prog_tests/dynptr.c | 13 + .../selftests/bpf/progs/dynptr_success.c | 230 ++++++++++++++++++ 3 files changed, 244 insertions(+) diff --git a/tools/testing/selftests/bpf/DENYLIST b/tools/testing/selftests/bpf/DENYLIST index f748f2c33b22..1789a61d0a9b 100644 --- a/tools/testing/selftests/bpf/DENYLIST +++ b/tools/testing/selftests/bpf/DENYLIST @@ -1,5 +1,6 @@ # TEMPORARY # Alphabetical order +dynptr/test_probe_read_user_str_dynptr # disabled until https://patchwork.kernel.org/project/linux-mm/patch/20250422131449.57177-1-mykyta.yatsenko5@gmail.com/ makes it into the bpf-next get_stack_raw_tp # spams with kernel warnings until next bpf -> bpf-next merge stacktrace_build_id stacktrace_build_id_nmi diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index e29cc16124c2..62e7ec775f24 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -33,10 +33,19 @@ static struct { {"test_dynptr_skb_no_buff", SETUP_SKB_PROG}, {"test_dynptr_skb_strcmp", SETUP_SKB_PROG}, {"test_dynptr_skb_tp_btf", SETUP_SKB_PROG_TP}, + {"test_probe_read_user_dynptr", SETUP_XDP_PROG}, + {"test_probe_read_kernel_dynptr", SETUP_XDP_PROG}, + {"test_probe_read_user_str_dynptr", SETUP_XDP_PROG}, + {"test_probe_read_kernel_str_dynptr", SETUP_XDP_PROG}, + {"test_copy_from_user_dynptr", SETUP_SYSCALL_SLEEP}, + {"test_copy_from_user_str_dynptr", SETUP_SYSCALL_SLEEP}, + {"test_copy_from_user_task_dynptr", SETUP_SYSCALL_SLEEP}, + {"test_copy_from_user_task_str_dynptr", SETUP_SYSCALL_SLEEP}, }; static void verify_success(const char *prog_name, enum test_setup_type setup_type) { + char user_data[384] = {[0 ... 382] = 'a', '\0'}; struct dynptr_success *skel; struct bpf_program *prog; struct bpf_link *link; @@ -58,6 +67,10 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ if (!ASSERT_OK(err, "dynptr_success__load")) goto cleanup; + skel->bss->user_ptr = user_data; + skel->data->test_len[0] = sizeof(user_data); + memcpy(skel->bss->expected_str, user_data, sizeof(user_data)); + switch (setup_type) { case SETUP_SYSCALL_SLEEP: link = bpf_program__attach(prog); diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index e1fba28e4a86..a0391f9da2d4 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -680,3 +680,233 @@ out: bpf_ringbuf_discard_dynptr(&ptr_buf, 0); return XDP_DROP; } + +void *user_ptr; +/* Contains the copy of the data pointed by user_ptr. + * Size 384 to make it not fit into a single kernel chunk when copying + * but less than the maximum bpf stack size (512). + */ +char expected_str[384]; +__u32 test_len[7] = {0/* placeholder */, 0, 1, 2, 255, 256, 257}; + +typedef int (*bpf_read_dynptr_fn_t)(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr); + +/* Returns the offset just before the end of the maximum sized xdp fragment. + * Any write larger than 32 bytes will be split between 2 fragments. 
+ */ +__u32 xdp_near_frag_end_offset(void) +{ + const __u32 headroom = 256; + const __u32 max_frag_size = __PAGE_SIZE - headroom - sizeof(struct skb_shared_info); + + /* 32 bytes before the approximate end of the fragment */ + return max_frag_size - 32; +} + +/* Use __always_inline on test_dynptr_probe[_str][_xdp]() and callbacks + * of type bpf_read_dynptr_fn_t to prevent compiler from generating + * indirect calls that make program fail to load with "unknown opcode" error. + */ +static __always_inline void test_dynptr_probe(void *ptr, bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + char buf[sizeof(expected_str)]; + struct bpf_dynptr ptr_buf; + int i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + err = err ?: bpf_read_dynptr_fn(&ptr_buf, 0, test_len[i], ptr); + if (len > sizeof(buf)) + break; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0); + + if (err || bpf_memcmp(expected_str, buf, len)) + err = 1; + + /* Reset buffer and dynptr */ + __builtin_memset(buf, 0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_buf, 0, buf, len, 0); + } + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); +} + +static __always_inline void test_dynptr_probe_str(void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + char buf[sizeof(expected_str)]; + struct bpf_dynptr ptr_buf; + __u32 cnt, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(buf), 0, &ptr_buf); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + cnt = bpf_read_dynptr_fn(&ptr_buf, 0, len, ptr); + if (cnt != len) + err = 1; + + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_buf, 0, 0); + if (!len) + continue; + if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0') + err = 1; + } + bpf_ringbuf_discard_dynptr(&ptr_buf, 0); +} + +static __always_inline void test_dynptr_probe_xdp(struct xdp_md *xdp, void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + struct bpf_dynptr ptr_xdp; + char buf[sizeof(expected_str)]; + __u32 off, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + off = xdp_near_frag_end_offset(); + err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + err = err ?: bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr); + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0); + if (err || bpf_memcmp(expected_str, buf, len)) + err = 1; + /* Reset buffer and dynptr */ + __builtin_memset(buf, 0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0); + } +} + +static __always_inline void test_dynptr_probe_str_xdp(struct xdp_md *xdp, void *ptr, + bpf_read_dynptr_fn_t bpf_read_dynptr_fn) +{ + struct bpf_dynptr ptr_xdp; + char buf[sizeof(expected_str)]; + __u32 cnt, off, i; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return; + + off = xdp_near_frag_end_offset(); + err = bpf_dynptr_from_xdp(xdp, 0, &ptr_xdp); + if (err) + return; + + bpf_for(i, 0, ARRAY_SIZE(test_len)) { + __u32 len = test_len[i]; + + cnt = bpf_read_dynptr_fn(&ptr_xdp, off, len, ptr); + if (cnt != len) + err = 1; + + if (len > sizeof(buf)) + continue; + err = err ?: bpf_dynptr_read(&buf, len, &ptr_xdp, off, 0); + + if (!len) + continue; + if (err || bpf_memcmp(expected_str, buf, len - 1) || buf[len - 1] != '\0') + err = 1; + + __builtin_memset(buf, 
0, sizeof(buf)); + err = err ?: bpf_dynptr_write(&ptr_xdp, off, buf, len, 0); + } +} + +SEC("xdp") +int test_probe_read_user_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe(user_ptr, bpf_probe_read_user_dynptr); + if (!err) + test_dynptr_probe_xdp(xdp, user_ptr, bpf_probe_read_user_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_kernel_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe(expected_str, bpf_probe_read_kernel_dynptr); + if (!err) + test_dynptr_probe_xdp(xdp, expected_str, bpf_probe_read_kernel_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_user_str_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe_str(user_ptr, bpf_probe_read_user_str_dynptr); + if (!err) + test_dynptr_probe_str_xdp(xdp, user_ptr, bpf_probe_read_user_str_dynptr); + return XDP_PASS; +} + +SEC("xdp") +int test_probe_read_kernel_str_dynptr(struct xdp_md *xdp) +{ + test_dynptr_probe_str(expected_str, bpf_probe_read_kernel_str_dynptr); + if (!err) + test_dynptr_probe_str_xdp(xdp, expected_str, bpf_probe_read_kernel_str_dynptr); + return XDP_PASS; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_dynptr(void *ctx) +{ + test_dynptr_probe(user_ptr, bpf_copy_from_user_dynptr); + return 0; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_str_dynptr(void *ctx) +{ + test_dynptr_probe_str(user_ptr, bpf_copy_from_user_str_dynptr); + return 0; +} + +static int bpf_copy_data_from_user_task(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + return bpf_copy_from_user_task_dynptr(dptr, off, size, unsafe_ptr, task); +} + +static int bpf_copy_data_from_user_task_str(struct bpf_dynptr *dptr, u32 off, + u32 size, const void *unsafe_ptr) +{ + struct task_struct *task = bpf_get_current_task_btf(); + + return bpf_copy_from_user_task_str_dynptr(dptr, off, size, unsafe_ptr, task); +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_task_dynptr(void *ctx) +{ + test_dynptr_probe(user_ptr, bpf_copy_data_from_user_task); + return 0; +} + +SEC("fentry.s/" SYS_PREFIX "sys_nanosleep") +int test_copy_from_user_task_str_dynptr(void *ctx) +{ + test_dynptr_probe_str(user_ptr, bpf_copy_data_from_user_task_str); + return 0; +} From 79af71c5fe44f3973c666bab3e9e5845812d3b8b Mon Sep 17 00:00:00 2001 From: Khaled Elnaggar Date: Tue, 13 May 2025 04:58:59 +0300 Subject: [PATCH 090/135] docs: bpf: Fix bullet point formatting warning Fix indentation for a bullet list item in bpf_iterators.rst. According to reStructuredText rules, bullet list item bodies must be consistently indented relative to the bullet. The indentation of the first line after the bullet determines the alignment for the rest of the item body. Reported by smatch: /linux/Documentation/bpf/bpf_iterators.rst:55: WARNING: Bullet list ends without a blank line; unexpected unindent. [docutils] Fixes: 7220eabff8cb ("bpf, docs: document open-coded BPF iterators") Signed-off-by: Khaled Elnaggar Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250513015901.475207-1-khaledelnaggarlinux@gmail.com --- Documentation/bpf/bpf_iterators.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Documentation/bpf/bpf_iterators.rst b/Documentation/bpf/bpf_iterators.rst index 8f0a4a91b77a..189e3ec1c6c8 100644 --- a/Documentation/bpf/bpf_iterators.rst +++ b/Documentation/bpf/bpf_iterators.rst @@ -52,14 +52,14 @@ a pointer to this `struct bpf_iter_` as the very first argument. 
 Additionally:
  - Constructor, i.e., `bpf_iter_<type>_new()`, can have arbitrary extra
- number of arguments.
+   number of arguments.
  - Next method, i.e., `bpf_iter_<type>_next()`, has to return a pointer
- type and should have exactly one argument: `struct bpf_iter_<type> *`
- (const/volatile/restrict and typedefs are ignored).
+   type and should have exactly one argument: `struct bpf_iter_<type> *`
+   (const/volatile/restrict and typedefs are ignored).
  - Destructor, i.e., `bpf_iter_<type>_destroy()`, should return void and
- should have exactly one argument, similar to the next method.
+   should have exactly one argument, similar to the next method.
  - `struct bpf_iter_<type>` size is enforced to be positive and
- a multiple of 8 bytes (to fit stack slots correctly).
+   a multiple of 8 bytes (to fit stack slots correctly).

 Such strictness and consistency allows to build generic helpers abstracting
 important, but boilerplate, details to be able to use open-coded iterators

From 3880cdbed1c4607e378f58fa924c5d6df900d1d3 Mon Sep 17 00:00:00 2001
From: Tao Chen
Date: Tue, 13 May 2025 12:27:47 +0800
Subject: [PATCH 091/135] bpf: Fix WARN() in get_bpf_raw_tp_regs

syzkaller reported an issue:

WARNING: CPU: 3 PID: 5971 at kernel/trace/bpf_trace.c:1861 get_bpf_raw_tp_regs+0xa4/0x100 kernel/trace/bpf_trace.c:1861
Modules linked in:
CPU: 3 UID: 0 PID: 5971 Comm: syz-executor205 Not tainted 6.15.0-rc5-syzkaller-00038-g707df3375124 #0 PREEMPT(full)
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014
RIP: 0010:get_bpf_raw_tp_regs+0xa4/0x100 kernel/trace/bpf_trace.c:1861
RSP: 0018:ffffc90003636fa8 EFLAGS: 00010293
RAX: 0000000000000000 RBX: 0000000000000003 RCX: ffffffff81c6bc4c
RDX: ffff888032efc880 RSI: ffffffff81c6bc83 RDI: 0000000000000005
RBP: ffff88806a730860 R08: 0000000000000005 R09: 0000000000000003
R10: 0000000000000004 R11: 0000000000000000 R12: 0000000000000004
R13: 0000000000000001 R14: ffffc90003637008 R15: 0000000000000900
FS:  0000000000000000(0000) GS:ffff8880d6cdf000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f7baee09130 CR3: 0000000029f5a000 CR4: 0000000000352ef0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
Call Trace:
 ____bpf_get_stack_raw_tp kernel/trace/bpf_trace.c:1934 [inline]
 bpf_get_stack_raw_tp+0x24/0x160 kernel/trace/bpf_trace.c:1931
 bpf_prog_ec3b2eefa702d8d3+0x43/0x47
 bpf_dispatcher_nop_func include/linux/bpf.h:1316 [inline]
 __bpf_prog_run include/linux/filter.h:718 [inline]
 bpf_prog_run include/linux/filter.h:725 [inline]
 __bpf_trace_run kernel/trace/bpf_trace.c:2363 [inline]
 bpf_trace_run3+0x23f/0x5a0 kernel/trace/bpf_trace.c:2405
 __bpf_trace_mmap_lock_acquire_returned+0xfc/0x140 include/trace/events/mmap_lock.h:47
 __traceiter_mmap_lock_acquire_returned+0x79/0xc0 include/trace/events/mmap_lock.h:47
 __do_trace_mmap_lock_acquire_returned include/trace/events/mmap_lock.h:47 [inline]
 trace_mmap_lock_acquire_returned include/trace/events/mmap_lock.h:47 [inline]
 __mmap_lock_do_trace_acquire_returned+0x138/0x1f0 mm/mmap_lock.c:35
 __mmap_lock_trace_acquire_returned include/linux/mmap_lock.h:36 [inline]
 mmap_read_trylock include/linux/mmap_lock.h:204 [inline]
 stack_map_get_build_id_offset+0x535/0x6f0 kernel/bpf/stackmap.c:157
 __bpf_get_stack+0x307/0xa10 kernel/bpf/stackmap.c:483
 ____bpf_get_stack kernel/bpf/stackmap.c:499 [inline]
 bpf_get_stack+0x32/0x40 kernel/bpf/stackmap.c:496
 ____bpf_get_stack_raw_tp kernel/trace/bpf_trace.c:1941 [inline]
 bpf_get_stack_raw_tp+0x124/0x160 kernel/trace/bpf_trace.c:1931
 bpf_prog_ec3b2eefa702d8d3+0x43/0x47

Tracepoints like trace_mmap_lock_acquire_returned may cause a nested
call, as in the corner case shown above, which will be resolved with a
more general method in the future. As a result, WARN_ON_ONCE will be
triggered.

As Alexei suggested, remove the WARN_ON_ONCE first.

Fixes: 9594dc3c7e71 ("bpf: fix nested bpf tracepoints with per-cpu data")
Reported-by: syzbot+45b0c89a0fc7ae8dbadc@syzkaller.appspotmail.com
Suggested-by: Alexei Starovoitov
Signed-off-by: Tao Chen
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20250513042747.757042-1-chen.dylane@linux.dev
Closes: https://lore.kernel.org/bpf/8bc2554d-1052-4922-8832-e0078a033e1d@gmail.com
---
 kernel/trace/bpf_trace.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 985ce3854af8..b0eb721fcfb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1753,7 +1753,7 @@ static struct pt_regs *get_bpf_raw_tp_regs(void)
 	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
 	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

-	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
+	if (nest_level > ARRAY_SIZE(tp_regs->regs)) {
 		this_cpu_dec(bpf_raw_tp_nest_level);
 		return ERR_PTR(-EBUSY);
 	}

From bc049387b41f41bee61e8cc338a5e99ca9798a09 Mon Sep 17 00:00:00 2001
From: Kumar Kartikeya Dwivedi
Date: Tue, 13 May 2025 07:28:12 -0700
Subject: [PATCH 092/135] bpf: Add support for __prog argument suffix to pass
 in prog->aux

Instead of hardcoding the list of kfuncs that need prog->aux passed to
them with a combination of fixup_kfunc_call adjustment + __ign suffix,
combine both into a __prog suffix, which ignores the argument passed in,
and fixes it up to the prog->aux. This allows kfuncs to have the
prog->aux passed into them without having to touch the verifier.

Cc: Tejun Heo
Signed-off-by: Kumar Kartikeya Dwivedi
Link: https://lore.kernel.org/r/20250513142812.1021591-1-memxor@gmail.com
Signed-off-by: Alexei Starovoitov
---
 Documentation/bpf/kfuncs.rst | 17 +++++++++++++++++
 include/linux/bpf_verifier.h |  1 +
 kernel/bpf/helpers.c         |  4 ++--
 kernel/bpf/verifier.c        | 33 +++++++++++++++++++++++++++------
 4 files changed, 47 insertions(+), 8 deletions(-)

diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index a8f5782bd833..ae468b781d31 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -160,6 +160,23 @@ Or::
         ...
 }

+2.2.6 __prog Annotation
+---------------------------
+This annotation is used to indicate that the argument needs to be fixed up to
+the bpf_prog_aux of the caller BPF program. Any value passed into this argument
+is ignored, and rewritten by the verifier.
+
+An example is given below::
+
+        __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
+                                                 int (callback_fn)(void *map, int *key, void *value),
+                                                 unsigned int flags,
+                                                 void *aux__prog)
+        {
+                struct bpf_prog_aux *aux = aux__prog;
+                ...
+        }
+
 .. _BPF_kfunc_nodef:

 2.3 Using an existing kernel function

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 9734544b6957..cedd66867ecf 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -591,6 +591,7 @@ struct bpf_insn_aux_data {
 	 * bpf_fastcall pattern.
 	 */
 	u8 fastcall_spills_num:3;
+	u8 arg_prog:4;

 	/* below fields are initialized once */
 	unsigned int orig_idx; /* original instruction index */
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index ebb604e39eb1..c1113b74e1e2 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -3002,9 +3002,9 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
 					 int (callback_fn)(void *map, int *key, void *value),
 					 unsigned int flags,
-					 void *aux__ign)
+					 void *aux__prog)
 {
-	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign;
+	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__prog;
 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;

 	if (flags)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 28f5a7899bd6..f6d3655b3a7a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -322,6 +322,7 @@ struct bpf_kfunc_call_arg_meta {
 	struct btf *arg_btf;
 	u32 arg_btf_id;
 	bool arg_owning_ref;
+	bool arg_prog;

 	struct {
 		struct btf_field *field;
@@ -11897,6 +11898,11 @@ static bool is_kfunc_arg_irq_flag(const struct btf *btf, const struct btf_param
 	return btf_param_match_suffix(btf, arg, "__irq_flag");
 }

+static bool is_kfunc_arg_prog(const struct btf *btf, const struct btf_param *arg)
+{
+	return btf_param_match_suffix(btf, arg, "__prog");
+}
+
 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
 					  const struct btf_param *arg,
 					  const char *name)
@@ -12938,6 +12944,17 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 		if (is_kfunc_arg_ignore(btf, &args[i]))
 			continue;

+		if (is_kfunc_arg_prog(btf, &args[i])) {
+			/* Used to reject repeated use of __prog. */
+			if (meta->arg_prog) {
+				verbose(env, "Only 1 prog->aux argument supported per-kfunc\n");
+				return -EFAULT;
+			}
+			meta->arg_prog = true;
+			cur_aux(env)->arg_prog = regno;
+			continue;
+		}
+
 		if (btf_type_is_scalar(t)) {
 			if (reg->type != SCALAR_VALUE) {
 				verbose(env, "R%d is not a scalar\n", regno);
@@ -21517,13 +21534,17 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
 		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
 		*cnt = 1;
-	} else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) {
-		struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) };
+	}

-		insn_buf[0] = ld_addrs[0];
-		insn_buf[1] = ld_addrs[1];
-		insn_buf[2] = *insn;
-		*cnt = 3;
+	if (env->insn_aux_data[insn_idx].arg_prog) {
+		u32 regno = env->insn_aux_data[insn_idx].arg_prog;
+		struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(regno, (long)env->prog->aux) };
+		int idx = *cnt;
+
+		insn_buf[idx++] = ld_addrs[0];
+		insn_buf[idx++] = ld_addrs[1];
+		insn_buf[idx++] = *insn;
+		*cnt = idx;
 	}
 	return 0;
 }

From d0445d7dd3fd9b15af7564c38d7aa3cbc29778ee Mon Sep 17 00:00:00 2001
From: Mykyta Yatsenko
Date: Wed, 14 May 2025 12:32:20 +0100
Subject: [PATCH 093/135] libbpf: Check bpf_map_skeleton link for NULL

Avoid dereferencing bpf_map_skeleton's link field if it's NULL. If a BPF
map skeleton is created with a size that indicates the link field is
present, but the field was not actually initialized with a valid
bpf_link pointer, libbpf crashes. This may happen when using a libbpf-rs
skeleton. Skeleton loading may still progress, but the user needs to
attach the struct_ops map separately.
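If that happens, the affected struct_ops map can still be attached
manually with the regular libbpf API, e.g. (a sketch; the map name is a
placeholder):

  struct bpf_link *link;

  /* on failure libbpf returns NULL and sets errno */
  link = bpf_map__attach_struct_ops(skel->maps.my_struct_ops);
  if (!link)
          fprintf(stderr, "attach failed: %d\n", -errno);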
Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250514113220.219095-1-mykyta.yatsenko5@gmail.com --- tools/lib/bpf/libbpf.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 617cfb9a7ff5..e9c641a2fb20 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -14102,6 +14102,12 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) } link = map_skel->link; + if (!link) { + pr_warn("map '%s': BPF map skeleton link is uninitialized\n", + bpf_map__name(map)); + continue; + } + if (*link) continue; From 4dd372de3fde1afab3adf231e8b2962c40200387 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 14 May 2025 14:40:20 -0700 Subject: [PATCH 094/135] selftests/bpf: Relax TCPOPT_WINDOW validation in test_tcp_custom_syncookie.c. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The custom syncookie test expects TCPOPT_WINDOW to be 7 based on the kernel’s behaviour at the time, but the upcoming series [0] will bump it to 10. Let's relax the test to allow any valid TCPOPT_WINDOW value in the range 1–14. Signed-off-by: Kuniyuki Iwashima Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/netdev/20250513193919.1089692-1-edumazet@google.com/ #[0] Link: https://patch.msgid.link/20250514214021.85187-1-kuniyu@amazon.com --- tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c index eb5cca1fce16..7d5293de1952 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c @@ -294,7 +294,9 @@ static int tcp_validate_sysctl(struct tcp_syncookie *ctx) (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6)) goto err; - if (!ctx->attrs.wscale_ok || ctx->attrs.snd_wscale != 7) + if (!ctx->attrs.wscale_ok || + !ctx->attrs.snd_wscale || + ctx->attrs.snd_wscale >= BPF_SYNCOOKIE_WSCALE_MASK) goto err; if (!ctx->attrs.tstamp_ok) From 5f55f2168432298f5a55294831ab6a76a10cb3c3 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 12 May 2025 14:26:15 +0200 Subject: [PATCH 095/135] s390/bpf: Store backchain even for leaf progs Currently a crash in a leaf prog (caused by a bug) produces the following call trace: [<000003ff600ebf00>] bpf_prog_6df0139e1fbf2789_fentry+0x20/0x78 [<0000000000000000>] 0x0 This is because leaf progs do not store backchain. Fix by making all progs do it. This is what GCC and Clang-generated code does as well. 
Now the call trace looks like this: [<000003ff600eb0f2>] bpf_prog_6df0139e1fbf2789_fentry+0x2a/0x80 [<000003ff600ed096>] bpf_trampoline_201863462940+0x96/0xf4 [<000003ff600e3a40>] bpf_prog_05f379658fdd72f2_classifier_0+0x58/0xc0 [<000003ffe0aef070>] bpf_test_run+0x210/0x390 [<000003ffe0af0dc2>] bpf_prog_test_run_skb+0x25a/0x668 [<000003ffe038a90e>] __sys_bpf+0xa46/0xdb0 [<000003ffe038ad0c>] __s390x_sys_bpf+0x44/0x50 [<000003ffe0defea8>] __do_syscall+0x150/0x280 [<000003ffe0e01d5c>] system_call+0x74/0x98 Fixes: 054623105728 ("s390/bpf: Add s390x eBPF JIT compiler backend") Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250512122717.54878-1-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 0776dfde2dba..945106b5562d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -605,17 +605,15 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, } /* Setup stack and backchain */ if (is_first_pass(jit) || (jit->seen & SEEN_STACK)) { - if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) - /* lgr %w1,%r15 (backchain) */ - EMIT4(0xb9040000, REG_W1, REG_15); + /* lgr %w1,%r15 (backchain) */ + EMIT4(0xb9040000, REG_W1, REG_15); /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */ EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED); /* aghi %r15,-STK_OFF */ EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth)); - if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) - /* stg %w1,152(%r15) (backchain) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, - REG_15, 152); + /* stg %w1,152(%r15) (backchain) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, + REG_15, 152); } } From 94bde253d3ae5d8a01cb958663b12daef1d06574 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 12 May 2025 22:57:30 +0200 Subject: [PATCH 096/135] bpf: Pass the same orig_call value to trampoline functions There is currently some confusion in the s390x JIT regarding whether orig_call can be NULL and what that means. Originally the NULL value was used to distinguish the struct_ops case, but this was superseded by BPF_TRAMP_F_INDIRECT (see commit 0c970ed2f87c ("s390/bpf: Fix indirect trampoline generation"). The remaining reason to have this check is that NULL can actually be passed to the arch_bpf_trampoline_size() call - but not to the respective arch_prepare_bpf_trampoline()! call - by bpf_struct_ops_prepare_trampoline(). Remove this asymmetry by passing stub_func to both functions, so that JITs may rely on orig_call never being NULL. Signed-off-by: Ilya Leoshkevich Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20250512221911.61314-2-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/bpf_struct_ops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index db13ee70d94d..96113633e391 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -601,7 +601,7 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, if (model->ret_size > 0) flags |= BPF_TRAMP_F_RET_FENTRY_RET; - size = arch_bpf_trampoline_size(model, flags, tlinks, NULL); + size = arch_bpf_trampoline_size(model, flags, tlinks, stub_func); if (size <= 0) return size ? 
: -EFAULT; From 8e57cf09c84cac99eb31354a3cc70f8b8981bfc2 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 12 May 2025 22:57:31 +0200 Subject: [PATCH 097/135] s390/bpf: Remove the orig_call NULL check Now that orig_call can never be NULL, remove the respective check. Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250512221911.61314-3-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 945106b5562d..6f22b5199c20 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -2583,9 +2583,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, if (nr_stack_args > MAX_NR_STACK_ARGS) return -ENOTSUPP; - /* Return to %r14, since func_addr and %r0 are not available. */ - if ((!func_addr && !(flags & BPF_TRAMP_F_ORIG_STACK)) || - (flags & BPF_TRAMP_F_INDIRECT)) + /* Return to %r14 in the struct_ops case. */ + if (flags & BPF_TRAMP_F_INDIRECT) flags |= BPF_TRAMP_F_SKIP_FRAME; /* From 1cb0f56d96185cb20e63e191fc291191823e6f52 Mon Sep 17 00:00:00 2001 From: Paul Chaignon Date: Mon, 19 May 2025 15:43:57 +0200 Subject: [PATCH 098/135] bpf: WARN_ONCE on verifier bugs Throughout the verifier's logic, there are multiple checks for inconsistent states that should never happen and would indicate a verifier bug. These bugs are typically logged in the verifier logs and sometimes preceded by a WARN_ONCE. This patch reworks these checks to consistently emit a verifier log AND a warning when CONFIG_DEBUG_KERNEL is enabled. The consistent use of WARN_ONCE should help fuzzers (ex. syzkaller) expose any situation where they are actually able to reach one of those buggy verifier states. Acked-by: Andrii Nakryiko Signed-off-by: Paul Chaignon Link: https://lore.kernel.org/r/aCs1nYvNNMq8dAWP@mail.gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 6 ++ include/linux/bpf_verifier.h | 11 +++ kernel/bpf/btf.c | 4 +- kernel/bpf/verifier.c | 141 +++++++++++++++-------------------- 4 files changed, 79 insertions(+), 83 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 83c56f40842b..5b25d278409b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -346,6 +346,12 @@ static inline const char *btf_field_type_name(enum btf_field_type type) } } +#if IS_ENABLED(CONFIG_DEBUG_KERNEL) +#define BPF_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) +#else +#define BPF_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) +#endif + static inline u32 btf_field_type_size(enum btf_field_type type) { switch (type) { diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index cedd66867ecf..78c97e12ea4e 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -839,6 +839,17 @@ __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env, u32 insn_off, const char *prefix_fmt, ...); +#define verifier_bug_if(cond, env, fmt, args...) \ + ({ \ + bool __cond = (cond); \ + if (unlikely(__cond)) { \ + BPF_WARN_ONCE(1, "verifier bug: " fmt "(" #cond ")\n", ##args); \ + bpf_log(&env->log, "verifier bug: " fmt "(" #cond ")\n", ##args); \ + } \ + (__cond); \ + }) +#define verifier_bug(env, fmt, args...) 
verifier_bug_if(1, env, fmt, ##args) + static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { struct bpf_verifier_state *cur = env->cur_state; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 6b21ca67070c..0f7828380895 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -7659,7 +7659,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog) return 0; if (!prog->aux->func_info) { - bpf_log(log, "Verifier bug\n"); + verifier_bug(env, "func_info undefined"); return -EFAULT; } @@ -7683,7 +7683,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog) tname = btf_name_by_offset(btf, fn_t->name_off); if (prog->aux->func_info_aux[subprog].unreliable) { - bpf_log(log, "Verifier bug in function %s()\n", tname); + verifier_bug(env, "unreliable BTF for function %s()", tname); return -EFAULT; } if (prog_type == BPF_PROG_TYPE_EXT) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f6d3655b3a7a..d5807d2efc92 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1924,11 +1924,8 @@ static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_env *env, u32 steps = 0; while (topmost && topmost->loop_entry) { - if (steps++ > st->dfs_depth) { - WARN_ONCE(true, "verifier bug: infinite loop in get_loop_entry\n"); - verbose(env, "verifier bug: infinite loop in get_loop_entry()\n"); + if (verifier_bug_if(steps++ > st->dfs_depth, env, "infinite loop")) return ERR_PTR(-EFAULT); - } topmost = topmost->loop_entry; } return topmost; @@ -3460,12 +3457,11 @@ static int mark_reg_read(struct bpf_verifier_env *env, /* if read wasn't screened by an earlier write ... */ if (writes && state->live & REG_LIVE_WRITTEN) break; - if (parent->live & REG_LIVE_DONE) { - verbose(env, "verifier BUG type %s var_off %lld off %d\n", - reg_type_str(env, parent->type), - parent->var_off.value, parent->off); + if (verifier_bug_if(parent->live & REG_LIVE_DONE, env, + "type %s var_off %lld off %d", + reg_type_str(env, parent->type), + parent->var_off.value, parent->off)) return -EFAULT; - } /* The first condition is more likely to be true than the * second, checked it first. 
*/ @@ -3858,14 +3854,14 @@ static int push_insn_history(struct bpf_verifier_env *env, struct bpf_verifier_s /* atomic instructions push insn_flags twice, for READ and * WRITE sides, but they should agree on stack slot */ - WARN_ONCE((env->cur_hist_ent->flags & insn_flags) && - (env->cur_hist_ent->flags & insn_flags) != insn_flags, - "verifier insn history bug: insn_idx %d cur flags %x new flags %x\n", - env->insn_idx, env->cur_hist_ent->flags, insn_flags); + verifier_bug_if((env->cur_hist_ent->flags & insn_flags) && + (env->cur_hist_ent->flags & insn_flags) != insn_flags, + env, "insn history: insn_idx %d cur flags %x new flags %x", + env->insn_idx, env->cur_hist_ent->flags, insn_flags); env->cur_hist_ent->flags |= insn_flags; - WARN_ONCE(env->cur_hist_ent->linked_regs != 0, - "verifier insn history bug: insn_idx %d linked_regs != 0: %#llx\n", - env->insn_idx, env->cur_hist_ent->linked_regs); + verifier_bug_if(env->cur_hist_ent->linked_regs != 0, env, + "insn history: insn_idx %d linked_regs: %#llx", + env->insn_idx, env->cur_hist_ent->linked_regs); env->cur_hist_ent->linked_regs = linked_regs; return 0; } @@ -3988,8 +3984,7 @@ static inline u32 bt_empty(struct backtrack_state *bt) static inline int bt_subprog_enter(struct backtrack_state *bt) { if (bt->frame == MAX_CALL_FRAMES - 1) { - verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(bt->env, "subprog enter from frame %d", bt->frame); return -EFAULT; } bt->frame++; @@ -3999,8 +3994,7 @@ static inline int bt_subprog_enter(struct backtrack_state *bt) static inline int bt_subprog_exit(struct backtrack_state *bt) { if (bt->frame == 0) { - verbose(bt->env, "BUG subprog exit from frame 0\n"); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(bt->env, "subprog exit from frame 0"); return -EFAULT; } bt->frame--; @@ -4278,14 +4272,15 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, * should be literally next instruction in * caller program */ - WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug"); + verifier_bug_if(idx + 1 != subseq_idx, env, + "extra insn from subprog"); /* r1-r5 are invalidated after subprog call, * so for global func call it shouldn't be set * anymore */ if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { - verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(env, "global subprog unexpected regs %x", + bt_reg_mask(bt)); return -EFAULT; } /* global subprog always sets R0 */ @@ -4299,16 +4294,17 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, * the current frame should be zero by now */ if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { - verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(env, "static subprog unexpected regs %x", + bt_reg_mask(bt)); return -EFAULT; } /* we are now tracking register spills correctly, * so any instance of leftover slots is a bug */ if (bt_stack_mask(bt) != 0) { - verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt)); - WARN_ONCE(1, "verifier backtracking bug (subprog leftover stack slots)"); + verifier_bug(env, + "static subprog leftover stack slots %llx", + bt_stack_mask(bt)); return -EFAULT; } /* propagate r1-r5 to the caller */ @@ -4331,13 +4327,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, * not actually arguments passed directly to callback subprogs */ if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) 
{ - verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(env, "callback unexpected regs %x", + bt_reg_mask(bt)); return -EFAULT; } if (bt_stack_mask(bt) != 0) { - verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt)); - WARN_ONCE(1, "verifier backtracking bug (callback leftover stack slots)"); + verifier_bug(env, "callback leftover stack slots %llx", + bt_stack_mask(bt)); return -EFAULT; } /* clear r1-r5 in callback subprog's mask */ @@ -4356,11 +4352,11 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, /* regular helper call sets R0 */ bt_clear_reg(bt, BPF_REG_0); if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { - /* if backtracing was looking for registers R1-R5 + /* if backtracking was looking for registers R1-R5 * they should have been found already. */ - verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(env, "backtracking call unexpected regs %x", + bt_reg_mask(bt)); return -EFAULT; } } else if (opcode == BPF_EXIT) { @@ -4378,8 +4374,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, for (i = BPF_REG_1; i <= BPF_REG_5; i++) bt_clear_reg(bt, i); if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { - verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(env, "backtracking exit unexpected regs %x", + bt_reg_mask(bt)); return -EFAULT; } @@ -4720,9 +4716,8 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) return 0; } - verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n", - st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(env, "backtracking func entry subprog %d reg_mask %x stack_mask %llx", + st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); return -EFAULT; } @@ -4758,8 +4753,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) * It means the backtracking missed the spot where * particular register was initialized with a constant. */ - verbose(env, "BUG backtracking idx %d\n", i); - WARN_ONCE(1, "verifier backtracking bug"); + verifier_bug(env, "backtracking idx %d", i); return -EFAULT; } } @@ -4784,12 +4778,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); for_each_set_bit(i, mask, 64) { - if (i >= func->allocated_stack / BPF_REG_SIZE) { - verbose(env, "BUG backtracking (stack slot %d, total slots %d)\n", - i, func->allocated_stack / BPF_REG_SIZE); - WARN_ONCE(1, "verifier backtracking bug (stack slot out of bounds)"); + if (verifier_bug_if(i >= func->allocated_stack / BPF_REG_SIZE, + env, "stack slot %d, total slots %d", + i, func->allocated_stack / BPF_REG_SIZE)) return -EFAULT; - } if (!is_spilled_scalar_reg(&func->stack[i])) { bt_clear_frame_slot(bt, fr, i); @@ -6562,21 +6554,18 @@ continue_func: /* find the callee */ next_insn = i + insn[i].imm + 1; sidx = find_subprog(env, next_insn); - if (sidx < 0) { - WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", - next_insn); + if (verifier_bug_if(sidx < 0, env, "callee not found at insn %d", next_insn)) return -EFAULT; - } if (subprog[sidx].is_async_cb) { if (subprog[sidx].has_tail_call) { - verbose(env, "verifier bug. 
subprog has tail_call and async cb\n"); + verifier_bug(env, "subprog has tail_call and async cb"); return -EFAULT; } /* async callbacks don't increase bpf prog stack size unless called directly */ if (!bpf_pseudo_call(insn + i)) continue; if (subprog[sidx].is_exception_cb) { - verbose(env, "insn %d cannot call exception cb directly\n", i); + verbose(env, "insn %d cannot call exception cb directly", i); return -EINVAL; } } @@ -6676,11 +6665,8 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env, int start = idx + insn->imm + 1, subprog; subprog = find_subprog(env, start); - if (subprog < 0) { - WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", - start); + if (verifier_bug_if(subprog < 0, env, "get stack depth: no program at insn %d", start)) return -EFAULT; - } return env->subprog_info[subprog].stack_depth; } #endif @@ -7985,7 +7971,7 @@ static int check_stack_range_initialized( slot = -i - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot) { - verbose(env, "verifier bug: allocated_stack too small\n"); + verbose(env, "allocated_stack too small\n"); return -EFAULT; } @@ -8414,7 +8400,7 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno, return -EINVAL; } if (meta->map_ptr) { - verbose(env, "verifier bug. Two map pointers in a timer helper\n"); + verifier_bug(env, "Two map pointers in a timer helper"); return -EFAULT; } meta->map_uid = reg->map_uid; @@ -10286,8 +10272,7 @@ static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int calls } if (state->frame[state->curframe + 1]) { - verbose(env, "verifier bug. Frame %d already allocated\n", - state->curframe + 1); + verifier_bug(env, "Frame %d already allocated", state->curframe + 1); return -EFAULT; } @@ -10401,8 +10386,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, if (err) return err; } else { - bpf_log(log, "verifier bug: unrecognized arg#%d type %d\n", - i, arg->arg_type); + verifier_bug(env, "unrecognized arg#%d type %d", i, arg->arg_type); return -EFAULT; } } @@ -10465,13 +10449,13 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins env->subprog_info[subprog].is_cb = true; if (bpf_pseudo_kfunc_call(insn) && !is_callback_calling_kfunc(insn->imm)) { - verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", - func_id_name(insn->imm), insn->imm); + verifier_bug(env, "kfunc %s#%d not marked as callback-calling", + func_id_name(insn->imm), insn->imm); return -EFAULT; } else if (!bpf_pseudo_kfunc_call(insn) && !is_callback_calling_function(insn->imm)) { /* helper */ - verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", - func_id_name(insn->imm), insn->imm); + verifier_bug(env, "helper %s#%d not marked as callback-calling", + func_id_name(insn->imm), insn->imm); return -EFAULT; } @@ -10523,10 +10507,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, target_insn = *insn_idx + insn->imm + 1; subprog = find_subprog(env, target_insn); - if (subprog < 0) { - verbose(env, "verifier bug. 
No program starts at insn %d\n", target_insn); + if (verifier_bug_if(subprog < 0, env, "target of func call at insn %d is not a program", + target_insn)) return -EFAULT; - } caller = state->frame[state->curframe]; err = btf_check_subprog_call(env, subprog, caller->regs); @@ -11125,7 +11108,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env, err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, fmt_map_off); if (err) { - verbose(env, "verifier bug\n"); + verbose(env, "failed to retrieve map value address\n"); return -EFAULT; } fmt = (char *)(long)fmt_addr + fmt_map_off; @@ -19706,10 +19689,9 @@ process_bpf_exit: return err; break; } else { - if (WARN_ON_ONCE(env->cur_state->loop_entry)) { - verbose(env, "verifier bug: env->cur_state->loop_entry != NULL\n"); + if (verifier_bug_if(env->cur_state->loop_entry, env, + "broken loop detection")) return -EFAULT; - } do_print_state = true; continue; } @@ -20767,10 +20749,9 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, if (bpf_pseudo_kfunc_call(&insn)) continue; - if (WARN_ON(load_reg == -1)) { - verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n"); + if (verifier_bug_if(load_reg == -1, env, + "zext_dst is set, but no reg is defined")) return -EFAULT; - } zext_patch[0] = insn; zext_patch[1].dst_reg = load_reg; @@ -21087,11 +21068,9 @@ static int jit_subprogs(struct bpf_verifier_env *env) * propagated in any case. */ subprog = find_subprog(env, i + insn->imm + 1); - if (subprog < 0) { - WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", - i + insn->imm + 1); + if (verifier_bug_if(subprog < 0, env, "No program to jit at insn %d", + i + insn->imm + 1)) return -EFAULT; - } /* temporarily remember subprog id inside insn instead of * aux_data, since next loop will split up all insns into funcs */ @@ -22454,7 +22433,7 @@ next_insn: continue; /* We need two slots in case timed may_goto is supported. */ if (stack_slots > slots) { - verbose(env, "verifier bug: stack_slots supports may_goto only\n"); + verifier_bug(env, "stack_slots supports may_goto only"); return -EFAULT; } From b615ce5fbefb7d9bd1739b0aee54825349ae7438 Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Fri, 16 May 2025 20:55:22 +0100 Subject: [PATCH 099/135] selftests/bpf: Remove unnecessary link dependencies Remove llvm dependencies from binaries that do not use llvm libraries. Filter out libxml2 from llvm dependencies, as it seems that it is not actually used. This patch reduced link dependencies for BPF selftests. 
The following line added llvm dependencies to every target in the makefile, while the only targets that require them are the test runners (test_progs, test_progs-no_alu32, ...): ``` $(OUTPUT)/$(TRUNNER_BINARY): LDLIBS += $$(LLVM_LDLIBS) ``` Before this change: ldd linux/tools/testing/selftests/bpf/veristat linux-vdso.so.1 (0x00007ffd2c3fd000) libelf.so.1 => /lib64/libelf.so.1 (0x00007fe1dcf89000) libz.so.1 => /lib64/libz.so.1 (0x00007fe1dcf6f000) libm.so.6 => /lib64/libm.so.6 (0x00007fe1dce94000) libzstd.so.1 => /lib64/libzstd.so.1 (0x00007fe1dcddd000) libxml2.so.2 => /lib64/libxml2.so.2 (0x00007fe1dcc54000) libstdc++.so.6 => /lib64/libstdc++.so.6 (0x00007fe1dca00000) libc.so.6 => /lib64/libc.so.6 (0x00007fe1dc600000) /lib64/ld-linux-x86-64.so.2 (0x00007fe1dcfb1000) liblzma.so.5 => /lib64/liblzma.so.5 (0x00007fe1dc9d4000) libgcc_s.so.1 => /lib64/libgcc_s.so.1 (0x00007fe1dcc38000) After: ldd linux/tools/testing/selftests/bpf/veristat linux-vdso.so.1 (0x00007ffc83370000) libelf.so.1 => /lib64/libelf.so.1 (0x00007f4b87515000) libz.so.1 => /lib64/libz.so.1 (0x00007f4b874fb000) libc.so.6 => /lib64/libc.so.6 (0x00007f4b87200000) libzstd.so.1 => /lib64/libzstd.so.1 (0x00007f4b87444000) /lib64/ld-linux-x86-64.so.2 (0x00007f4b8753d000) Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250516195522.311769-1-mykyta.yatsenko5@gmail.com --- tools/testing/selftests/bpf/Makefile | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 0d04cf54068e..27db3bf20c21 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -180,7 +180,7 @@ ifeq ($(feature-llvm),1) # Prefer linking statically if it's available, otherwise fallback to shared ifeq ($(shell $(LLVM_CONFIG) --link-static --libs >/dev/null 2>&1 && echo static),static) LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --libs $(LLVM_CONFIG_LIB_COMPONENTS)) - LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-static --system-libs $(LLVM_CONFIG_LIB_COMPONENTS)) + LLVM_LDLIBS += $(filter-out -lxml2,$(shell $(LLVM_CONFIG) --link-static --system-libs $(LLVM_CONFIG_LIB_COMPONENTS))) LLVM_LDLIBS += -lstdc++ else LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --link-shared --libs $(LLVM_CONFIG_LIB_COMPONENTS)) @@ -675,9 +675,6 @@ ifneq ($2:$(OUTPUT),:$(shell pwd)) $(Q)rsync -aq $$^ $(TRUNNER_OUTPUT)/ endif -$(OUTPUT)/$(TRUNNER_BINARY): LDLIBS += $$(LLVM_LDLIBS) -$(OUTPUT)/$(TRUNNER_BINARY): LDFLAGS += $$(LLVM_LDFLAGS) - # some X.test.o files have runtime dependencies on Y.bpf.o files $(OUTPUT)/$(TRUNNER_BINARY): | $(TRUNNER_BPF_OBJS) @@ -688,7 +685,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ $(OUTPUT)/veristat \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) - $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) $$(LDFLAGS) -o $$@ + $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) $$(LLVM_LDLIBS) $$(LDFLAGS) $$(LLVM_LDFLAGS) -o $$@ $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \ $(OUTPUT)/$(if $2,$2/)bpftool From 4e29128a9acec2a622734844bedee013e2901bdf Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Mon, 19 May 2025 17:59:34 +0100 Subject: [PATCH 100/135] libbpf/btf: Fix string handling to support multi-split BTF libbpf handling of split BTF has been written largely with the assumption that multiple splits are possible, i.e. split BTF on top of split BTF on top of base BTF.
One area where this does not quite work is string handling in split BTF; the start string offset should be the base BTF string section length + the base BTF string offset. This worked in the past because, for a single split BTF with base, the start string offset was always 0. Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250519165935.261614-2-alan.maguire@oracle.com --- tools/lib/bpf/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index f18d7e6a453c..8d0d0b645a75 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -996,7 +996,7 @@ static struct btf *btf_new_empty(struct btf *base_btf) if (base_btf) { btf->base_btf = base_btf; btf->start_id = btf__type_cnt(base_btf); - btf->start_str_off = base_btf->hdr->str_len; + btf->start_str_off = base_btf->hdr->str_len + base_btf->start_str_off; btf->swapped_endian = base_btf->swapped_endian; } From 02f5e7c1f3ea18c99403b23b8bb400bd07b0c456 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Mon, 19 May 2025 17:59:35 +0100 Subject: [PATCH 101/135] selftests/bpf: Test multi-split BTF Extend the split BTF test to cover the case where we create split BTF on top of existing split BTF and add info to it; ensure that such BTF can be created and handled by searching within it, dumping, and comparing to the expected output. Signed-off-by: Alan Maguire Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250519165935.261614-3-alan.maguire@oracle.com --- .../selftests/bpf/prog_tests/btf_split.c | 58 +++++++++++++++++-- 1 file changed, 52 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_split.c b/tools/testing/selftests/bpf/prog_tests/btf_split.c index eef1158676ed..3696fb9a05ed 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_split.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_split.c @@ -12,10 +12,11 @@ static void btf_dump_printf(void *ctx, const char *fmt, va_list args) vfprintf(ctx, fmt, args); } -void test_btf_split() { +static void __test_btf_split(bool multi) +{ struct btf_dump *d = NULL; const struct btf_type *t; - struct btf *btf1, *btf2; + struct btf *btf1, *btf2, *btf3 = NULL; int str_off, i, err; btf1 = btf__new_empty(); @@ -63,14 +64,46 @@ void test_btf_split() { ASSERT_EQ(btf_vlen(t), 3, "split_struct_vlen"); ASSERT_STREQ(btf__str_by_offset(btf2, t->name_off), "s2", "split_struct_name"); + if (multi) { + btf3 = btf__new_empty_split(btf2); + if (!ASSERT_OK_PTR(btf3, "multi_split_btf")) + goto cleanup; + } else { + btf3 = btf2; + } + + btf__add_union(btf3, "u1", 16); /* [5] union u1 { */ + btf__add_field(btf3, "f1", 4, 0, 0); /* struct s2 f1; */ + btf__add_field(btf3, "uf2", 1, 0, 0); /* int f2; */ + /* } */ + + if (multi) { + t = btf__type_by_id(btf2, 5); + ASSERT_NULL(t, "multisplit_type_in_first_split"); + } + + t = btf__type_by_id(btf3, 5); + if (!ASSERT_OK_PTR(t, "split_union_type")) + goto cleanup; + ASSERT_EQ(btf_is_union(t), true, "split_union_kind"); + ASSERT_EQ(btf_vlen(t), 2, "split_union_vlen"); + ASSERT_STREQ(btf__str_by_offset(btf3, t->name_off), "u1", "split_union_name"); + ASSERT_EQ(btf__type_cnt(btf3), 6, "split_type_cnt"); + + t = btf__type_by_id(btf3, 1); + if (!ASSERT_OK_PTR(t, "split_base_type")) + goto cleanup; + ASSERT_EQ(btf_is_int(t), true, "split_base_int"); + ASSERT_STREQ(btf__str_by_offset(btf3, t->name_off), "int", "split_base_type_name"); + /* BTF-to-C dump of split BTF */ dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz); if (!ASSERT_OK_PTR(dump_buf_file,
"dump_memstream")) return; - d = btf_dump__new(btf2, btf_dump_printf, dump_buf_file, NULL); + d = btf_dump__new(btf3, btf_dump_printf, dump_buf_file, NULL); if (!ASSERT_OK_PTR(d, "btf_dump__new")) goto cleanup; - for (i = 1; i < btf__type_cnt(btf2); i++) { + for (i = 1; i < btf__type_cnt(btf3); i++) { err = btf_dump__dump_type(d, i); ASSERT_OK(err, "dump_type_ok"); } @@ -79,12 +112,15 @@ void test_btf_split() { ASSERT_STREQ(dump_buf, "struct s1 {\n" " int f1;\n" -"};\n" -"\n" +"};\n\n" "struct s2 {\n" " struct s1 f1;\n" " int f2;\n" " int *f3;\n" +"};\n\n" +"union u1 {\n" +" struct s2 f1;\n" +" int uf2;\n" "};\n\n", "c_dump"); cleanup: @@ -94,4 +130,14 @@ cleanup: btf_dump__free(d); btf__free(btf1); btf__free(btf2); + if (btf2 != btf3) + btf__free(btf3); +} + +void test_btf_split(void) +{ + if (test__start_subtest("single_split")) + __test_btf_split(false); + if (test__start_subtest("multi_split")) + __test_btf_split(true); } From 4e2e6841ff761cc15a54e8bebcf35d7325ec78a2 Mon Sep 17 00:00:00 2001 From: Di Shen Date: Tue, 20 May 2025 13:49:43 +0800 Subject: [PATCH 102/135] bpf: Revert "bpf: remove unnecessary rcu_read_{lock,unlock}() in multi-uprobe attach logic" This reverts commit 4a8f635a6054. Althought get_pid_task() internally already calls rcu_read_lock() and rcu_read_unlock(), the find_vpid() was not. The documentation for find_vpid() clearly states: "Must be called with the tasklist_lock or rcu_read_lock() held." Add proper rcu_read_lock/unlock() to protect the find_vpid(). Fixes: 4a8f635a6054 ("bpf: remove unnecessary rcu_read_{lock,unlock}() in multi-uprobe attach logic") Reported-by: Xuewen Yan Signed-off-by: Di Shen Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20250520054943.5002-1-xuewen.yan@unisoc.com Signed-off-by: Alexei Starovoitov --- kernel/trace/bpf_trace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index b0eb721fcfb5..289ff0caa83e 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3318,7 +3318,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr } if (pid) { + rcu_read_lock(); task = get_pid_task(find_vpid(pid), PIDTYPE_TGID); + rcu_read_unlock(); if (!task) { err = -ESRCH; goto error_path_put; From f7562001c8b854390899b53d06ba4202c89339e6 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 19 May 2025 23:30:04 +0100 Subject: [PATCH 103/135] s390: always declare expoline thunks It would be convenient to use the following pattern in the BPF JIT: if (nospec_uses_trampoline()) emit_call(__s390_indirect_jump_r1); Unfortunately with CONFIG_EXPOLINE=n the compiler complains about the missing prototype of __s390_indirect_jump_r1(). One could wrap the whole "if" statement in an #ifdef, but this clutters the code. Instead, declare expoline thunk prototypes even when compiling without expolines. When using the above code structure and compiling without expolines, references to them are optimized away, and there are no linker errors. 
Acked-by: Heiko Carstens Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250519223646.66382-2-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/include/asm/nospec-branch.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h index 192835a3e24d..c7c96282f011 100644 --- a/arch/s390/include/asm/nospec-branch.h +++ b/arch/s390/include/asm/nospec-branch.h @@ -26,8 +26,6 @@ static inline bool nospec_uses_trampoline(void) return __is_defined(CC_USING_EXPOLINE) && !nospec_disable; } -#ifdef CONFIG_EXPOLINE_EXTERN - void __s390_indirect_jump_r1(void); void __s390_indirect_jump_r2(void); void __s390_indirect_jump_r3(void); @@ -44,8 +42,6 @@ void __s390_indirect_jump_r13(void); void __s390_indirect_jump_r14(void); void __s390_indirect_jump_r15(void); -#endif - #endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_EXPOLINE_H */ From 9053ba042fc7c0e718566932288cc88b3bb2dbe1 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 19 May 2025 23:30:05 +0100 Subject: [PATCH 104/135] s390/bpf: Add macros for calling external functions After the V!=R rework (commit c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address spaces")), kernel and BPF programs are allocated within a 4G region, making it possible to use relative addressing to directly use kernel functions from BPF code. Add two new macros for calling kernel functions from BPF code: EMIT6_PCREL_RILB_PTR() and EMIT6_PCREL_RILC_PTR(). Factor out parts of the existing macros that are helpful for implementing the new ones. Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250519223646.66382-3-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 62 +++++++++++++++++++++++++----------- 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 6f22b5199c20..70a93e03bfb3 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -127,6 +127,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) jit->seen_regs |= (1 << r1); } +static s32 off_to_pcrel(struct bpf_jit *jit, u32 off) +{ + return off - jit->prg; +} + +static s64 ptr_to_pcrel(struct bpf_jit *jit, const void *ptr) +{ + if (jit->prg_buf) + return (const u8 *)ptr - ((const u8 *)jit->prg_buf + jit->prg); + return 0; +} + #define REG_SET_SEEN(b1) \ ({ \ reg_set_seen(jit, b1); \ @@ -201,7 +213,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT4_PCREL_RIC(op, mask, target) \ ({ \ - int __rel = ((target) - jit->prg) / 2; \ + int __rel = off_to_pcrel(jit, target) / 2; \ _EMIT4((op) | (mask) << 20 | (__rel & 0xffff)); \ }) @@ -239,7 +251,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \ ({ \ - unsigned int rel = (int)((target) - jit->prg) / 2; \ + unsigned int rel = off_to_pcrel(jit, target) / 2; \ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \ (op2) | (mask) << 12); \ REG_SET_SEEN(b1); \ @@ -248,7 +260,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) #define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \ ({ \ - unsigned int rel = (int)((target) - jit->prg) / 2; \ + unsigned int rel = off_to_pcrel(jit, target) / 2; \ _EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \ (rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \ REG_SET_SEEN(b1); \ @@ -257,29 +269,41 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) 
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \ ({ \ - int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \ + int rel = off_to_pcrel(jit, addrs[(i) + (off) + 1]) / 2;\ _EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\ REG_SET_SEEN(b1); \ REG_SET_SEEN(b2); \ }) -#define EMIT6_PCREL_RILB(op, b, target) \ -({ \ - unsigned int rel = (int)((target) - jit->prg) / 2; \ - _EMIT6((op) | reg_high(b) << 16 | rel >> 16, rel & 0xffff);\ - REG_SET_SEEN(b); \ -}) +static void emit6_pcrel_ril(struct bpf_jit *jit, u32 op, s64 pcrel) +{ + u32 pc32dbl = (s32)(pcrel / 2); -#define EMIT6_PCREL_RIL(op, target) \ -({ \ - unsigned int rel = (int)((target) - jit->prg) / 2; \ - _EMIT6((op) | rel >> 16, rel & 0xffff); \ -}) + _EMIT6(op | pc32dbl >> 16, pc32dbl & 0xffff); +} + +static void emit6_pcrel_rilb(struct bpf_jit *jit, u32 op, u8 b, s64 pcrel) +{ + emit6_pcrel_ril(jit, op | reg_high(b) << 16, pcrel); + REG_SET_SEEN(b); +} + +#define EMIT6_PCREL_RILB(op, b, target) \ + emit6_pcrel_rilb(jit, op, b, off_to_pcrel(jit, target)) + +#define EMIT6_PCREL_RILB_PTR(op, b, target_ptr) \ + emit6_pcrel_rilb(jit, op, b, ptr_to_pcrel(jit, target_ptr)) + +static void emit6_pcrel_rilc(struct bpf_jit *jit, u32 op, u8 mask, s64 pcrel) +{ + emit6_pcrel_ril(jit, op | mask << 20, pcrel); +} #define EMIT6_PCREL_RILC(op, mask, target) \ -({ \ - EMIT6_PCREL_RIL((op) | (mask) << 20, (target)); \ -}) + emit6_pcrel_rilc(jit, op, mask, off_to_pcrel(jit, target)) + +#define EMIT6_PCREL_RILC_PTR(op, mask, target_ptr) \ + emit6_pcrel_rilc(jit, op, mask, ptr_to_pcrel(jit, target_ptr)) #define _EMIT6_IMM(op, imm) \ ({ \ @@ -503,7 +527,7 @@ static void bpf_skip(struct bpf_jit *jit, int size) { if (size >= 6 && !is_valid_rel(size)) { /* brcl 0xf,size */ - EMIT6_PCREL_RIL(0xc0f4000000, size); + EMIT6_PCREL_RILC(0xc0040000, 0xf, size); size -= 6; } else if (size >= 4 && is_valid_rel(size)) { /* brc 0xf,size */ From 7f332f9fe9d854c1680fea223b6cb04530859a1a Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 19 May 2025 23:30:06 +0100 Subject: [PATCH 105/135] s390/bpf: Use kernel's expoline thunks Simplify the JIT code by replacing the custom expolines with the ones defined in the kernel text. Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20250519223646.66382-4-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov --- arch/s390/net/bpf_jit_comp.c | 61 ++++++++++-------------------------- 1 file changed, 17 insertions(+), 44 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 70a93e03bfb3..c7f8313ba449 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -48,8 +48,6 @@ struct bpf_jit { int lit64; /* Current position in 64-bit literal pool */ int base_ip; /* Base address for literal pool */ int exit_ip; /* Address of exit */ - int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */ - int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */ int tail_call_start; /* Tail call start offset */ int excnt; /* Number of exception table entries */ int prologue_plt_ret; /* Return address for prologue hotpatch PLT */ @@ -642,28 +640,17 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, } /* - * Emit an expoline for a jump that follows + * Jump using a register either directly or via an expoline thunk */ -static void emit_expoline(struct bpf_jit *jit) -{ - /* exrl %r0,.+10 */ - EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10); - /* j . 
*/ - EMIT4_PCREL(0xa7f40000, 0); -} - -/* - * Emit __s390_indirect_jump_r1 thunk if necessary - */ -static void emit_r1_thunk(struct bpf_jit *jit) -{ - if (nospec_uses_trampoline()) { - jit->r1_thunk_ip = jit->prg; - emit_expoline(jit); - /* br %r1 */ - _EMIT2(0x07f1); - } -} +#define EMIT_JUMP_REG(reg) do { \ + if (nospec_uses_trampoline()) \ + /* brcl 0xf,__s390_indirect_jump_rN */ \ + EMIT6_PCREL_RILC_PTR(0xc0040000, 0x0f, \ + __s390_indirect_jump_r ## reg); \ + else \ + /* br %rN */ \ + _EMIT2(0x07f0 | reg); \ +} while (0) /* * Call r1 either directly or via __s390_indirect_jump_r1 thunk @@ -672,7 +659,8 @@ static void call_r1(struct bpf_jit *jit) { if (nospec_uses_trampoline()) /* brasl %r14,__s390_indirect_jump_r1 */ - EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip); + EMIT6_PCREL_RILB_PTR(0xc0050000, REG_14, + __s390_indirect_jump_r1); else /* basr %r14,%r1 */ EMIT2(0x0d00, REG_14, REG_1); @@ -688,16 +676,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) EMIT4(0xb9040000, REG_2, BPF_REG_0); /* Restore registers */ save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); - if (nospec_uses_trampoline()) { - jit->r14_thunk_ip = jit->prg; - /* Generate __s390_indirect_jump_r14 thunk */ - emit_expoline(jit); - } - /* br %r14 */ - _EMIT2(0x07fe); - - if (is_first_pass(jit) || (jit->seen & SEEN_FUNC)) - emit_r1_thunk(jit); + EMIT_JUMP_REG(14); jit->prg = ALIGN(jit->prg, 8); jit->prologue_plt = jit->prg; @@ -1899,7 +1878,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* aghi %r1,tail_call_start */ EMIT4_IMM(0xa70b0000, REG_1, jit->tail_call_start); /* brcl 0xf,__s390_indirect_jump_r1 */ - EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->r1_thunk_ip); + EMIT6_PCREL_RILC_PTR(0xc0040000, 0xf, + __s390_indirect_jump_r1); } else { /* bc 0xf,tail_call_start(%r1) */ _EMIT4(0x47f01000 + jit->tail_call_start); @@ -2868,17 +2848,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, 0xf000 | tjit->tccnt_off); /* aghi %r15,stack_size */ EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size); - /* Emit an expoline for the following indirect jump. */ - if (nospec_uses_trampoline()) - emit_expoline(jit); if (flags & BPF_TRAMP_F_SKIP_FRAME) - /* br %r14 */ - _EMIT2(0x07fe); + EMIT_JUMP_REG(14); else - /* br %r1 */ - _EMIT2(0x07f1); - - emit_r1_thunk(jit); + EMIT_JUMP_REG(1); return 0; } From 5ead949920c773d4c3a42988391c2e6d0f32650f Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Thu, 22 May 2025 02:38:13 +0100 Subject: [PATCH 106/135] selftests/bpf: Add SKIP_LLVM makefile variable Introduce SKIP_LLVM makefile variable that allows to avoid using llvm dependencies when building BPF selftests. This is different from existing feature-llvm, as the latter is a result of automatic detection and should not be set by user explicitly. Avoiding llvm dependencies could be useful for environments that do not have them, given that as of now llvm dependencies are required only by jit_disasm_helpers.c. 
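As a usage sketch (assuming an in-tree build; the exact invocation may vary with the environment), the variable is passed on the make command line:

	make -C tools/testing/selftests/bpf SKIP_LLVM=1

Since the change below only wraps the feature-llvm block, setting SKIP_LLVM=1 merely skips probing for and linking against LLVM; the rest of the build is unaffected.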
Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250522013813.125428-1-mykyta.yatsenko5@gmail.com --- tools/testing/selftests/bpf/Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 27db3bf20c21..cf5ed3bee573 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -34,6 +34,9 @@ OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0) LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null) LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf) +SKIP_DOCS ?= +SKIP_LLVM ?= + ifeq ($(srctree),) srctree := $(patsubst %/,%,$(dir $(CURDIR))) srctree := $(patsubst %/,%,$(dir $(srctree))) @@ -172,6 +175,7 @@ override OUTPUT := $(patsubst %/,%,$(OUTPUT)) endif endif +ifneq ($(SKIP_LLVM),1) ifeq ($(feature-llvm),1) LLVM_CFLAGS += -DHAVE_LLVM_SUPPORT LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets @@ -187,6 +191,7 @@ ifeq ($(feature-llvm),1) endif LLVM_LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags) endif +endif SCRATCH_DIR := $(OUTPUT)/tools BUILD_DIR := $(SCRATCH_DIR)/build From fb1131d5e181604a6ef06a691709213decfe42bd Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 15 May 2025 00:15:24 +0200 Subject: [PATCH 107/135] selftests/bpf: Support af_unix SOCK_DGRAM socket pair creation Handle af_unix in init_addr_loopback(). For pair creation, bind() the peer socket to make SOCK_DGRAM connect() happy. Signed-off-by: Michal Luczaj Signed-off-by: Martin KaFai Lau Reviewed-by: Jakub Sitnicki Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250515-selftests-sockmap-redir-v3-1-a1ea723f7e7e@rbox.co --- .../selftests/bpf/prog_tests/socket_helpers.h | 29 +++++++++++++++---- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h index 1bdfb79ef009..e505c2f2595a 100644 --- a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h @@ -3,6 +3,7 @@ #ifndef __SOCKET_HELPERS__ #define __SOCKET_HELPERS__ +#include #include /* include/linux/net.h */ @@ -169,6 +170,15 @@ static inline void init_addr_loopback6(struct sockaddr_storage *ss, *len = sizeof(*addr6); } +static inline void init_addr_loopback_unix(struct sockaddr_storage *ss, + socklen_t *len) +{ + struct sockaddr_un *addr = memset(ss, 0, sizeof(*ss)); + + addr->sun_family = AF_UNIX; + *len = sizeof(sa_family_t); +} + static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss, socklen_t *len) { @@ -190,6 +200,9 @@ static inline void init_addr_loopback(int family, struct sockaddr_storage *ss, case AF_INET6: init_addr_loopback6(ss, len); return; + case AF_UNIX: + init_addr_loopback_unix(ss, len); + return; case AF_VSOCK: init_addr_loopback_vsock(ss, len); return; @@ -315,21 +328,27 @@ static inline int create_pair(int family, int sotype, int *p0, int *p1) { __close_fd int s, c = -1, p = -1; struct sockaddr_storage addr; - socklen_t len = sizeof(addr); + socklen_t len; int err; s = socket_loopback(family, sotype); if (s < 0) return s; - err = xgetsockname(s, sockaddr(&addr), &len); - if (err) - return err; - c = xsocket(family, sotype, 0); if (c < 0) return c; + init_addr_loopback(family, &addr, &len); + err = xbind(c, sockaddr(&addr), len); + if (err) + return err; + + len = sizeof(addr); + err = xgetsockname(s, sockaddr(&addr), &len); + if (err) + return 
err; + err = connect(c, sockaddr(&addr), len); if (err) { if (errno != EINPROGRESS) { From d87857946ded5acfe273404c4126f75d8e7619c8 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 15 May 2025 00:15:25 +0200 Subject: [PATCH 108/135] selftests/bpf: Add socket_kind_to_str() to socket_helpers Add function that returns string representation of socket's domain/type. Suggested-by: Jakub Sitnicki Signed-off-by: Michal Luczaj Signed-off-by: Martin KaFai Lau Reviewed-by: Jakub Sitnicki Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250515-selftests-sockmap-redir-v3-2-a1ea723f7e7e@rbox.co --- .../selftests/bpf/prog_tests/socket_helpers.h | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h index e505c2f2595a..e02cabcc814e 100644 --- a/tools/testing/selftests/bpf/prog_tests/socket_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/socket_helpers.h @@ -410,4 +410,59 @@ static inline int create_socket_pairs(int family, int sotype, int *c0, int *c1, return err; } +static inline const char *socket_kind_to_str(int sock_fd) +{ + socklen_t opt_len; + int domain, type; + + opt_len = sizeof(domain); + if (getsockopt(sock_fd, SOL_SOCKET, SO_DOMAIN, &domain, &opt_len)) + FAIL_ERRNO("getsockopt(SO_DOMAIN)"); + + opt_len = sizeof(type); + if (getsockopt(sock_fd, SOL_SOCKET, SO_TYPE, &type, &opt_len)) + FAIL_ERRNO("getsockopt(SO_TYPE)"); + + switch (domain) { + case AF_INET: + switch (type) { + case SOCK_STREAM: + return "tcp4"; + case SOCK_DGRAM: + return "udp4"; + } + break; + case AF_INET6: + switch (type) { + case SOCK_STREAM: + return "tcp6"; + case SOCK_DGRAM: + return "udp6"; + } + break; + case AF_UNIX: + switch (type) { + case SOCK_STREAM: + return "u_str"; + case SOCK_DGRAM: + return "u_dgr"; + case SOCK_SEQPACKET: + return "u_seq"; + } + break; + case AF_VSOCK: + switch (type) { + case SOCK_STREAM: + return "v_str"; + case SOCK_DGRAM: + return "v_dgr"; + case SOCK_SEQPACKET: + return "v_seq"; + } + break; + } + + return "???"; +} + #endif // __SOCKET_HELPERS__ From b57482b0fe8e370e2895b9925a733c21cfec183a Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 15 May 2025 00:15:26 +0200 Subject: [PATCH 109/135] selftests/bpf: Add u32()/u64() to sockmap_helpers Add integer wrappers for convenient sockmap usage. While there, fix misaligned trailing slashes. 
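The wrappers below are C99 compound literals: u32(0) creates an unnamed, addressable u32 object at the call site, so a pointer to it can be passed directly. A before/after sketch of one call site, mirroring the add_to_sockmap() change in the diff that follows:

	/* Before: named temporaries exist only to take their addresses. */
	u32 key = 0;
	u64 value = fd1;
	err = xbpf_map_update_elem(mapfd, &key, &value, BPF_NOEXIST);

	/* After: compound literals are addressable in place. */
	err = xbpf_map_update_elem(mapfd, &u32(0), &u64(fd1), BPF_NOEXIST);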
Suggested-by: Jakub Sitnicki Signed-off-by: Michal Luczaj Signed-off-by: Martin KaFai Lau Reviewed-by: Jakub Sitnicki Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250515-selftests-sockmap-redir-v3-3-a1ea723f7e7e@rbox.co --- .../bpf/prog_tests/sockmap_helpers.h | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h index 3e5571dd578d..d815efac52fd 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h @@ -5,12 +5,15 @@ #define MAX_TEST_NAME 80 +#define u32(v) ((u32){(v)}) +#define u64(v) ((u64){(v)}) + #define __always_unused __attribute__((__unused__)) #define xbpf_map_delete_elem(fd, key) \ ({ \ int __ret = bpf_map_delete_elem((fd), (key)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("map_delete"); \ __ret; \ }) @@ -18,7 +21,7 @@ #define xbpf_map_lookup_elem(fd, key, val) \ ({ \ int __ret = bpf_map_lookup_elem((fd), (key), (val)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("map_lookup"); \ __ret; \ }) @@ -26,7 +29,7 @@ #define xbpf_map_update_elem(fd, key, val, flags) \ ({ \ int __ret = bpf_map_update_elem((fd), (key), (val), (flags)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("map_update"); \ __ret; \ }) @@ -35,7 +38,7 @@ ({ \ int __ret = \ bpf_prog_attach((prog), (target), (type), (flags)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("prog_attach(" #type ")"); \ __ret; \ }) @@ -43,7 +46,7 @@ #define xbpf_prog_detach2(prog, target, type) \ ({ \ int __ret = bpf_prog_detach2((prog), (target), (type)); \ - if (__ret < 0) \ + if (__ret < 0) \ FAIL_ERRNO("prog_detach2(" #type ")"); \ __ret; \ }) @@ -66,21 +69,15 @@ __ret; \ }) -static inline int add_to_sockmap(int sock_mapfd, int fd1, int fd2) +static inline int add_to_sockmap(int mapfd, int fd1, int fd2) { - u64 value; - u32 key; int err; - key = 0; - value = fd1; - err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); + err = xbpf_map_update_elem(mapfd, &u32(0), &u64(fd1), BPF_NOEXIST); if (err) return err; - key = 1; - value = fd2; - return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST); + return xbpf_map_update_elem(mapfd, &u32(1), &u64(fd2), BPF_NOEXIST); } #endif // __SOCKMAP_HELPERS__ From f266905bb3d828e678e80d235fbd3e46d8c04a50 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 15 May 2025 00:15:27 +0200 Subject: [PATCH 110/135] selftests/bpf: Introduce verdict programs for sockmap_redir Instead of piggybacking on test_sockmap_listen, introduce test_sockmap_redir especially for sockmap redirection tests. 
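The verdict program added in the diff that follows keeps a single macro body serving both sk_skb and sk_msg hooks by dispatching on the context type with C11 _Generic. A self-contained sketch of that selection mechanism, using hypothetical handle_int()/handle_long() names:

	#include <stdio.h>

	static int handle_int(int v) { return v; }
	static int handle_long(long v) { return (int)v; }

	/* Selects a handler from the static type of x at compile time. */
	#define handle(x) _Generic((x),		\
			int: handle_int,	\
			long: handle_long)(x)

	int main(void)
	{
		printf("%d %d\n", handle(1), handle(2L));	/* prints "1 2" */
		return 0;
	}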
Suggested-by: Jiayuan Chen Signed-off-by: Michal Luczaj Signed-off-by: Martin KaFai Lau Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250515-selftests-sockmap-redir-v3-4-a1ea723f7e7e@rbox.co --- .../selftests/bpf/progs/test_sockmap_redir.c | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/test_sockmap_redir.c diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_redir.c b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c new file mode 100644 index 000000000000..34d9f4f2f0a2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sockmap_redir.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "bpf_misc.h" + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} nop_map, sock_map; + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} nop_hash, sock_hash; + +SEC(".maps") struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, int); + __type(value, unsigned int); +} verdict_map; + +/* Set by user space */ +int redirect_type; +int redirect_flags; + +#define redirect_map(__data) \ + _Generic((__data), \ + struct __sk_buff * : bpf_sk_redirect_map, \ + struct sk_msg_md * : bpf_msg_redirect_map \ + )((__data), &sock_map, (__u32){0}, redirect_flags) + +#define redirect_hash(__data) \ + _Generic((__data), \ + struct __sk_buff * : bpf_sk_redirect_hash, \ + struct sk_msg_md * : bpf_msg_redirect_hash \ + )((__data), &sock_hash, &(__u32){0}, redirect_flags) + +#define DEFINE_PROG(__type, __param) \ +SEC("sk_" XSTR(__type)) \ +int prog_ ## __type ## _verdict(__param data) \ +{ \ + unsigned int *count; \ + int verdict; \ + \ + if (redirect_type == BPF_MAP_TYPE_SOCKMAP) \ + verdict = redirect_map(data); \ + else if (redirect_type == BPF_MAP_TYPE_SOCKHASH) \ + verdict = redirect_hash(data); \ + else \ + verdict = redirect_type - __MAX_BPF_MAP_TYPE; \ + \ + count = bpf_map_lookup_elem(&verdict_map, &verdict); \ + if (count) \ + (*count)++; \ + \ + return verdict; \ +} + +DEFINE_PROG(skb, struct __sk_buff *); +DEFINE_PROG(msg, struct sk_msg_md *); + +char _license[] SEC("license") = "GPL"; From f0709263a07ef076ac4e55f0d68e04d521b2184c Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 15 May 2025 00:15:28 +0200 Subject: [PATCH 111/135] selftests/bpf: Add selftest for sockmap/hashmap redirection Test redirection logic. All supported and unsupported redirect combinations are tested for success and failure respectively. 
BPF_MAP_TYPE_SOCKMAP BPF_MAP_TYPE_SOCKHASH x sk_msg-to-egress sk_msg-to-ingress sk_skb-to-egress sk_skb-to-ingress x AF_INET, SOCK_STREAM AF_INET6, SOCK_STREAM AF_INET, SOCK_DGRAM AF_INET6, SOCK_DGRAM AF_UNIX, SOCK_STREAM AF_UNIX, SOCK_DGRAM AF_VSOCK, SOCK_STREAM AF_VSOCK, SOCK_SEQPACKET Suggested-by: Jakub Sitnicki Signed-off-by: Michal Luczaj Signed-off-by: Martin KaFai Lau Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250515-selftests-sockmap-redir-v3-5-a1ea723f7e7e@rbox.co --- .../selftests/bpf/prog_tests/sockmap_redir.c | 465 ++++++++++++++++++ 1 file changed, 465 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/sockmap_redir.c diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c b/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c new file mode 100644 index 000000000000..9c461d93113d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_redir.c @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test for sockmap/sockhash redirection. + * + * BPF_MAP_TYPE_SOCKMAP + * BPF_MAP_TYPE_SOCKHASH + * x + * sk_msg-to-egress + * sk_msg-to-ingress + * sk_skb-to-egress + * sk_skb-to-ingress + * x + * AF_INET, SOCK_STREAM + * AF_INET6, SOCK_STREAM + * AF_INET, SOCK_DGRAM + * AF_INET6, SOCK_DGRAM + * AF_UNIX, SOCK_STREAM + * AF_UNIX, SOCK_DGRAM + * AF_VSOCK, SOCK_STREAM + * AF_VSOCK, SOCK_SEQPACKET + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "linux/const.h" +#include "test_progs.h" +#include "sockmap_helpers.h" +#include "test_sockmap_redir.skel.h" + +/* The meaning of SUPPORTED is "will redirect packet as expected". + */ +#define SUPPORTED _BITUL(0) + +/* Note on sk_skb-to-ingress ->af_vsock: + * + * Peer socket may receive the packet some time after the return from sendmsg(). + * In a typical usage scenario, recvmsg() will block until the redirected packet + * appears in the destination queue, or timeout if the packet was dropped. By + * that point, the verdict map has already been updated to reflect what has + * happened. + * + * But sk_skb-to-ingress/af_vsock is an unsupported combination, so no recvmsg() + * takes place, which means we may race the execution of the verdict logic and + * read map_verd before it has been updated, i.e. we might observe + * map_verd[SK_DROP]=0 instead of map_verd[SK_DROP]=1. + * + * This confuses the selftest logic: if there was no packet dropped, where's the + * packet? So here's a heuristic: on map_verd[SK_DROP]=map_verd[SK_PASS]=0 + * (which implies the verdict program has not been run) just read the verdict + * map again.
+ */ +#define UNSUPPORTED_RACY_VERD _BITUL(1) + +enum prog_type { + SK_MSG_EGRESS, + SK_MSG_INGRESS, + SK_SKB_EGRESS, + SK_SKB_INGRESS, +}; + +enum { + SEND_INNER = 0, + SEND_OUTER, +}; + +enum { + RECV_INNER = 0, + RECV_OUTER, +}; + +struct maps { + int in; + int out; + int verd; +}; + +struct combo_spec { + enum prog_type prog_type; + const char *in, *out; +}; + +struct redir_spec { + const char *name; + int idx_send; + int idx_recv; + enum prog_type prog_type; +}; + +struct socket_spec { + int family; + int sotype; + int send_flags; + int in[2]; + int out[2]; +}; + +static int socket_spec_pairs(struct socket_spec *s) +{ + return create_socket_pairs(s->family, s->sotype, + &s->in[0], &s->out[0], + &s->in[1], &s->out[1]); +} + +static void socket_spec_close(struct socket_spec *s) +{ + xclose(s->in[0]); + xclose(s->in[1]); + xclose(s->out[0]); + xclose(s->out[1]); +} + +static void get_redir_params(struct redir_spec *redir, + struct test_sockmap_redir *skel, int *prog_fd, + enum bpf_attach_type *attach_type, + int *redirect_flags) +{ + enum prog_type type = redir->prog_type; + struct bpf_program *prog; + bool sk_msg; + + sk_msg = type == SK_MSG_INGRESS || type == SK_MSG_EGRESS; + prog = sk_msg ? skel->progs.prog_msg_verdict : skel->progs.prog_skb_verdict; + + *prog_fd = bpf_program__fd(prog); + *attach_type = sk_msg ? BPF_SK_MSG_VERDICT : BPF_SK_SKB_VERDICT; + + if (type == SK_MSG_INGRESS || type == SK_SKB_INGRESS) + *redirect_flags = BPF_F_INGRESS; + else + *redirect_flags = 0; +} + +static void try_recv(const char *prefix, int fd, int flags, bool expect_success) +{ + ssize_t n; + char buf; + + errno = 0; + n = recv(fd, &buf, 1, flags); + if (n < 0 && expect_success) + FAIL_ERRNO("%s: unexpected failure: retval=%zd", prefix, n); + if (!n && !expect_success) + FAIL("%s: expected failure: retval=%zd", prefix, n); +} + +static void handle_unsupported(int sd_send, int sd_peer, int sd_in, int sd_out, + int sd_recv, int map_verd, int status) +{ + unsigned int drop, pass; + char recv_buf; + ssize_t n; + +get_verdict: + if (xbpf_map_lookup_elem(map_verd, &u32(SK_DROP), &drop) || + xbpf_map_lookup_elem(map_verd, &u32(SK_PASS), &pass)) + return; + + if (pass == 0 && drop == 0 && (status & UNSUPPORTED_RACY_VERD)) { + sched_yield(); + goto get_verdict; + } + + if (pass != 0) { + FAIL("unsupported: wanted verdict pass 0, have %u", pass); + return; + } + + /* If nothing was dropped, packet should have reached the peer */ + if (drop == 0) { + errno = 0; + n = recv_timeout(sd_peer, &recv_buf, 1, 0, IO_TIMEOUT_SEC); + if (n != 1) + FAIL_ERRNO("unsupported: packet missing, retval=%zd", n); + } + + /* Ensure queues are empty */ + try_recv("bpf.recv(sd_send)", sd_send, MSG_DONTWAIT, false); + if (sd_in != sd_send) + try_recv("bpf.recv(sd_in)", sd_in, MSG_DONTWAIT, false); + + try_recv("bpf.recv(sd_out)", sd_out, MSG_DONTWAIT, false); + if (sd_recv != sd_out) + try_recv("bpf.recv(sd_recv)", sd_recv, MSG_DONTWAIT, false); +} + +static void test_send_redir_recv(int sd_send, int send_flags, int sd_peer, + int sd_in, int sd_out, int sd_recv, + struct maps *maps, int status) +{ + unsigned int drop, pass; + char *send_buf = "ab"; + char recv_buf = '\0'; + ssize_t n, len = 1; + + /* Zero out the verdict map */ + if (xbpf_map_update_elem(maps->verd, &u32(SK_DROP), &u32(0), BPF_ANY) || + xbpf_map_update_elem(maps->verd, &u32(SK_PASS), &u32(0), BPF_ANY)) + return; + + if (xbpf_map_update_elem(maps->in, &u32(0), &u64(sd_in), BPF_NOEXIST)) + return; + + if (xbpf_map_update_elem(maps->out, &u32(0), &u64(sd_out), 
BPF_NOEXIST)) + goto del_in; + + /* Last byte is OOB data when send_flags has MSG_OOB bit set */ + if (send_flags & MSG_OOB) + len++; + n = send(sd_send, send_buf, len, send_flags); + if (n >= 0 && n < len) + FAIL("incomplete send"); + if (n < 0) { + /* sk_msg redirect combo not supported? */ + if (status & SUPPORTED || errno != EACCES) + FAIL_ERRNO("send"); + goto out; + } + + if (!(status & SUPPORTED)) { + handle_unsupported(sd_send, sd_peer, sd_in, sd_out, sd_recv, + maps->verd, status); + goto out; + } + + errno = 0; + n = recv_timeout(sd_recv, &recv_buf, 1, 0, IO_TIMEOUT_SEC); + if (n != 1) { + FAIL_ERRNO("recv_timeout()"); + goto out; + } + + /* Check verdict _after_ recv(); af_vsock may need time to catch up */ + if (xbpf_map_lookup_elem(maps->verd, &u32(SK_DROP), &drop) || + xbpf_map_lookup_elem(maps->verd, &u32(SK_PASS), &pass)) + goto out; + + if (drop != 0 || pass != 1) + FAIL("unexpected verdict drop/pass: wanted 0/1, have %u/%u", + drop, pass); + + if (recv_buf != send_buf[0]) + FAIL("recv(): payload check, %02x != %02x", recv_buf, send_buf[0]); + + if (send_flags & MSG_OOB) { + /* Fail reading OOB while in sockmap */ + try_recv("bpf.recv(sd_out, MSG_OOB)", sd_out, + MSG_OOB | MSG_DONTWAIT, false); + + /* Remove sd_out from sockmap */ + xbpf_map_delete_elem(maps->out, &u32(0)); + + /* Check that OOB was dropped on redirect */ + try_recv("recv(sd_out, MSG_OOB)", sd_out, + MSG_OOB | MSG_DONTWAIT, false); + + goto del_in; + } +out: + xbpf_map_delete_elem(maps->out, &u32(0)); +del_in: + xbpf_map_delete_elem(maps->in, &u32(0)); +} + +static int is_redir_supported(enum prog_type type, const char *in, + const char *out) +{ + /* Matching based on strings returned by socket_kind_to_str(): + * tcp4, udp4, tcp6, udp6, u_str, u_dgr, v_str, v_seq + * Plus a wildcard: any + * Not in use: u_seq, v_dgr + */ + struct combo_spec *c, combos[] = { + /* Send to local: TCP -> any, but vsock */ + { SK_MSG_INGRESS, "tcp", "tcp" }, + { SK_MSG_INGRESS, "tcp", "udp" }, + { SK_MSG_INGRESS, "tcp", "u_str" }, + { SK_MSG_INGRESS, "tcp", "u_dgr" }, + + /* Send to egress: TCP -> TCP */ + { SK_MSG_EGRESS, "tcp", "tcp" }, + + /* Ingress to egress: any -> any */ + { SK_SKB_EGRESS, "any", "any" }, + + /* Ingress to local: any -> any, but vsock */ + { SK_SKB_INGRESS, "any", "tcp" }, + { SK_SKB_INGRESS, "any", "udp" }, + { SK_SKB_INGRESS, "any", "u_str" }, + { SK_SKB_INGRESS, "any", "u_dgr" }, + }; + + for (c = combos; c < combos + ARRAY_SIZE(combos); c++) { + if (c->prog_type == type && + (!strcmp(c->in, "any") || strstarts(in, c->in)) && + (!strcmp(c->out, "any") || strstarts(out, c->out))) + return SUPPORTED; + } + + return 0; +} + +static int get_support_status(enum prog_type type, const char *in, + const char *out) +{ + int status = is_redir_supported(type, in, out); + + if (type == SK_SKB_INGRESS && strstarts(out, "v_")) + status |= UNSUPPORTED_RACY_VERD; + + return status; +} + +static void test_socket(enum bpf_map_type type, struct redir_spec *redir, + struct maps *maps, struct socket_spec *s_in, + struct socket_spec *s_out) +{ + int fd_in, fd_out, fd_send, fd_peer, fd_recv, flags, status; + const char *in_str, *out_str; + char s[MAX_TEST_NAME]; + + fd_in = s_in->in[0]; + fd_out = s_out->out[0]; + fd_send = s_in->in[redir->idx_send]; + fd_peer = s_in->in[redir->idx_send ^ 1]; + fd_recv = s_out->out[redir->idx_recv]; + flags = s_in->send_flags; + + in_str = socket_kind_to_str(fd_in); + out_str = socket_kind_to_str(fd_out); + status = get_support_status(redir->prog_type, in_str, out_str); + + snprintf(s, 
sizeof(s), + "%-4s %-17s %-5s %s %-5s%6s", + /* hash sk_skb-to-ingress u_str → v_str (OOB) */ + type == BPF_MAP_TYPE_SOCKMAP ? "map" : "hash", + redir->name, + in_str, + status & SUPPORTED ? "→" : " ", + out_str, + (flags & MSG_OOB) ? "(OOB)" : ""); + + if (!test__start_subtest(s)) + return; + + test_send_redir_recv(fd_send, flags, fd_peer, fd_in, fd_out, fd_recv, + maps, status); +} + +static void test_redir(enum bpf_map_type type, struct redir_spec *redir, + struct maps *maps) +{ + struct socket_spec *s, sockets[] = { + { AF_INET, SOCK_STREAM }, + // { AF_INET, SOCK_STREAM, MSG_OOB }, /* Known to be broken */ + { AF_INET6, SOCK_STREAM }, + { AF_INET, SOCK_DGRAM }, + { AF_INET6, SOCK_DGRAM }, + { AF_UNIX, SOCK_STREAM }, + { AF_UNIX, SOCK_STREAM, MSG_OOB }, + { AF_UNIX, SOCK_DGRAM }, + // { AF_UNIX, SOCK_SEQPACKET}, /* Unsupported BPF_MAP_UPDATE_ELEM */ + { AF_VSOCK, SOCK_STREAM }, + // { AF_VSOCK, SOCK_DGRAM }, /* Unsupported socket() */ + { AF_VSOCK, SOCK_SEQPACKET }, + }; + + for (s = sockets; s < sockets + ARRAY_SIZE(sockets); s++) + if (socket_spec_pairs(s)) + goto out; + + /* Intra-proto */ + for (s = sockets; s < sockets + ARRAY_SIZE(sockets); s++) + test_socket(type, redir, maps, s, s); + + /* Cross-proto */ + for (int i = 0; i < ARRAY_SIZE(sockets); i++) { + for (int j = 0; j < ARRAY_SIZE(sockets); j++) { + struct socket_spec *out = &sockets[j]; + struct socket_spec *in = &sockets[i]; + + /* Skip intra-proto and between variants */ + if (out->send_flags || + (in->family == out->family && + in->sotype == out->sotype)) + continue; + + test_socket(type, redir, maps, in, out); + } + } +out: + while (--s >= sockets) + socket_spec_close(s); +} + +static void test_map(enum bpf_map_type type) +{ + struct redir_spec *r, redirs[] = { + { "sk_msg-to-ingress", SEND_INNER, RECV_INNER, SK_MSG_INGRESS }, + { "sk_msg-to-egress", SEND_INNER, RECV_OUTER, SK_MSG_EGRESS }, + { "sk_skb-to-egress", SEND_OUTER, RECV_OUTER, SK_SKB_EGRESS }, + { "sk_skb-to-ingress", SEND_OUTER, RECV_INNER, SK_SKB_INGRESS }, + }; + + for (r = redirs; r < redirs + ARRAY_SIZE(redirs); r++) { + enum bpf_attach_type attach_type; + struct test_sockmap_redir *skel; + struct maps maps; + int prog_fd; + + skel = test_sockmap_redir__open_and_load(); + if (!skel) { + FAIL("open_and_load"); + return; + } + + switch (type) { + case BPF_MAP_TYPE_SOCKMAP: + maps.in = bpf_map__fd(skel->maps.nop_map); + maps.out = bpf_map__fd(skel->maps.sock_map); + break; + case BPF_MAP_TYPE_SOCKHASH: + maps.in = bpf_map__fd(skel->maps.nop_hash); + maps.out = bpf_map__fd(skel->maps.sock_hash); + break; + default: + FAIL("Unsupported bpf_map_type"); + return; + } + + skel->bss->redirect_type = type; + maps.verd = bpf_map__fd(skel->maps.verdict_map); + get_redir_params(r, skel, &prog_fd, &attach_type, + &skel->bss->redirect_flags); + + if (xbpf_prog_attach(prog_fd, maps.in, attach_type, 0)) + return; + + test_redir(type, r, &maps); + + if (xbpf_prog_detach2(prog_fd, maps.in, attach_type)) + return; + + test_sockmap_redir__destroy(skel); + } +} + +void serial_test_sockmap_redir(void) +{ + test_map(BPF_MAP_TYPE_SOCKMAP); + test_map(BPF_MAP_TYPE_SOCKHASH); +} From 9266e49d608cb5b8857863ef1d787fe9ccca0ee8 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 15 May 2025 00:15:29 +0200 Subject: [PATCH 112/135] selftests/bpf: sockmap_listen cleanup: Drop af_vsock redir tests Remove tests covered by sockmap_redir. 
Signed-off-by: Michal Luczaj Signed-off-by: Martin KaFai Lau Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250515-selftests-sockmap-redir-v3-6-a1ea723f7e7e@rbox.co --- .../selftests/bpf/prog_tests/sockmap_listen.c | 112 ------------------ 1 file changed, 112 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 4ee1148d22be..f0ad548bdf09 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -1487,116 +1487,6 @@ static void test_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *ma unix_skb_redir_to_connected(skel, map, sotype); } -/* Returns two connected loopback vsock sockets */ -static int vsock_socketpair_connectible(int sotype, int *v0, int *v1) -{ - return create_pair(AF_VSOCK, sotype | SOCK_NONBLOCK, v0, v1); -} - -static void vsock_unix_redir_connectible(int sock_mapfd, int verd_mapfd, - enum redir_mode mode, int sotype) -{ - const char *log_prefix = redir_mode_str(mode); - char a = 'a', b = 'b'; - int u0, u1, v0, v1; - int sfd[2]; - unsigned int pass; - int err, n; - u32 key; - - zero_verdict_count(verd_mapfd); - - if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, sfd)) - return; - - u0 = sfd[0]; - u1 = sfd[1]; - - err = vsock_socketpair_connectible(sotype, &v0, &v1); - if (err) { - FAIL("vsock_socketpair_connectible() failed"); - goto close_uds; - } - - err = add_to_sockmap(sock_mapfd, u0, v0); - if (err) { - FAIL("add_to_sockmap failed"); - goto close_vsock; - } - - n = write(v1, &a, sizeof(a)); - if (n < 0) - FAIL_ERRNO("%s: write", log_prefix); - if (n == 0) - FAIL("%s: incomplete write", log_prefix); - if (n < 1) - goto out; - - n = xrecv_nonblock(mode == REDIR_INGRESS ? 
u0 : u1, &b, sizeof(b), 0); - if (n < 0) - FAIL("%s: recv() err, errno=%d", log_prefix, errno); - if (n == 0) - FAIL("%s: incomplete recv", log_prefix); - if (b != a) - FAIL("%s: vsock socket map failed, %c != %c", log_prefix, a, b); - - key = SK_PASS; - err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); - if (err) - goto out; - if (pass != 1) - FAIL("%s: want pass count 1, have %d", log_prefix, pass); -out: - key = 0; - bpf_map_delete_elem(sock_mapfd, &key); - key = 1; - bpf_map_delete_elem(sock_mapfd, &key); - -close_vsock: - close(v0); - close(v1); - -close_uds: - close(u0); - close(u1); -} - -static void vsock_unix_skb_redir_connectible(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, - int sotype) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_EGRESS, sotype); - skel->bss->test_ingress = true; - vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_INGRESS, sotype); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void test_vsock_redir(struct test_sockmap_listen *skel, struct bpf_map *map) -{ - const char *family_name, *map_name; - char s[MAX_TEST_NAME]; - - family_name = family_str(AF_VSOCK); - map_name = map_type_str(map); - snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); - if (!test__start_subtest(s)) - return; - - vsock_unix_skb_redir_connectible(skel, map, SOCK_STREAM); - vsock_unix_skb_redir_connectible(skel, map, SOCK_SEQPACKET); -} - static void test_reuseport(struct test_sockmap_listen *skel, struct bpf_map *map, int family, int sotype) { @@ -1882,14 +1772,12 @@ void serial_test_sockmap_listen(void) run_tests(skel, skel->maps.sock_map, AF_INET6); test_unix_redir(skel, skel->maps.sock_map, SOCK_DGRAM); test_unix_redir(skel, skel->maps.sock_map, SOCK_STREAM); - test_vsock_redir(skel, skel->maps.sock_map); skel->bss->test_sockmap = false; run_tests(skel, skel->maps.sock_hash, AF_INET); run_tests(skel, skel->maps.sock_hash, AF_INET6); test_unix_redir(skel, skel->maps.sock_hash, SOCK_DGRAM); test_unix_redir(skel, skel->maps.sock_hash, SOCK_STREAM); - test_vsock_redir(skel, skel->maps.sock_hash); test_sockmap_listen__destroy(skel); } From f3de1cf621f7e8f7653a24b9e0f21ac7aefbbf27 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 15 May 2025 00:15:30 +0200 Subject: [PATCH 113/135] selftests/bpf: sockmap_listen cleanup: Drop af_unix redir tests Remove tests covered by sockmap_redir. 
Signed-off-by: Michal Luczaj Signed-off-by: Martin KaFai Lau Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250515-selftests-sockmap-redir-v3-7-a1ea723f7e7e@rbox.co --- .../selftests/bpf/prog_tests/sockmap_listen.c | 219 ------------------ 1 file changed, 219 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index f0ad548bdf09..4f38dd7d23da 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -1429,64 +1429,6 @@ static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1, } } -static void unix_redir_to_connected(int sotype, int sock_mapfd, - int verd_mapfd, enum redir_mode mode) -{ - int c0, c1, p0, p1; - int sfd[2]; - - if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd)) - return; - c0 = sfd[0], p0 = sfd[1]; - - if (socketpair(AF_UNIX, sotype | SOCK_NONBLOCK, 0, sfd)) - goto close0; - c1 = sfd[0], p1 = sfd[1]; - - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, - mode, NO_FLAGS); - - xclose(c1); - xclose(p1); -close0: - xclose(c0); - xclose(p0); -} - -static void unix_skb_redir_to_connected(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, int sotype) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_EGRESS); - skel->bss->test_ingress = true; - unix_redir_to_connected(sotype, sock_map, verdict_map, REDIR_INGRESS); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void test_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map, - int sotype) -{ - const char *family_name, *map_name; - char s[MAX_TEST_NAME]; - - family_name = family_str(AF_UNIX); - map_name = map_type_str(map); - snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); - if (!test__start_subtest(s)) - return; - unix_skb_redir_to_connected(skel, map, sotype); -} - static void test_reuseport(struct test_sockmap_listen *skel, struct bpf_map *map, int family, int sotype) { @@ -1589,162 +1531,6 @@ static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map udp_skb_redir_to_connected(skel, map, family); } -static void inet_unix_redir_to_connected(int family, int type, int sock_mapfd, - int verd_mapfd, enum redir_mode mode) -{ - int c0, c1, p0, p1; - int sfd[2]; - int err; - - if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd)) - return; - c0 = sfd[0], p0 = sfd[1]; - - err = inet_socketpair(family, type, &p1, &c1); - if (err) - goto close; - - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, - mode, NO_FLAGS); - - xclose(c1); - xclose(p1); -close: - xclose(c0); - xclose(p0); -} - -static void inet_unix_skb_redir_to_connected(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, int family) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, - REDIR_EGRESS); - 
inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, - REDIR_EGRESS); - skel->bss->test_ingress = true; - inet_unix_redir_to_connected(family, SOCK_DGRAM, sock_map, verdict_map, - REDIR_INGRESS); - inet_unix_redir_to_connected(family, SOCK_STREAM, sock_map, verdict_map, - REDIR_INGRESS); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd, - int nop_mapfd, int verd_mapfd, - enum redir_mode mode, int send_flags) -{ - int c0, c1, p0, p1; - int sfd[2]; - int err; - - err = inet_socketpair(family, type, &p0, &c0); - if (err) - return; - - if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd)) - goto close_cli0; - c1 = sfd[0], p1 = sfd[1]; - - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, nop_mapfd, - verd_mapfd, mode, send_flags); - - xclose(c1); - xclose(p1); -close_cli0: - xclose(c0); - xclose(p0); -} - -static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, int family) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int nop_map = bpf_map__fd(skel->maps.nop_map); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - unix_inet_redir_to_connected(family, SOCK_DGRAM, - sock_map, -1, verdict_map, - REDIR_EGRESS, NO_FLAGS); - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, -1, verdict_map, - REDIR_EGRESS, NO_FLAGS); - - unix_inet_redir_to_connected(family, SOCK_DGRAM, - sock_map, nop_map, verdict_map, - REDIR_EGRESS, NO_FLAGS); - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, nop_map, verdict_map, - REDIR_EGRESS, NO_FLAGS); - - /* MSG_OOB not supported by AF_UNIX SOCK_DGRAM */ - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, nop_map, verdict_map, - REDIR_EGRESS, MSG_OOB); - - skel->bss->test_ingress = true; - unix_inet_redir_to_connected(family, SOCK_DGRAM, - sock_map, -1, verdict_map, - REDIR_INGRESS, NO_FLAGS); - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, -1, verdict_map, - REDIR_INGRESS, NO_FLAGS); - - unix_inet_redir_to_connected(family, SOCK_DGRAM, - sock_map, nop_map, verdict_map, - REDIR_INGRESS, NO_FLAGS); - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, nop_map, verdict_map, - REDIR_INGRESS, NO_FLAGS); - - /* MSG_OOB not supported by AF_UNIX SOCK_DGRAM */ - unix_inet_redir_to_connected(family, SOCK_STREAM, - sock_map, nop_map, verdict_map, - REDIR_INGRESS, MSG_OOB); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void test_udp_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *map, - int family) -{ - const char *family_name, *map_name; - struct netns_obj *netns; - char s[MAX_TEST_NAME]; - - family_name = family_str(family); - map_name = map_type_str(map); - snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); - if (!test__start_subtest(s)) - return; - - netns = netns_new("sockmap_listen", true); - if (!ASSERT_OK_PTR(netns, "netns_new")) - return; - - inet_unix_skb_redir_to_connected(skel, map, family); - unix_inet_skb_redir_to_connected(skel, map, family); - - netns_free(netns); -} - static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, int family) { @@ -1754,7 +1540,6 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, 
test_reuseport(skel, map, family, SOCK_STREAM); test_reuseport(skel, map, family, SOCK_DGRAM); test_udp_redir(skel, map, family); - test_udp_unix_redir(skel, map, family); } void serial_test_sockmap_listen(void) @@ -1770,14 +1555,10 @@ void serial_test_sockmap_listen(void) skel->bss->test_sockmap = true; run_tests(skel, skel->maps.sock_map, AF_INET); run_tests(skel, skel->maps.sock_map, AF_INET6); - test_unix_redir(skel, skel->maps.sock_map, SOCK_DGRAM); - test_unix_redir(skel, skel->maps.sock_map, SOCK_STREAM); skel->bss->test_sockmap = false; run_tests(skel, skel->maps.sock_hash, AF_INET); run_tests(skel, skel->maps.sock_hash, AF_INET6); - test_unix_redir(skel, skel->maps.sock_hash, SOCK_DGRAM); - test_unix_redir(skel, skel->maps.sock_hash, SOCK_STREAM); test_sockmap_listen__destroy(skel); } From c04eeeb2af8ea2c3f764d2d093bb9b435024a019 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Thu, 15 May 2025 00:15:31 +0200 Subject: [PATCH 114/135] selftests/bpf: sockmap_listen cleanup: Drop af_inet SOCK_DGRAM redir tests Remove tests covered by sockmap_redir. Signed-off-by: Michal Luczaj Signed-off-by: Martin KaFai Lau Acked-by: John Fastabend Link: https://lore.kernel.org/r/20250515-selftests-sockmap-redir-v3-8-a1ea723f7e7e@rbox.co --- .../selftests/bpf/prog_tests/sockmap_listen.c | 126 ------------------ 1 file changed, 126 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 4f38dd7d23da..1d98eee7a2c3 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -1366,69 +1366,6 @@ static void test_redir(struct test_sockmap_listen *skel, struct bpf_map *map, } } -static void pairs_redir_to_connected(int cli0, int peer0, int cli1, int peer1, - int sock_mapfd, int nop_mapfd, - int verd_mapfd, enum redir_mode mode, - int send_flags) -{ - const char *log_prefix = redir_mode_str(mode); - unsigned int pass; - int err, n; - u32 key; - char b; - - zero_verdict_count(verd_mapfd); - - err = add_to_sockmap(sock_mapfd, peer0, peer1); - if (err) - return; - - if (nop_mapfd >= 0) { - err = add_to_sockmap(nop_mapfd, cli0, cli1); - if (err) - return; - } - - /* Last byte is OOB data when send_flags has MSG_OOB bit set */ - n = xsend(cli1, "ab", 2, send_flags); - if (n >= 0 && n < 2) - FAIL("%s: incomplete send", log_prefix); - if (n < 2) - return; - - key = SK_PASS; - err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); - if (err) - return; - if (pass != 1) - FAIL("%s: want pass count 1, have %d", log_prefix, pass); - - n = recv_timeout(mode == REDIR_INGRESS ? 
peer0 : cli0, &b, 1, 0, IO_TIMEOUT_SEC); - if (n < 0) - FAIL_ERRNO("%s: recv_timeout", log_prefix); - if (n == 0) - FAIL("%s: incomplete recv", log_prefix); - - if (send_flags & MSG_OOB) { - /* Check that we can't read OOB while in sockmap */ - errno = 0; - n = recv(peer1, &b, 1, MSG_OOB | MSG_DONTWAIT); - if (n != -1 || errno != EOPNOTSUPP) - FAIL("%s: recv(MSG_OOB): expected EOPNOTSUPP: retval=%d errno=%d", - log_prefix, n, errno); - - /* Remove peer1 from sockmap */ - xbpf_map_delete_elem(sock_mapfd, &(int){ 1 }); - - /* Check that OOB was dropped on redirect */ - errno = 0; - n = recv(peer1, &b, 1, MSG_OOB | MSG_DONTWAIT); - if (n != -1 || errno != EINVAL) - FAIL("%s: recv(MSG_OOB): expected EINVAL: retval=%d errno=%d", - log_prefix, n, errno); - } -} - static void test_reuseport(struct test_sockmap_listen *skel, struct bpf_map *map, int family, int sotype) { @@ -1469,68 +1406,6 @@ static void test_reuseport(struct test_sockmap_listen *skel, } } -static int inet_socketpair(int family, int type, int *s, int *c) -{ - return create_pair(family, type | SOCK_NONBLOCK, s, c); -} - -static void udp_redir_to_connected(int family, int sock_mapfd, int verd_mapfd, - enum redir_mode mode) -{ - int c0, c1, p0, p1; - int err; - - err = inet_socketpair(family, SOCK_DGRAM, &p0, &c0); - if (err) - return; - err = inet_socketpair(family, SOCK_DGRAM, &p1, &c1); - if (err) - goto close_cli0; - - pairs_redir_to_connected(c0, p0, c1, p1, sock_mapfd, -1, verd_mapfd, - mode, NO_FLAGS); - - xclose(c1); - xclose(p1); -close_cli0: - xclose(c0); - xclose(p0); -} - -static void udp_skb_redir_to_connected(struct test_sockmap_listen *skel, - struct bpf_map *inner_map, int family) -{ - int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); - int verdict_map = bpf_map__fd(skel->maps.verdict_map); - int sock_map = bpf_map__fd(inner_map); - int err; - - err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); - if (err) - return; - - skel->bss->test_ingress = false; - udp_redir_to_connected(family, sock_map, verdict_map, REDIR_EGRESS); - skel->bss->test_ingress = true; - udp_redir_to_connected(family, sock_map, verdict_map, REDIR_INGRESS); - - xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); -} - -static void test_udp_redir(struct test_sockmap_listen *skel, struct bpf_map *map, - int family) -{ - const char *family_name, *map_name; - char s[MAX_TEST_NAME]; - - family_name = family_str(family); - map_name = map_type_str(map); - snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); - if (!test__start_subtest(s)) - return; - udp_skb_redir_to_connected(skel, map, family); -} - static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, int family) { @@ -1539,7 +1414,6 @@ static void run_tests(struct test_sockmap_listen *skel, struct bpf_map *map, test_redir(skel, map, family, SOCK_STREAM); test_reuseport(skel, map, family, SOCK_STREAM); test_reuseport(skel, map, family, SOCK_DGRAM); - test_udp_redir(skel, map, family); } void serial_test_sockmap_listen(void) From 8259eb0e06d8f64c700f5fbdb28a5c18e10de291 Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Fri, 16 May 2025 22:17:12 +0800 Subject: [PATCH 115/135] bpf, sockmap: Avoid using sk_socket after free when sending The sk->sk_socket is not locked or referenced in backlog thread, and during the call to skb_send_sock(), there is a race condition with the release of sk_socket. All types of sockets(tcp/udp/unix/vsock) will be affected. 
Race conditions: ''' CPU0 CPU1 backlog::skb_send_sock sendmsg_unlocked sock_sendmsg sock_sendmsg_nosec close(fd): ... ops->release() -> sock_map_close() sk_socket->ops = NULL free(socket) sock->ops->sendmsg ^ panic here ''' The psock reference count becomes 0 after sock_map_close() has executed. ''' void sock_map_close() { ... if (likely(psock)) { ... // !! here we remove psock and the ref of psock becomes 0 sock_map_remove_links(sk, psock) psock = sk_psock_get(sk); if (unlikely(!psock)) goto no_psock; <=== Control jumps here via goto ... cancel_delayed_work_sync(&psock->work); <=== not executed sk_psock_put(sk, psock); ... } ''' Based on the fact that we already wait for the workqueue to finish in sock_map_close() if psock is held, we simply increase the psock reference count to avoid race conditions. With this patch, if the backlog thread is running, sock_map_close() will wait for the backlog thread to complete and cancel all pending work. If no backlog is running, any pending work that hasn't started by then will fail when invoked by sk_psock_get(), as the psock reference count has already been zeroed, and sk_psock_drop() will cancel all jobs via cancel_delayed_work_sync(). In summary, we require synchronization to coordinate the backlog thread and the close() thread. The panic I caught: ''' Workqueue: events sk_psock_backlog RIP: 0010:sock_sendmsg+0x21d/0x440 RAX: 0000000000000000 RBX: ffffc9000521fad8 RCX: 0000000000000001 ... Call Trace: ? die_addr+0x40/0xa0 ? exc_general_protection+0x14c/0x230 ? asm_exc_general_protection+0x26/0x30 ? sock_sendmsg+0x21d/0x440 ? sock_sendmsg+0x3e0/0x440 ? __pfx_sock_sendmsg+0x10/0x10 __skb_send_sock+0x543/0xb70 sk_psock_backlog+0x247/0xb80 ... ''' Fixes: 4b4647add7d3 ("sock_map: avoid race between sock_map_close and sk_psock_put") Reported-by: Michal Luczaj Signed-off-by: Jiayuan Chen Signed-off-by: Martin KaFai Lau Reviewed-by: John Fastabend Link: https://lore.kernel.org/r/20250516141713.291150-1-jiayuan.chen@linux.dev --- net/core/skmsg.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 276934673066..34c51eb1a14f 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -656,6 +656,13 @@ static void sk_psock_backlog(struct work_struct *work) bool ingress; int ret; + /* Increment the psock refcnt to synchronize with close(fd) path in + * sock_map_close(), ensuring we wait for backlog thread completion + * before sk_socket freed. If refcnt increment fails, it indicates + * sock_map_close() completed with sk_socket potentially already freed. + */ + if (!sk_psock_get(psock->sk)) + return; mutex_lock(&psock->work_mutex); while ((skb = skb_peek(&psock->ingress_skb))) { len = skb->len; @@ -708,6 +715,7 @@ static void sk_psock_backlog(struct work_struct *work) } end: mutex_unlock(&psock->work_mutex); + sk_psock_put(psock->sk, psock); } struct sk_psock *sk_psock_init(struct sock *sk, int node) From a539e2a6d51d1c12d89eec149ccc72ec561639bc Mon Sep 17 00:00:00 2001 From: Lorenz Bauer Date: Tue, 20 May 2025 14:01:17 +0100 Subject: [PATCH 116/135] btf: Allow mmap of vmlinux btf User space needs access to kernel BTF for many modern features of BPF. Right now each process needs to read the BTF blob either in pieces or as a whole. Allow mmapping the sysfs file so that processes can directly access the memory allocated for it in the kernel. remap_pfn_range is used instead of vm_insert_page due to aarch64 compatibility issues.
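A minimal user-space sketch of what the new mapping allows (illustrative only, not part of the patch; error handling trimmed):

'''
/* Map the vmlinux BTF blob instead of read()ing it piecemeal. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	void *btf;
	int fd;

	fd = open("/sys/kernel/btf/vmlinux", O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	/* Only read-only, private mappings at offset 0 are accepted. */
	btf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);
	if (btf == MAP_FAILED)
		return 1;

	printf("mapped %lld bytes of BTF\n", (long long)st.st_size);
	munmap(btf, st.st_size);
	return 0;
}
'''

Per the checks in btf_sysfs_vmlinux_mmap() below, the kernel rejects writable, executable, and shared mappings, non-zero offsets, and lengths beyond the page-aligned BTF size.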
Signed-off-by: Lorenz Bauer Signed-off-by: Andrii Nakryiko Tested-by: Alan Maguire Reviewed-by: Shakeel Butt Link: https://lore.kernel.org/bpf/20250520-vmlinux-mmap-v5-1-e8c941acc414@isovalent.com --- include/asm-generic/vmlinux.lds.h | 3 ++- kernel/bpf/sysfs_btf.c | 32 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 58a635a6d5bd..1750390735fa 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -667,10 +667,11 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) */ #ifdef CONFIG_DEBUG_INFO_BTF #define BTF \ + . = ALIGN(PAGE_SIZE); \ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ BOUNDED_SECTION_BY(.BTF, _BTF) \ } \ - . = ALIGN(4); \ + . = ALIGN(PAGE_SIZE); \ .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ *(.BTF_ids) \ } diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c index 81d6cf90584a..941d0d2427e3 100644 --- a/kernel/bpf/sysfs_btf.c +++ b/kernel/bpf/sysfs_btf.c @@ -7,14 +7,46 @@ #include #include #include +#include +#include +#include /* See scripts/link-vmlinux.sh, gen_btf() func for details */ extern char __start_BTF[]; extern char __stop_BTF[]; +static int btf_sysfs_vmlinux_mmap(struct file *filp, struct kobject *kobj, + const struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + unsigned long pages = PAGE_ALIGN(attr->size) >> PAGE_SHIFT; + size_t vm_size = vma->vm_end - vma->vm_start; + phys_addr_t addr = virt_to_phys(__start_BTF); + unsigned long pfn = addr >> PAGE_SHIFT; + + if (attr->private != __start_BTF || !PAGE_ALIGNED(addr)) + return -EINVAL; + + if (vma->vm_pgoff) + return -EINVAL; + + if (vma->vm_flags & (VM_WRITE | VM_EXEC | VM_MAYSHARE)) + return -EACCES; + + if (pfn + pages < pfn) + return -EINVAL; + + if ((vm_size >> PAGE_SHIFT) > pages) + return -EINVAL; + + vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC | VM_MAYWRITE); + return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot); +} + static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = { .attr = { .name = "vmlinux", .mode = 0444, }, .read_new = sysfs_bin_attr_simple_read, + .mmap = btf_sysfs_vmlinux_mmap, }; struct kobject *btf_kobj; From 828226b69ff54d57afd7b3cb56095ef8186ba290 Mon Sep 17 00:00:00 2001 From: Lorenz Bauer Date: Tue, 20 May 2025 14:01:18 +0100 Subject: [PATCH 117/135] selftests: bpf: Add a test for mmapable vmlinux BTF Add a basic test for the ability to mmap /sys/kernel/btf/vmlinux. Ensure that the data is valid BTF and that it is padded with zero. 
Signed-off-by: Lorenz Bauer Signed-off-by: Andrii Nakryiko Tested-by: Alan Maguire Link: https://lore.kernel.org/bpf/20250520-vmlinux-mmap-v5-2-e8c941acc414@isovalent.com --- .../selftests/bpf/prog_tests/btf_sysfs.c | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/btf_sysfs.c diff --git a/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c b/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c new file mode 100644 index 000000000000..3923e64c4c1d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2025 Isovalent */ + +#include +#include +#include +#include +#include +#include + +static void test_btf_mmap_sysfs(const char *path, struct btf *base) +{ + struct stat st; + __u64 btf_size, end; + void *raw_data = NULL; + int fd = -1; + long page_size; + struct btf *btf = NULL; + + page_size = sysconf(_SC_PAGESIZE); + if (!ASSERT_GE(page_size, 0, "get_page_size")) + goto cleanup; + + if (!ASSERT_OK(stat(path, &st), "stat_btf")) + goto cleanup; + + btf_size = st.st_size; + end = (btf_size + page_size - 1) / page_size * page_size; + + fd = open(path, O_RDONLY); + if (!ASSERT_GE(fd, 0, "open_btf")) + goto cleanup; + + raw_data = mmap(NULL, btf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); + if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_writable")) + goto cleanup; + + raw_data = mmap(NULL, btf_size, PROT_READ, MAP_SHARED, fd, 0); + if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_shared")) + goto cleanup; + + raw_data = mmap(NULL, end + 1, PROT_READ, MAP_PRIVATE, fd, 0); + if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_invalid_size")) + goto cleanup; + + raw_data = mmap(NULL, end, PROT_READ, MAP_PRIVATE, fd, 0); + if (!ASSERT_OK_PTR(raw_data, "mmap_btf")) + goto cleanup; + + if (!ASSERT_EQ(mprotect(raw_data, btf_size, PROT_READ | PROT_WRITE), -1, + "mprotect_writable")) + goto cleanup; + + if (!ASSERT_EQ(mprotect(raw_data, btf_size, PROT_READ | PROT_EXEC), -1, + "mprotect_executable")) + goto cleanup; + + /* Check padding is zeroed */ + for (int i = btf_size; i < end; i++) { + if (((__u8 *)raw_data)[i] != 0) { + PRINT_FAIL("tail of BTF is not zero at page offset %d\n", i); + goto cleanup; + } + } + + btf = btf__new_split(raw_data, btf_size, base); + if (!ASSERT_OK_PTR(btf, "parse_btf")) + goto cleanup; + +cleanup: + btf__free(btf); + if (raw_data && raw_data != MAP_FAILED) + munmap(raw_data, btf_size); + if (fd >= 0) + close(fd); +} + +void test_btf_sysfs(void) +{ + test_btf_mmap_sysfs("/sys/kernel/btf/vmlinux", NULL); +} From 3c0421c93ce4ff0f5f2612666122c34fc941d569 Mon Sep 17 00:00:00 2001 From: Lorenz Bauer Date: Tue, 20 May 2025 14:01:19 +0100 Subject: [PATCH 118/135] libbpf: Use mmap to parse vmlinux BTF from sysfs Teach libbpf to use mmap when parsing vmlinux BTF from /sys. We don't apply this to fall-back paths on the regular file system because there is no way to ensure that modifications underlying the MAP_PRIVATE mapping are not visible to the process. 
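At the API level nothing changes for consumers; a minimal usage sketch (public libbpf calls only, error handling abbreviated):

'''
#include <bpf/btf.h>
#include <stdio.h>

int main(void)
{
	/* Tries the mmap-based parser for /sys/kernel/btf/vmlinux first,
	 * transparently falling back to the read-based one.
	 */
	struct btf *btf = btf__load_vmlinux_btf();

	if (!btf)
		return 1;

	printf("vmlinux BTF holds %u types\n", btf__type_cnt(btf));
	btf__free(btf);
	return 0;
}
'''

The fall-back to btf__parse() shown below keeps behavior unchanged whenever the mmap path fails.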
Signed-off-by: Lorenz Bauer Signed-off-by: Andrii Nakryiko Tested-by: Alan Maguire Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250520-vmlinux-mmap-v5-3-e8c941acc414@isovalent.com --- tools/lib/bpf/btf.c | 89 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 71 insertions(+), 18 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 8d0d0b645a75..f1d495dc66bb 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -120,6 +121,9 @@ struct btf { /* whether base_btf should be freed in btf_free for this instance */ bool owns_base; + /* whether raw_data is a (read-only) mmap */ + bool raw_data_is_mmap; + /* BTF object FD, if loaded into kernel */ int fd; @@ -951,6 +955,17 @@ static bool btf_is_modifiable(const struct btf *btf) return (void *)btf->hdr != btf->raw_data; } +static void btf_free_raw_data(struct btf *btf) +{ + if (btf->raw_data_is_mmap) { + munmap(btf->raw_data, btf->raw_size); + btf->raw_data_is_mmap = false; + } else { + free(btf->raw_data); + } + btf->raw_data = NULL; +} + void btf__free(struct btf *btf) { if (IS_ERR_OR_NULL(btf)) @@ -970,7 +985,7 @@ void btf__free(struct btf *btf) free(btf->types_data); strset__free(btf->strs_set); } - free(btf->raw_data); + btf_free_raw_data(btf); free(btf->raw_data_swapped); free(btf->type_offs); if (btf->owns_base) @@ -1030,7 +1045,7 @@ struct btf *btf__new_empty_split(struct btf *base_btf) return libbpf_ptr(btf_new_empty(base_btf)); } -static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) +static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf, bool is_mmap) { struct btf *btf; int err; @@ -1050,12 +1065,18 @@ static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf) btf->start_str_off = base_btf->hdr->str_len; } - btf->raw_data = malloc(size); - if (!btf->raw_data) { - err = -ENOMEM; - goto done; + if (is_mmap) { + btf->raw_data = (void *)data; + btf->raw_data_is_mmap = true; + } else { + btf->raw_data = malloc(size); + if (!btf->raw_data) { + err = -ENOMEM; + goto done; + } + memcpy(btf->raw_data, data, size); } - memcpy(btf->raw_data, data, size); + btf->raw_size = size; btf->hdr = btf->raw_data; @@ -1083,12 +1104,12 @@ done: struct btf *btf__new(const void *data, __u32 size) { - return libbpf_ptr(btf_new(data, size, NULL)); + return libbpf_ptr(btf_new(data, size, NULL, false)); } struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf) { - return libbpf_ptr(btf_new(data, size, base_btf)); + return libbpf_ptr(btf_new(data, size, base_btf, false)); } struct btf_elf_secs { @@ -1209,7 +1230,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, if (secs.btf_base_data) { dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size, - NULL); + NULL, false); if (IS_ERR(dist_base_btf)) { err = PTR_ERR(dist_base_btf); dist_base_btf = NULL; @@ -1218,7 +1239,7 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, } btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size, - dist_base_btf ?: base_btf); + dist_base_btf ?: base_btf, false); if (IS_ERR(btf)) { err = PTR_ERR(btf); goto done; @@ -1335,7 +1356,7 @@ static struct btf *btf_parse_raw(const char *path, struct btf *base_btf) } /* finally parse BTF data */ - btf = btf_new(data, sz, base_btf); + btf = btf_new(data, sz, base_btf, false); err_out: free(data); @@ -1354,6 +1375,37 @@ struct btf 
*btf__parse_raw_split(const char *path, struct btf *base_btf) return libbpf_ptr(btf_parse_raw(path, base_btf)); } +static struct btf *btf_parse_raw_mmap(const char *path, struct btf *base_btf) +{ + struct stat st; + void *data; + struct btf *btf; + int fd, err; + + fd = open(path, O_RDONLY); + if (fd < 0) + return libbpf_err_ptr(-errno); + + if (fstat(fd, &st) < 0) { + err = -errno; + close(fd); + return libbpf_err_ptr(err); + } + + data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + err = -errno; + close(fd); + + if (data == MAP_FAILED) + return libbpf_err_ptr(err); + + btf = btf_new(data, st.st_size, base_btf, true); + if (IS_ERR(btf)) + munmap(data, st.st_size); + + return btf; +} + static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext) { struct btf *btf; @@ -1618,7 +1670,7 @@ struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf) goto exit_free; } - btf = btf_new(ptr, btf_info.btf_size, base_btf); + btf = btf_new(ptr, btf_info.btf_size, base_btf, false); exit_free: free(ptr); @@ -1658,10 +1710,8 @@ struct btf *btf__load_from_kernel_by_id(__u32 id) static void btf_invalidate_raw_data(struct btf *btf) { - if (btf->raw_data) { - free(btf->raw_data); - btf->raw_data = NULL; - } + if (btf->raw_data) + btf_free_raw_data(btf); if (btf->raw_data_swapped) { free(btf->raw_data_swapped); btf->raw_data_swapped = NULL; @@ -5331,7 +5381,10 @@ struct btf *btf__load_vmlinux_btf(void) pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n", sysfs_btf_path); } else { - btf = btf__parse(sysfs_btf_path, NULL); + btf = btf_parse_raw_mmap(sysfs_btf_path, NULL); + if (IS_ERR(btf)) + btf = btf__parse(sysfs_btf_path, NULL); + if (!btf) { err = -errno; pr_warn("failed to read kernel BTF from '%s': %s\n", From 079e5c56a5c41d285068939ff7b0041ab10386fa Mon Sep 17 00:00:00 2001 From: Mykyta Yatsenko Date: Fri, 23 May 2025 19:17:05 +0100 Subject: [PATCH 119/135] bpf: Fix error return value in bpf_copy_from_user_dynptr On error, copy_from_user returns number of bytes not copied to destination, but current implementation of copy_user_data_sleepable does not handle that correctly and returns it as error value, which may confuse user, expecting meaningful negative error value. Fixes: a498ee7576de ("bpf: Implement dynptr copy kfuncs") Reported-by: Dan Carpenter Signed-off-by: Mykyta Yatsenko Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20250523181705.261585-1-mykyta.yatsenko5@gmail.com --- kernel/trace/bpf_trace.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 289ff0caa83e..132c8be6f635 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3555,8 +3555,12 @@ static __always_inline int copy_user_data_sleepable(void *dst, const void *unsaf { int ret; - if (!tsk) /* Read from the current task */ - return copy_from_user(dst, (const void __user *)unsafe_src, size); + if (!tsk) { /* Read from the current task */ + ret = copy_from_user(dst, (const void __user *)unsafe_src, size); + if (ret) + return -EFAULT; + return 0; + } ret = access_process_vm(tsk, (unsigned long)unsafe_src, dst, size, 0); if (ret != size) From 89f9dba365e1b85caef556d28654c6d336629eef Mon Sep 17 00:00:00 2001 From: "T.J. 
Mercier" Date: Thu, 22 May 2025 23:04:25 +0000 Subject: [PATCH 120/135] dma-buf: Rename debugfs symbols MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename the debugfs list and mutex so it's clear they are now usable without the need for CONFIG_DEBUG_FS. The list will always be populated to support the creation of a BPF iterator for dmabufs. Signed-off-by: T.J. Mercier Reviewed-by: Christian König Acked-by: Song Liu Link: https://lore.kernel.org/r/20250522230429.941193-2-tjmercier@google.com Signed-off-by: Alexei Starovoitov --- drivers/dma-buf/dma-buf.c | 40 +++++++++++++++------------------------ include/linux/dma-buf.h | 2 -- 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 5baa83b85515..8d151784e302 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -35,35 +35,25 @@ static inline int is_dma_buf_file(struct file *); -#if IS_ENABLED(CONFIG_DEBUG_FS) -static DEFINE_MUTEX(debugfs_list_mutex); -static LIST_HEAD(debugfs_list); +static DEFINE_MUTEX(dmabuf_list_mutex); +static LIST_HEAD(dmabuf_list); -static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) +static void __dma_buf_list_add(struct dma_buf *dmabuf) { - mutex_lock(&debugfs_list_mutex); - list_add(&dmabuf->list_node, &debugfs_list); - mutex_unlock(&debugfs_list_mutex); + mutex_lock(&dmabuf_list_mutex); + list_add(&dmabuf->list_node, &dmabuf_list); + mutex_unlock(&dmabuf_list_mutex); } -static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) +static void __dma_buf_list_del(struct dma_buf *dmabuf) { if (!dmabuf) return; - mutex_lock(&debugfs_list_mutex); + mutex_lock(&dmabuf_list_mutex); list_del(&dmabuf->list_node); - mutex_unlock(&debugfs_list_mutex); + mutex_unlock(&dmabuf_list_mutex); } -#else -static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) -{ -} - -static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) -{ -} -#endif static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { @@ -115,7 +105,7 @@ static int dma_buf_file_release(struct inode *inode, struct file *file) if (!is_dma_buf_file(file)) return -EINVAL; - __dma_buf_debugfs_list_del(file->private_data); + __dma_buf_list_del(file->private_data); return 0; } @@ -689,7 +679,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) file->f_path.dentry->d_fsdata = dmabuf; dmabuf->file = file; - __dma_buf_debugfs_list_add(dmabuf); + __dma_buf_list_add(dmabuf); return dmabuf; @@ -1630,7 +1620,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) size_t size = 0; int ret; - ret = mutex_lock_interruptible(&debugfs_list_mutex); + ret = mutex_lock_interruptible(&dmabuf_list_mutex); if (ret) return ret; @@ -1639,7 +1629,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", "size", "flags", "mode", "count", "ino"); - list_for_each_entry(buf_obj, &debugfs_list, list_node) { + list_for_each_entry(buf_obj, &dmabuf_list, list_node) { ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); if (ret) @@ -1676,11 +1666,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); - mutex_unlock(&debugfs_list_mutex); + mutex_unlock(&dmabuf_list_mutex); return 0; error_unlock: - mutex_unlock(&debugfs_list_mutex); + mutex_unlock(&dmabuf_list_mutex); return ret; } diff --git a/include/linux/dma-buf.h 
b/include/linux/dma-buf.h index 36216d28d8bd..8ff4add71f88 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -370,10 +370,8 @@ struct dma_buf { */ struct module *owner; -#if IS_ENABLED(CONFIG_DEBUG_FS) /** @list_node: node for dma_buf accounting and debugging. */ struct list_head list_node; -#endif /** @priv: exporter specific private data for this buffer object. */ void *priv; From 76ea95534995adde1aa3cb1aa97ef33f50a617a9 Mon Sep 17 00:00:00 2001 From: "T.J. Mercier" Date: Thu, 22 May 2025 23:04:26 +0000 Subject: [PATCH 121/135] bpf: Add dmabuf iterator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The dmabuf iterator traverses the list of all DMA buffers. DMA buffers are refcounted through their associated struct file. A reference is taken on each buffer as the list is iterated to ensure each buffer persists for the duration of the bpf program execution without holding the list mutex. Signed-off-by: T.J. Mercier Reviewed-by: Christian König Acked-by: Song Liu Link: https://lore.kernel.org/r/20250522230429.941193-3-tjmercier@google.com Signed-off-by: Alexei Starovoitov --- drivers/dma-buf/dma-buf.c | 68 +++++++++++++++++++++++++ include/linux/dma-buf.h | 2 + kernel/bpf/Makefile | 3 ++ kernel/bpf/dmabuf_iter.c | 102 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 175 insertions(+) create mode 100644 kernel/bpf/dmabuf_iter.c diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 8d151784e302..6b59506a1b94 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -19,7 +19,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -55,6 +57,72 @@ static void __dma_buf_list_del(struct dma_buf *dmabuf) mutex_unlock(&dmabuf_list_mutex); } +/** + * dma_buf_iter_begin - begin iteration through global list of all DMA buffers + * + * Returns the first buffer in the global list of DMA-bufs that's not in the + * process of being destroyed. Increments that buffer's reference count to + * prevent buffer destruction. Callers must release the reference, either by + * continuing iteration with dma_buf_iter_next(), or with dma_buf_put(). + * + * Return: + * * First buffer from global list, with refcount elevated + * * NULL if no active buffers are present + */ +struct dma_buf *dma_buf_iter_begin(void) +{ + struct dma_buf *ret = NULL, *dmabuf; + + /* + * The list mutex does not protect a dmabuf's refcount, so it can be + * zeroed while we are iterating. We cannot call get_dma_buf() since the + * caller may not already own a reference to the buffer. + */ + mutex_lock(&dmabuf_list_mutex); + list_for_each_entry(dmabuf, &dmabuf_list, list_node) { + if (file_ref_get(&dmabuf->file->f_ref)) { + ret = dmabuf; + break; + } + } + mutex_unlock(&dmabuf_list_mutex); + return ret; +} + +/** + * dma_buf_iter_next - continue iteration through global list of all DMA buffers + * @dmabuf: [in] pointer to dma_buf + * + * Decrements the reference count on the provided buffer. Returns the next + * buffer from the remainder of the global list of DMA-bufs with its reference + * count incremented. Callers must release the reference, either by continuing + * iteration with dma_buf_iter_next(), or with dma_buf_put(). 
+ * + * Return: + * * Next buffer from global list, with refcount elevated + * * NULL if no additional active buffers are present + */ +struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf) +{ + struct dma_buf *ret = NULL; + + /* + * The list mutex does not protect a dmabuf's refcount, so it can be + * zeroed while we are iterating. We cannot call get_dma_buf() since the + * caller may not already own a reference to the buffer. + */ + mutex_lock(&dmabuf_list_mutex); + dma_buf_put(dmabuf); + list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) { + if (file_ref_get(&dmabuf->file->f_ref)) { + ret = dmabuf; + break; + } + } + mutex_unlock(&dmabuf_list_mutex); + return ret; +} + static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { struct dma_buf *dmabuf; diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8ff4add71f88..7af2ea839f58 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -634,4 +634,6 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map); void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map); int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map); void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map); +struct dma_buf *dma_buf_iter_begin(void); +struct dma_buf *dma_buf_iter_next(struct dma_buf *dmbuf); #endif /* __DMA_BUF_H__ */ diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 70502f038b92..3a335c50e6e3 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -53,6 +53,9 @@ obj-$(CONFIG_BPF_SYSCALL) += relo_core.o obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o obj-$(CONFIG_BPF_SYSCALL) += kmem_cache_iter.o +ifeq ($(CONFIG_DMA_SHARED_BUFFER),y) +obj-$(CONFIG_BPF_SYSCALL) += dmabuf_iter.o +endif CFLAGS_REMOVE_percpu_freelist.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_bpf_lru_list.o = $(CC_FLAGS_FTRACE) diff --git a/kernel/bpf/dmabuf_iter.c b/kernel/bpf/dmabuf_iter.c new file mode 100644 index 000000000000..83ef54d78b62 --- /dev/null +++ b/kernel/bpf/dmabuf_iter.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2025 Google LLC */ +#include +#include +#include +#include +#include + +static void *dmabuf_iter_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (*pos) + return NULL; + + return dma_buf_iter_begin(); +} + +static void *dmabuf_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct dma_buf *dmabuf = v; + + ++*pos; + + return dma_buf_iter_next(dmabuf); +} + +struct bpf_iter__dmabuf { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct dma_buf *, dmabuf); +}; + +static int __dmabuf_seq_show(struct seq_file *seq, void *v, bool in_stop) +{ + struct bpf_iter_meta meta = { + .seq = seq, + }; + struct bpf_iter__dmabuf ctx = { + .meta = &meta, + .dmabuf = v, + }; + struct bpf_prog *prog = bpf_iter_get_info(&meta, in_stop); + + if (prog) + return bpf_iter_run_prog(prog, &ctx); + + return 0; +} + +static int dmabuf_iter_seq_show(struct seq_file *seq, void *v) +{ + return __dmabuf_seq_show(seq, v, false); +} + +static void dmabuf_iter_seq_stop(struct seq_file *seq, void *v) +{ + struct dma_buf *dmabuf = v; + + if (dmabuf) + dma_buf_put(dmabuf); +} + +static const struct seq_operations dmabuf_iter_seq_ops = { + .start = dmabuf_iter_seq_start, + .next = dmabuf_iter_seq_next, + .stop = dmabuf_iter_seq_stop, + .show = dmabuf_iter_seq_show, +}; + +static void bpf_iter_dmabuf_show_fdinfo(const struct bpf_iter_aux_info *aux, + struct seq_file *seq) 
+{ + seq_puts(seq, "dmabuf iter\n"); +} + +static const struct bpf_iter_seq_info dmabuf_iter_seq_info = { + .seq_ops = &dmabuf_iter_seq_ops, + .init_seq_private = NULL, + .fini_seq_private = NULL, + .seq_priv_size = 0, +}; + +static struct bpf_iter_reg bpf_dmabuf_reg_info = { + .target = "dmabuf", + .feature = BPF_ITER_RESCHED, + .show_fdinfo = bpf_iter_dmabuf_show_fdinfo, + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__dmabuf, dmabuf), + PTR_TO_BTF_ID_OR_NULL }, + }, + .seq_info = &dmabuf_iter_seq_info, +}; + +DEFINE_BPF_ITER_FUNC(dmabuf, struct bpf_iter_meta *meta, struct dma_buf *dmabuf) +BTF_ID_LIST_SINGLE(bpf_dmabuf_btf_id, struct, dma_buf) + +static int __init dmabuf_iter_init(void) +{ + bpf_dmabuf_reg_info.ctx_arg_info[0].btf_id = bpf_dmabuf_btf_id[0]; + return bpf_iter_reg_target(&bpf_dmabuf_reg_info); +} + +late_initcall(dmabuf_iter_init); From 6eab7ac7c5eea7628b92cd5f9427bbd963a954ec Mon Sep 17 00:00:00 2001 From: "T.J. Mercier" Date: Thu, 22 May 2025 23:04:27 +0000 Subject: [PATCH 122/135] bpf: Add open coded dmabuf iterator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This open coded iterator allows for more flexibility when creating BPF programs. It can support output in formats other than text. With an open coded iterator, a single BPF program can traverse multiple kernel data structures (now including dmabufs), allowing for more efficient analysis of kernel data compared to multiple reads from procfs, sysfs, or multiple traditional BPF iterator invocations. Signed-off-by: T.J. Mercier Acked-by: Christian König Acked-by: Song Liu Link: https://lore.kernel.org/r/20250522230429.941193-4-tjmercier@google.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/dmabuf_iter.c | 48 ++++++++++++++++++++++++++++++++++++++++ kernel/bpf/helpers.c | 5 +++++ 2 files changed, 53 insertions(+) diff --git a/kernel/bpf/dmabuf_iter.c b/kernel/bpf/dmabuf_iter.c index 83ef54d78b62..4dd7ef7c145c 100644 --- a/kernel/bpf/dmabuf_iter.c +++ b/kernel/bpf/dmabuf_iter.c @@ -100,3 +100,51 @@ static int __init dmabuf_iter_init(void) } late_initcall(dmabuf_iter_init); + +struct bpf_iter_dmabuf { + /* + * opaque iterator state; having __u64 here allows to preserve correct + * alignment requirements in vmlinux.h, generated from BTF + */ + __u64 __opaque[1]; +} __aligned(8); + +/* Non-opaque version of bpf_iter_dmabuf */ +struct bpf_iter_dmabuf_kern { + struct dma_buf *dmabuf; +} __aligned(8); + +__bpf_kfunc_start_defs(); + +__bpf_kfunc int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it) +{ + struct bpf_iter_dmabuf_kern *kit = (void *)it; + + BUILD_BUG_ON(sizeof(*kit) > sizeof(*it)); + BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it)); + + kit->dmabuf = NULL; + return 0; +} + +__bpf_kfunc struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it) +{ + struct bpf_iter_dmabuf_kern *kit = (void *)it; + + if (kit->dmabuf) + kit->dmabuf = dma_buf_iter_next(kit->dmabuf); + else + kit->dmabuf = dma_buf_iter_begin(); + + return kit->dmabuf; +} + +__bpf_kfunc void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) +{ + struct bpf_iter_dmabuf_kern *kit = (void *)it; + + if (kit->dmabuf) + dma_buf_put(kit->dmabuf); +} + +__bpf_kfunc_end_defs(); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index c1113b74e1e2..bd17ed5bfc4b 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -3386,6 +3386,11 @@ BTF_ID_FLAGS(func, bpf_copy_from_user_dynptr, KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_copy_from_user_str_dynptr, KF_SLEEPABLE) 
BTF_ID_FLAGS(func, bpf_copy_from_user_task_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_copy_from_user_task_str_dynptr, KF_SLEEPABLE | KF_TRUSTED_ARGS) +#ifdef CONFIG_DMA_SHARED_BUFFER +BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) +#endif BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { From ae5d2c59ecd78df65254f4a40178b34f752bc1a9 Mon Sep 17 00:00:00 2001 From: "T.J. Mercier" Date: Thu, 22 May 2025 23:04:28 +0000 Subject: [PATCH 123/135] selftests/bpf: Add test for dmabuf_iter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This test creates a udmabuf, and a dmabuf from the system dmabuf heap, and uses a BPF program that prints dmabuf metadata with the new dmabuf_iter to verify they can be found. Signed-off-by: T.J. Mercier Acked-by: Christian König Acked-by: Song Liu Link: https://lore.kernel.org/r/20250522230429.941193-5-tjmercier@google.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/config | 3 + .../selftests/bpf/prog_tests/dmabuf_iter.c | 244 ++++++++++++++++++ .../testing/selftests/bpf/progs/dmabuf_iter.c | 53 ++++ 3 files changed, 300 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c create mode 100644 tools/testing/selftests/bpf/progs/dmabuf_iter.c diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index c378d5d07e02..2bdff2f3285f 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -22,6 +22,8 @@ CONFIG_CRYPTO_AES=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_BTF=y CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DMABUF_HEAPS=y +CONFIG_DMABUF_HEAPS_SYSTEM=y CONFIG_DUMMY=y CONFIG_DYNAMIC_FTRACE=y CONFIG_FPROBE=y @@ -106,6 +108,7 @@ CONFIG_SECURITY=y CONFIG_SECURITYFS=y CONFIG_SYN_COOKIES=y CONFIG_TEST_BPF=m +CONFIG_UDMABUF=y CONFIG_USERFAULTFD=y CONFIG_VSOCKETS=y CONFIG_VXLAN=y diff --git a/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c b/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c new file mode 100644 index 000000000000..dc740bd0e2bd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Google */ + +#include +#include +#include +#include "dmabuf_iter.skel.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +static int udmabuf = -1; +static const char udmabuf_test_buffer_name[DMA_BUF_NAME_LEN] = "udmabuf_test_buffer_for_iter"; +static size_t udmabuf_test_buffer_size; +static int sysheap_dmabuf = -1; +static const char sysheap_test_buffer_name[DMA_BUF_NAME_LEN] = "sysheap_test_buffer_for_iter"; +static size_t sysheap_test_buffer_size; + +static int create_udmabuf(void) +{ + struct udmabuf_create create; + int dev_udmabuf, memfd, local_udmabuf; + + udmabuf_test_buffer_size = 10 * getpagesize(); + + if (!ASSERT_LE(sizeof(udmabuf_test_buffer_name), DMA_BUF_NAME_LEN, "NAMETOOLONG")) + return -1; + + memfd = memfd_create("memfd_test", MFD_ALLOW_SEALING); + if (!ASSERT_OK_FD(memfd, "memfd_create")) + return -1; + + if (!ASSERT_OK(ftruncate(memfd, udmabuf_test_buffer_size), "ftruncate")) + goto close_memfd; + + if (!ASSERT_OK(fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK), "seal")) + goto close_memfd; + + dev_udmabuf = 
open("/dev/udmabuf", O_RDONLY); + if (!ASSERT_OK_FD(dev_udmabuf, "open udmabuf")) + goto close_memfd; + + memset(&create, 0, sizeof(create)); + create.memfd = memfd; + create.flags = UDMABUF_FLAGS_CLOEXEC; + create.offset = 0; + create.size = udmabuf_test_buffer_size; + + local_udmabuf = ioctl(dev_udmabuf, UDMABUF_CREATE, &create); + close(dev_udmabuf); + if (!ASSERT_OK_FD(local_udmabuf, "udmabuf_create")) + goto close_memfd; + + if (!ASSERT_OK(ioctl(local_udmabuf, DMA_BUF_SET_NAME_B, udmabuf_test_buffer_name), "name")) + goto close_udmabuf; + + return local_udmabuf; + +close_udmabuf: + close(local_udmabuf); +close_memfd: + close(memfd); + return -1; +} + +static int create_sys_heap_dmabuf(void) +{ + sysheap_test_buffer_size = 20 * getpagesize(); + + struct dma_heap_allocation_data data = { + .len = sysheap_test_buffer_size, + .fd = 0, + .fd_flags = O_RDWR | O_CLOEXEC, + .heap_flags = 0, + }; + int heap_fd, ret; + + if (!ASSERT_LE(sizeof(sysheap_test_buffer_name), DMA_BUF_NAME_LEN, "NAMETOOLONG")) + return -1; + + heap_fd = open("/dev/dma_heap/system", O_RDONLY); + if (!ASSERT_OK_FD(heap_fd, "open dma heap")) + return -1; + + ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data); + close(heap_fd); + if (!ASSERT_OK(ret, "syheap alloc")) + return -1; + + if (!ASSERT_OK(ioctl(data.fd, DMA_BUF_SET_NAME_B, sysheap_test_buffer_name), "name")) + goto close_sysheap_dmabuf; + + return data.fd; + +close_sysheap_dmabuf: + close(data.fd); + return -1; +} + +static int create_test_buffers(void) +{ + udmabuf = create_udmabuf(); + sysheap_dmabuf = create_sys_heap_dmabuf(); + + if (udmabuf < 0 || sysheap_dmabuf < 0) + return -1; + + return 0; +} + +static void destroy_test_buffers(void) +{ + close(udmabuf); + udmabuf = -1; + + close(sysheap_dmabuf); + sysheap_dmabuf = -1; +} + +enum Fields { INODE, SIZE, NAME, EXPORTER, FIELD_COUNT }; +struct DmabufInfo { + unsigned long inode; + unsigned long size; + char name[DMA_BUF_NAME_LEN]; + char exporter[32]; +}; + +static bool check_dmabuf_info(const struct DmabufInfo *bufinfo, + unsigned long size, + const char *name, const char *exporter) +{ + return size == bufinfo->size && + !strcmp(name, bufinfo->name) && + !strcmp(exporter, bufinfo->exporter); +} + +static void subtest_dmabuf_iter_check_no_infinite_reads(struct dmabuf_iter *skel) +{ + int iter_fd; + char buf[256]; + + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.dmabuf_collector)); + if (!ASSERT_OK_FD(iter_fd, "iter_create")) + return; + + while (read(iter_fd, buf, sizeof(buf)) > 0) + ; /* Read out all contents */ + + /* Next reads should return 0 */ + ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read"); + + close(iter_fd); +} + +static void subtest_dmabuf_iter_check_default_iter(struct dmabuf_iter *skel) +{ + bool found_test_sysheap_dmabuf = false; + bool found_test_udmabuf = false; + struct DmabufInfo bufinfo; + size_t linesize = 0; + char *line = NULL; + FILE *iter_file; + int iter_fd, f = INODE; + + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.dmabuf_collector)); + if (!ASSERT_OK_FD(iter_fd, "iter_create")) + return; + + iter_file = fdopen(iter_fd, "r"); + if (!ASSERT_OK_PTR(iter_file, "fdopen")) + goto close_iter_fd; + + while (getline(&line, &linesize, iter_file) != -1) { + if (f % FIELD_COUNT == INODE) { + ASSERT_EQ(sscanf(line, "%ld", &bufinfo.inode), 1, + "read inode"); + } else if (f % FIELD_COUNT == SIZE) { + ASSERT_EQ(sscanf(line, "%ld", &bufinfo.size), 1, + "read size"); + } else if (f % FIELD_COUNT == NAME) { + ASSERT_EQ(sscanf(line, "%s", bufinfo.name), 1, + "read name"); + } else 
if (f % FIELD_COUNT == EXPORTER) { + ASSERT_EQ(sscanf(line, "%31s", bufinfo.exporter), 1, + "read exporter"); + + if (check_dmabuf_info(&bufinfo, + sysheap_test_buffer_size, + sysheap_test_buffer_name, + "system")) + found_test_sysheap_dmabuf = true; + else if (check_dmabuf_info(&bufinfo, + udmabuf_test_buffer_size, + udmabuf_test_buffer_name, + "udmabuf")) + found_test_udmabuf = true; + } + ++f; + } + + ASSERT_EQ(f % FIELD_COUNT, INODE, "number of fields"); + + ASSERT_TRUE(found_test_sysheap_dmabuf, "found_test_sysheap_dmabuf"); + ASSERT_TRUE(found_test_udmabuf, "found_test_udmabuf"); + + free(line); + fclose(iter_file); +close_iter_fd: + close(iter_fd); +} + +void test_dmabuf_iter(void) +{ + struct dmabuf_iter *skel = NULL; + + skel = dmabuf_iter__open_and_load(); + if (!ASSERT_OK_PTR(skel, "dmabuf_iter__open_and_load")) + return; + + if (!ASSERT_OK(create_test_buffers(), "create_test_buffers")) + goto destroy; + + if (!ASSERT_OK(dmabuf_iter__attach(skel), "skel_attach")) + goto destroy; + + if (test__start_subtest("no_infinite_reads")) + subtest_dmabuf_iter_check_no_infinite_reads(skel); + if (test__start_subtest("default_iter")) + subtest_dmabuf_iter_check_default_iter(skel); + +destroy: + destroy_test_buffers(); + dmabuf_iter__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/dmabuf_iter.c b/tools/testing/selftests/bpf/progs/dmabuf_iter.c new file mode 100644 index 000000000000..e53d7646d07a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/dmabuf_iter.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Google LLC */ +#include +#include +#include + +/* From uapi/linux/dma-buf.h */ +#define DMA_BUF_NAME_LEN 32 + +char _license[] SEC("license") = "GPL"; + +/* + * Fields output by this iterator are delimited by newlines. Convert any + * newlines in user-provided printed strings to spaces. + */ +static void sanitize_string(char *src, size_t size) +{ + for (char *c = src; (size_t)(c - src) < size && *c; ++c) + if (*c == '\n') + *c = ' '; +} + +SEC("iter/dmabuf") +int dmabuf_collector(struct bpf_iter__dmabuf *ctx) +{ + const struct dma_buf *dmabuf = ctx->dmabuf; + struct seq_file *seq = ctx->meta->seq; + unsigned long inode = 0; + size_t size; + const char *pname, *exporter; + char name[DMA_BUF_NAME_LEN] = {'\0'}; + + if (!dmabuf) + return 0; + + if (BPF_CORE_READ_INTO(&inode, dmabuf, file, f_inode, i_ino) || + bpf_core_read(&size, sizeof(size), &dmabuf->size) || + bpf_core_read(&pname, sizeof(pname), &dmabuf->name) || + bpf_core_read(&exporter, sizeof(exporter), &dmabuf->exp_name)) + return 1; + + /* Buffers are not required to be named */ + if (pname) { + if (bpf_probe_read_kernel(name, sizeof(name), pname)) + return 1; + + /* Name strings can be provided by userspace */ + sanitize_string(name, sizeof(name)); + } + + BPF_SEQ_PRINTF(seq, "%lu\n%llu\n%s\n%s\n", inode, size, name, exporter); + return 0; +} From 7594dcb71ff87f9f0b11a13b32309c96960f880a Mon Sep 17 00:00:00 2001 From: "T.J. Mercier" Date: Thu, 22 May 2025 23:04:29 +0000 Subject: [PATCH 124/135] selftests/bpf: Add test for open coded dmabuf_iter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the same test buffers as the traditional iterator and a new BPF map to verify the test buffers can be found with the open coded dmabuf iterator. Signed-off-by: T.J. 
Mercier Acked-by: Christian König Acked-by: Song Liu Link: https://lore.kernel.org/r/20250522230429.941193-6-tjmercier@google.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/bpf_experimental.h | 5 ++ .../selftests/bpf/prog_tests/dmabuf_iter.c | 41 ++++++++++++++++ .../testing/selftests/bpf/progs/dmabuf_iter.c | 48 +++++++++++++++++++ 3 files changed, 94 insertions(+) diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 6535c8ae3c46..5e512a1d09d1 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -591,4 +591,9 @@ extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym; extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym; +struct bpf_iter_dmabuf; +extern int bpf_iter_dmabuf_new(struct bpf_iter_dmabuf *it) __weak __ksym; +extern struct dma_buf *bpf_iter_dmabuf_next(struct bpf_iter_dmabuf *it) __weak __ksym; +extern void bpf_iter_dmabuf_destroy(struct bpf_iter_dmabuf *it) __weak __ksym; + #endif diff --git a/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c b/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c index dc740bd0e2bd..6c2b0c3dbcd8 100644 --- a/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/dmabuf_iter.c @@ -219,14 +219,52 @@ close_iter_fd: close(iter_fd); } +static void subtest_dmabuf_iter_check_open_coded(struct dmabuf_iter *skel, int map_fd) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + char key[DMA_BUF_NAME_LEN]; + int err, fd; + bool found; + + /* No need to attach it, just run it directly */ + fd = bpf_program__fd(skel->progs.iter_dmabuf_for_each); + + err = bpf_prog_test_run_opts(fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + if (!ASSERT_OK(bpf_map_get_next_key(map_fd, NULL, key), "get next key")) + return; + + do { + ASSERT_OK(bpf_map_lookup_elem(map_fd, key, &found), "lookup"); + ASSERT_TRUE(found, "found test buffer"); + } while (bpf_map_get_next_key(map_fd, key, key)); +} + void test_dmabuf_iter(void) { struct dmabuf_iter *skel = NULL; + int map_fd; + const bool f = false; skel = dmabuf_iter__open_and_load(); if (!ASSERT_OK_PTR(skel, "dmabuf_iter__open_and_load")) return; + map_fd = bpf_map__fd(skel->maps.testbuf_hash); + if (!ASSERT_OK_FD(map_fd, "map_fd")) + goto destroy_skel; + + if (!ASSERT_OK(bpf_map_update_elem(map_fd, udmabuf_test_buffer_name, &f, BPF_ANY), + "insert udmabuf")) + goto destroy_skel; + if (!ASSERT_OK(bpf_map_update_elem(map_fd, sysheap_test_buffer_name, &f, BPF_ANY), + "insert sysheap buffer")) + goto destroy_skel; + if (!ASSERT_OK(create_test_buffers(), "create_test_buffers")) goto destroy; @@ -237,8 +275,11 @@ void test_dmabuf_iter(void) subtest_dmabuf_iter_check_no_infinite_reads(skel); if (test__start_subtest("default_iter")) subtest_dmabuf_iter_check_default_iter(skel); + if (test__start_subtest("open_coded")) + subtest_dmabuf_iter_check_open_coded(skel, map_fd); destroy: destroy_test_buffers(); +destroy_skel: dmabuf_iter__destroy(skel); } diff --git a/tools/testing/selftests/bpf/progs/dmabuf_iter.c b/tools/testing/selftests/bpf/progs/dmabuf_iter.c index e53d7646d07a..13cdb11fdeb2 100644 --- a/tools/testing/selftests/bpf/progs/dmabuf_iter.c +++ b/tools/testing/selftests/bpf/progs/dmabuf_iter.c @@ -9,6 
+9,13 @@ char _license[] SEC("license") = "GPL"; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(key_size, DMA_BUF_NAME_LEN); + __type(value, bool); + __uint(max_entries, 5); +} testbuf_hash SEC(".maps"); + /* * Fields output by this iterator are delimited by newlines. Convert any * newlines in user-provided printed strings to spaces. @@ -51,3 +58,44 @@ int dmabuf_collector(struct bpf_iter__dmabuf *ctx) BPF_SEQ_PRINTF(seq, "%lu\n%llu\n%s\n%s\n", inode, size, name, exporter); return 0; } + +SEC("syscall") +int iter_dmabuf_for_each(const void *ctx) +{ + struct dma_buf *d; + + bpf_for_each(dmabuf, d) { + char name[DMA_BUF_NAME_LEN]; + const char *pname; + bool *found; + long len; + int i; + + if (bpf_core_read(&pname, sizeof(pname), &d->name)) + return 1; + + /* Buffers are not required to be named */ + if (!pname) + continue; + + len = bpf_probe_read_kernel_str(name, sizeof(name), pname); + if (len < 0) + return 1; + + /* + * The entire name buffer is used as a map key. + * Zeroize any uninitialized trailing bytes after the NUL. + */ + bpf_for(i, len, DMA_BUF_NAME_LEN) + name[i] = 0; + + found = bpf_map_lookup_elem(&testbuf_hash, name); + if (found) { + bool t = true; + + bpf_map_update_elem(&testbuf_hash, name, &t, BPF_EXIST); + } + } + + return 0; +} From d848bba68034847e39b82694be7fabed0e9d0d1e Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 23 May 2025 13:53:21 -0700 Subject: [PATCH 125/135] bpf: Remove special_kfunc_set from verifier Currently, the verifier has both special_kfunc_set and special_kfunc_list. When adding a new kfunc usage to the verifier, it is often unclear whether the kfunc should be added to special_kfunc_set, special_kfunc_list, or both. For example, some kfuncs, e.g., bpf_dynptr_from_skb, bpf_dynptr_clone, bpf_wq_set_callback_impl, do not need to be in special_kfunc_set. To avoid potential future confusion, special_kfunc_set is deleted and btf_id_set_contains(&special_kfunc_set, ...) is removed. The code is refactored with a new function, check_special_kfunc(), which contains all the code previously covered by the branch meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id) There is no functionality change. 
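To make the new convention concrete, here is a condensed, illustrative sketch of how the caller consumes the helper's tri-state return value (the else branch stands in for the generic return-type chain in check_kfunc_call(); the full version appears in the diff below):

	err = check_special_kfunc(env, &meta, regs, insn_aux, ptr_type, desc_btf);
	if (err) {
		if (err < 0)
			return err;	/* special kfunc, but verification failed */
		/* err == 1: special kfunc, fully handled by the helper */
	} else {
		/* err == 0: not special, fall through to the generic
		 * pointer-return handling
		 */
	}
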
Signed-off-by: Yonghong Song Link: https://lore.kernel.org/r/20250523205321.1291431-1-yonghong.song@linux.dev Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 374 ++++++++++++++++++++---------------------- 1 file changed, 177 insertions(+), 197 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d5807d2efc92..639e9bb94af2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -12107,44 +12107,6 @@ enum special_kfunc_type { KF_bpf_res_spin_unlock_irqrestore, }; -BTF_SET_START(special_kfunc_set) -BTF_ID(func, bpf_obj_new_impl) -BTF_ID(func, bpf_obj_drop_impl) -BTF_ID(func, bpf_refcount_acquire_impl) -BTF_ID(func, bpf_list_push_front_impl) -BTF_ID(func, bpf_list_push_back_impl) -BTF_ID(func, bpf_list_pop_front) -BTF_ID(func, bpf_list_pop_back) -BTF_ID(func, bpf_list_front) -BTF_ID(func, bpf_list_back) -BTF_ID(func, bpf_cast_to_kern_ctx) -BTF_ID(func, bpf_rdonly_cast) -BTF_ID(func, bpf_rbtree_remove) -BTF_ID(func, bpf_rbtree_add_impl) -BTF_ID(func, bpf_rbtree_first) -BTF_ID(func, bpf_rbtree_root) -BTF_ID(func, bpf_rbtree_left) -BTF_ID(func, bpf_rbtree_right) -#ifdef CONFIG_NET -BTF_ID(func, bpf_dynptr_from_skb) -BTF_ID(func, bpf_dynptr_from_xdp) -#endif -BTF_ID(func, bpf_dynptr_slice) -BTF_ID(func, bpf_dynptr_slice_rdwr) -BTF_ID(func, bpf_dynptr_clone) -BTF_ID(func, bpf_percpu_obj_new_impl) -BTF_ID(func, bpf_percpu_obj_drop_impl) -BTF_ID(func, bpf_throw) -BTF_ID(func, bpf_wq_set_callback_impl) -#ifdef CONFIG_CGROUPS -BTF_ID(func, bpf_iter_css_task_new) -#endif -#ifdef CONFIG_BPF_LSM -BTF_ID(func, bpf_set_dentry_xattr) -BTF_ID(func, bpf_remove_dentry_xattr) -#endif -BTF_SET_END(special_kfunc_set) - BTF_ID_LIST(special_kfunc_list) BTF_ID(func, bpf_obj_new_impl) BTF_ID(func, bpf_obj_drop_impl) @@ -13452,6 +13414,178 @@ static int fetch_kfunc_meta(struct bpf_verifier_env *env, return 0; } +/* check special kfuncs and return: + * 1 - not fall-through to 'else' branch, continue verification + * 0 - fall-through to 'else' branch + * < 0 - not fall-through to 'else' branch, return error + */ +static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta, + struct bpf_reg_state *regs, struct bpf_insn_aux_data *insn_aux, + const struct btf_type *ptr_type, struct btf *desc_btf) +{ + const struct btf_type *ret_t; + int err = 0; + + if (meta->btf != btf_vmlinux) + return 0; + + if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || + meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { + struct btf_struct_meta *struct_meta; + struct btf *ret_btf; + u32 ret_btf_id; + + if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set) + return -ENOMEM; + + if (((u64)(u32)meta->arg_constant.value) != meta->arg_constant.value) { + verbose(env, "local type ID argument must be in range [0, U32_MAX]\n"); + return -EINVAL; + } + + ret_btf = env->prog->aux->btf; + ret_btf_id = meta->arg_constant.value; + + /* This may be NULL due to user not supplying a BTF */ + if (!ret_btf) { + verbose(env, "bpf_obj_new/bpf_percpu_obj_new requires prog BTF\n"); + return -EINVAL; + } + + ret_t = btf_type_by_id(ret_btf, ret_btf_id); + if (!ret_t || !__btf_type_is_struct(ret_t)) { + verbose(env, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n"); + return -EINVAL; + } + + if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { + if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) { + verbose(env, "bpf_percpu_obj_new type size (%d) is greater than %d\n", + ret_t->size, 
BPF_GLOBAL_PERCPU_MA_MAX_SIZE); + return -EINVAL; + } + + if (!bpf_global_percpu_ma_set) { + mutex_lock(&bpf_percpu_ma_lock); + if (!bpf_global_percpu_ma_set) { + /* Charge memory allocated with bpf_global_percpu_ma to + * root memcg. The obj_cgroup for root memcg is NULL. + */ + err = bpf_mem_alloc_percpu_init(&bpf_global_percpu_ma, NULL); + if (!err) + bpf_global_percpu_ma_set = true; + } + mutex_unlock(&bpf_percpu_ma_lock); + if (err) + return err; + } + + mutex_lock(&bpf_percpu_ma_lock); + err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size); + mutex_unlock(&bpf_percpu_ma_lock); + if (err) + return err; + } + + struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id); + if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { + if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) { + verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n"); + return -EINVAL; + } + + if (struct_meta) { + verbose(env, "bpf_percpu_obj_new type ID argument must not contain special fields\n"); + return -EINVAL; + } + } + + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; + regs[BPF_REG_0].btf = ret_btf; + regs[BPF_REG_0].btf_id = ret_btf_id; + if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) + regs[BPF_REG_0].type |= MEM_PERCPU; + + insn_aux->obj_new_size = ret_t->size; + insn_aux->kptr_struct_meta = struct_meta; + } else if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; + regs[BPF_REG_0].btf = meta->arg_btf; + regs[BPF_REG_0].btf_id = meta->arg_btf_id; + + insn_aux->kptr_struct_meta = + btf_find_struct_meta(meta->arg_btf, + meta->arg_btf_id); + } else if (is_list_node_type(ptr_type)) { + struct btf_field *field = meta->arg_list_head.field; + + mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); + } else if (is_rbtree_node_type(ptr_type)) { + struct btf_field *field = meta->arg_rbtree_root.field; + + mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); + } else if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED; + regs[BPF_REG_0].btf = desc_btf; + regs[BPF_REG_0].btf_id = meta->ret_btf_id; + } else if (meta->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { + ret_t = btf_type_by_id(desc_btf, meta->arg_constant.value); + if (!ret_t || !btf_type_is_struct(ret_t)) { + verbose(env, + "kfunc bpf_rdonly_cast type ID argument must be of a struct\n"); + return -EINVAL; + } + + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED; + regs[BPF_REG_0].btf = desc_btf; + regs[BPF_REG_0].btf_id = meta->arg_constant.value; + } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice] || + meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) { + enum bpf_type_flag type_flag = get_dynptr_type_flag(meta->initialized_dynptr.type); + + mark_reg_known_zero(env, regs, BPF_REG_0); + + if (!meta->arg_constant.found) { + verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n"); + return -EFAULT; + } + + regs[BPF_REG_0].mem_size = meta->arg_constant.value; + + /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */ + regs[BPF_REG_0].type = PTR_TO_MEM | type_flag; + + if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice]) { + regs[BPF_REG_0].type |= 
MEM_RDONLY; + } else { + /* this will set env->seen_direct_write to true */ + if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) { + verbose(env, "the prog does not allow writes to packet data\n"); + return -EINVAL; + } + } + + if (!meta->initialized_dynptr.id) { + verbose(env, "verifier internal error: no dynptr id\n"); + return -EFAULT; + } + regs[BPF_REG_0].dynptr_id = meta->initialized_dynptr.id; + + /* we don't need to set BPF_REG_0's ref obj id + * because packet slices are not refcounted (see + * dynptr_type_refcounted) + */ + } else { + return 0; + } + + return 1; +} + static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name); static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, @@ -13466,7 +13600,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_insn_aux_data *insn_aux; int err, insn_idx = *insn_idx_p; const struct btf_param *args; - const struct btf_type *ret_t; struct btf *desc_btf; /* skip for now, but return error when we find this in fixup_kfunc_call */ @@ -13686,163 +13819,10 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, mark_btf_func_reg_size(env, BPF_REG_0, t->size); } else if (btf_type_is_ptr(t)) { ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); - - if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) { - if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] || - meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { - struct btf_struct_meta *struct_meta; - struct btf *ret_btf; - u32 ret_btf_id; - - if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set) - return -ENOMEM; - - if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) { - verbose(env, "local type ID argument must be in range [0, U32_MAX]\n"); - return -EINVAL; - } - - ret_btf = env->prog->aux->btf; - ret_btf_id = meta.arg_constant.value; - - /* This may be NULL due to user not supplying a BTF */ - if (!ret_btf) { - verbose(env, "bpf_obj_new/bpf_percpu_obj_new requires prog BTF\n"); - return -EINVAL; - } - - ret_t = btf_type_by_id(ret_btf, ret_btf_id); - if (!ret_t || !__btf_type_is_struct(ret_t)) { - verbose(env, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n"); - return -EINVAL; - } - - if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { - if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) { - verbose(env, "bpf_percpu_obj_new type size (%d) is greater than %d\n", - ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE); - return -EINVAL; - } - - if (!bpf_global_percpu_ma_set) { - mutex_lock(&bpf_percpu_ma_lock); - if (!bpf_global_percpu_ma_set) { - /* Charge memory allocated with bpf_global_percpu_ma to - * root memcg. The obj_cgroup for root memcg is NULL. 
- */ - err = bpf_mem_alloc_percpu_init(&bpf_global_percpu_ma, NULL); - if (!err) - bpf_global_percpu_ma_set = true; - } - mutex_unlock(&bpf_percpu_ma_lock); - if (err) - return err; - } - - mutex_lock(&bpf_percpu_ma_lock); - err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size); - mutex_unlock(&bpf_percpu_ma_lock); - if (err) - return err; - } - - struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id); - if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { - if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) { - verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n"); - return -EINVAL; - } - - if (struct_meta) { - verbose(env, "bpf_percpu_obj_new type ID argument must not contain special fields\n"); - return -EINVAL; - } - } - - mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; - regs[BPF_REG_0].btf = ret_btf; - regs[BPF_REG_0].btf_id = ret_btf_id; - if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) - regs[BPF_REG_0].type |= MEM_PERCPU; - - insn_aux->obj_new_size = ret_t->size; - insn_aux->kptr_struct_meta = struct_meta; - } else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { - mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; - regs[BPF_REG_0].btf = meta.arg_btf; - regs[BPF_REG_0].btf_id = meta.arg_btf_id; - - insn_aux->kptr_struct_meta = - btf_find_struct_meta(meta.arg_btf, - meta.arg_btf_id); - } else if (is_list_node_type(ptr_type)) { - struct btf_field *field = meta.arg_list_head.field; - - mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); - } else if (is_rbtree_node_type(ptr_type)) { - struct btf_field *field = meta.arg_rbtree_root.field; - - mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); - } else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { - mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED; - regs[BPF_REG_0].btf = desc_btf; - regs[BPF_REG_0].btf_id = meta.ret_btf_id; - } else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { - ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value); - if (!ret_t || !btf_type_is_struct(ret_t)) { - verbose(env, - "kfunc bpf_rdonly_cast type ID argument must be of a struct\n"); - return -EINVAL; - } - - mark_reg_known_zero(env, regs, BPF_REG_0); - regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED; - regs[BPF_REG_0].btf = desc_btf; - regs[BPF_REG_0].btf_id = meta.arg_constant.value; - } else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] || - meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) { - enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type); - - mark_reg_known_zero(env, regs, BPF_REG_0); - - if (!meta.arg_constant.found) { - verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n"); - return -EFAULT; - } - - regs[BPF_REG_0].mem_size = meta.arg_constant.value; - - /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */ - regs[BPF_REG_0].type = PTR_TO_MEM | type_flag; - - if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) { - regs[BPF_REG_0].type |= MEM_RDONLY; - } else { - /* this will set env->seen_direct_write to true */ - if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) { - verbose(env, "the prog does not allow writes to packet data\n"); - return -EINVAL; - } - } - - if (!meta.initialized_dynptr.id) { - verbose(env, "verifier 
internal error: no dynptr id\n"); - return -EFAULT; - } - regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id; - - /* we don't need to set BPF_REG_0's ref obj id - * because packet slices are not refcounted (see - * dynptr_type_refcounted) - */ - } else { - verbose(env, "kernel function %s unhandled dynamic return type\n", - meta.func_name); - return -EFAULT; - } + err = check_special_kfunc(env, &meta, regs, insn_aux, ptr_type, desc_btf); + if (err) { + if (err < 0) + return err; } else if (btf_type_is_void(ptr_type)) { /* kfunc returning 'void *' is equivalent to returning scalar */ mark_reg_unknown(env, regs, BPF_REG_0); @@ -13918,7 +13898,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (reg_may_point_to_spin_lock(®s[BPF_REG_0]) && !regs[BPF_REG_0].id) regs[BPF_REG_0].id = ++env->id_gen; } else if (btf_type_is_void(t)) { - if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) { + if (meta.btf == btf_vmlinux) { if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { insn_aux->kptr_struct_meta = From f95695f2c46592b4260032736a9bfc6e2a01c77c Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 23 May 2025 13:53:26 -0700 Subject: [PATCH 126/135] bpf: Warn with __bpf_trap() kfunc maybe due to uninitialized variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Marc Suñé (Isovalent, part of Cisco) reported an issue where an uninitialized variable caused generating bpf prog binary code not working as expected. The reproducer is in [1] where the flags “-Wall -Werror” are enabled, but there is no warning as the compiler takes advantage of uninitialized variable to do aggressive optimization. The optimized code looks like below: ; { 0: bf 16 00 00 00 00 00 00 r6 = r1 ; bpf_printk("Start"); 1: 18 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r1 = 0x0 ll 0000000000000008: R_BPF_64_64 .rodata 3: b4 02 00 00 06 00 00 00 w2 = 0x6 4: 85 00 00 00 06 00 00 00 call 0x6 ; DEFINE_FUNC_CTX_POINTER(data) 5: 61 61 4c 00 00 00 00 00 w1 = *(u32 *)(r6 + 0x4c) ; bpf_printk("pre ipv6_hdrlen_offset"); 6: 18 01 00 00 06 00 00 00 00 00 00 00 00 00 00 00 r1 = 0x6 ll 0000000000000030: R_BPF_64_64 .rodata 8: b4 02 00 00 17 00 00 00 w2 = 0x17 9: 85 00 00 00 06 00 00 00 call 0x6 The verifier will report the following failure: 9: (85) call bpf_trace_printk#6 last insn is not an exit or jmp The above verifier log does not give a clear hint about how to fix the problem and user may take quite some time to figure out that the issue is due to compiler taking advantage of uninitialized variable. In llvm internals, uninitialized variable usage may generate 'unreachable' IR insn and these 'unreachable' IR insns may indicate uninitialized variable impact on code optimization. So far, llvm BPF backend ignores 'unreachable' IR hence the above code is generated. With clang21 patch [2], those 'unreachable' IR insn are converted to func __bpf_trap(). In order to maintain proper control flow graph for bpf progs, [2] also adds an 'exit' insn after bpf_trap() if __bpf_trap() is the last insn in the function. 
The new code looks like: ; { 0: bf 16 00 00 00 00 00 00 r6 = r1 ; bpf_printk("Start"); 1: 18 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 r1 = 0x0 ll 0000000000000008: R_BPF_64_64 .rodata 3: b4 02 00 00 06 00 00 00 w2 = 0x6 4: 85 00 00 00 06 00 00 00 call 0x6 ; DEFINE_FUNC_CTX_POINTER(data) 5: 61 61 4c 00 00 00 00 00 w1 = *(u32 *)(r6 + 0x4c) ; bpf_printk("pre ipv6_hdrlen_offset"); 6: 18 01 00 00 06 00 00 00 00 00 00 00 00 00 00 00 r1 = 0x6 ll 0000000000000030: R_BPF_64_64 .rodata 8: b4 02 00 00 17 00 00 00 w2 = 0x17 9: 85 00 00 00 06 00 00 00 call 0x6 10: 85 10 00 00 ff ff ff ff call -0x1 0000000000000050: R_BPF_64_32 __bpf_trap 11: 95 00 00 00 00 00 00 00 exit In kernel, a new kfunc __bpf_trap() is added. During insn verification, any hit with __bpf_trap() will result in verification failure. The kernel is able to provide better log message for debugging. With llvm patch [2] and without this patch (no __bpf_trap() kfunc for existing kernel), e.g., for old kernels, the verifier outputs 10: kfunc '__bpf_trap' is referenced but wasn't resolved Basically, kernel does not support __bpf_trap() kfunc. This still didn't give clear signals about possible reason. With llvm patch [2] and with this patch, the verifier outputs 10: (85) call __bpf_trap#74479 unexpected __bpf_trap() due to uninitialized variable? It gives much better hints for verification failure. [1] https://github.com/msune/clang_bpf/blob/main/Makefile#L3 [2] https://github.com/llvm/llvm-project/pull/131731 Signed-off-by: Yonghong Song Link: https://lore.kernel.org/r/20250523205326.1291640-1-yonghong.song@linux.dev Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 5 +++++ kernel/bpf/verifier.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index bd17ed5bfc4b..376403707a85 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -3273,6 +3273,10 @@ __bpf_kfunc void bpf_local_irq_restore(unsigned long *flags__irq_flag) local_irq_restore(*flags__irq_flag); } +__bpf_kfunc void __bpf_trap(void) +{ +} + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(generic_btf_ids) @@ -3391,6 +3395,7 @@ BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE) #endif +BTF_ID_FLAGS(func, __bpf_trap) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 639e9bb94af2..99582e5a8c69 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -12105,6 +12105,7 @@ enum special_kfunc_type { KF_bpf_res_spin_unlock, KF_bpf_res_spin_lock_irqsave, KF_bpf_res_spin_unlock_irqrestore, + KF___bpf_trap, }; BTF_ID_LIST(special_kfunc_list) @@ -12170,6 +12171,7 @@ BTF_ID(func, bpf_res_spin_lock) BTF_ID(func, bpf_res_spin_unlock) BTF_ID(func, bpf_res_spin_lock_irqsave) BTF_ID(func, bpf_res_spin_unlock_irqrestore) +BTF_ID(func, __bpf_trap) static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) { @@ -13641,6 +13643,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return err; } __mark_btf_func_reg_size(env, regs, BPF_REG_0, sizeof(u32)); + } else if (!insn->off && insn->imm == special_kfunc_list[KF___bpf_trap]) { + verbose(env, "unexpected __bpf_trap() due to uninitialized variable?\n"); + return -EFAULT; } if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) { From 
92de53d247dffdf29d2abecf7deb4760dde98d6c Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 23 May 2025 13:53:31 -0700 Subject: [PATCH 127/135] selftests/bpf: Add unit tests with __bpf_trap() kfunc Add some inline-asm tests and C tests where __bpf_trap() or __builtin_trap() is used in the code. The __builtin_trap() test is guarded with llvm21 ([1]) since otherwise the compilation failure will happen. [1] https://github.com/llvm/llvm-project/pull/131731 Signed-off-by: Yonghong Song Link: https://lore.kernel.org/r/20250523205331.1291734-1-yonghong.song@linux.dev Tested-by: Eduard Zingerman Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_bpf_trap.c | 71 +++++++++++++++++++ 2 files changed, 73 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/verifier_bpf_trap.c diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index e66a57970d28..c9da06741104 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -14,6 +14,7 @@ #include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" #include "verifier_bpf_get_stack.skel.h" +#include "verifier_bpf_trap.skel.h" #include "verifier_bswap.skel.h" #include "verifier_btf_ctx_access.skel.h" #include "verifier_btf_unreliable_prog.skel.h" @@ -148,6 +149,7 @@ void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); } +void test_verifier_bpf_trap(void) { RUN(verifier_bpf_trap); } void test_verifier_bswap(void) { RUN(verifier_bswap); } void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); } void test_verifier_btf_unreliable_prog(void) { RUN(verifier_btf_unreliable_prog); } diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c b/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c new file mode 100644 index 000000000000..35e2cdc00a01 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bpf_trap.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#if __clang_major__ >= 21 && 0 +SEC("socket") +__description("__builtin_trap with simple c code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +void bpf_builtin_trap_with_simple_c(void) +{ + __builtin_trap(); +} +#endif + +SEC("socket") +__description("__bpf_trap with simple c code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +void bpf_trap_with_simple_c(void) +{ + __bpf_trap(); +} + +SEC("socket") +__description("__bpf_trap as the second-from-last insn") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +__naked void bpf_trap_at_func_end(void) +{ + asm volatile ( + "r0 = 0;" + "call %[__bpf_trap];" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +SEC("socket") +__description("dead code __bpf_trap in the middle of code") +__success +__naked void dead_bpf_trap_in_middle(void) +{ + asm volatile ( + "r0 = 0;" + "if r0 == 0 goto +1;" + "call %[__bpf_trap];" + "r0 = 2;" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +SEC("socket") +__description("reachable __bpf_trap in the middle of code") +__failure __msg("unexpected __bpf_trap() due to uninitialized variable?") +__naked void live_bpf_trap_in_middle(void) +{ + asm volatile ( + "r0 = 0;" + "if r0 == 1 goto +1;" + "call %[__bpf_trap];" + "r0 = 2;" + "exit;" + : + : __imm(__bpf_trap) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; From 1ae7a84ed85317b12a3395470f0cfcc900b2a61a Mon Sep 17 00:00:00 2001 From: Jiayuan Chen Date: Fri, 16 May 2025 22:47:02 +0800 Subject: [PATCH 128/135] bpftool: Add support for custom BTF path in prog load/loadall This patch exposes the btf_custom_path feature to bpftool, allowing users to specify a custom BTF file when loading BPF programs using the prog load or prog loadall commands. The argument 'btf_custom_path' in libbpf is used for those kernels that don't have CONFIG_DEBUG_INFO_BTF enabled but still want to perform CO-RE relocations. 
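For reference, here is a minimal libbpf-level sketch of what the new option maps onto; btf_custom_path is the existing bpf_object_open_opts field named above, and the object and BTF file paths below are placeholders:

	#include <bpf/libbpf.h>

	static struct bpf_object *open_with_custom_btf(void)
	{
		/* CO-RE relocations will use this BTF instead of
		 * /sys/kernel/btf/vmlinux when the object is loaded.
		 */
		LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .btf_custom_path = "/tmp/vmlinux.btf");

		return bpf_object__open_file("prog.bpf.o", &opts);
	}

The equivalent bpftool invocation would be, e.g.: bpftool prog load prog.bpf.o /sys/fs/bpf/prog kernel_btf /tmp/vmlinux.btf
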
Suggested-by: Quentin Monnet Reviewed-by: Quentin Monnet Signed-off-by: Jiayuan Chen Link: https://lore.kernel.org/r/20250516144708.298652-1-jiayuan.chen@linux.dev Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/Documentation/bpftool-prog.rst | 10 ++++++++-- tools/bpf/bpftool/bash-completion/bpftool | 4 ++-- tools/bpf/bpftool/prog.c | 12 +++++++++++- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index d6304e01afe0..da3152c16228 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -31,7 +31,7 @@ PROG COMMANDS | **bpftool** **prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }] | **bpftool** **prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }] | **bpftool** **prog pin** *PROG* *FILE* -| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** { **idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**] +| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** { **idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**] [**kernel_btf** *BTF_FILE*] | **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*] | **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*] | **bpftool** **prog tracelog** @@ -127,7 +127,7 @@ bpftool prog pin *PROG* *FILE* Note: *FILE* must be located in *bpffs* mount. It must not contain a dot character ('.'), which is reserved for future extensions of *bpffs*. -bpftool prog { load | loadall } *OBJ* *PATH* [type *TYPE*] [map { idx *IDX* | name *NAME* } *MAP*] [{ offload_dev | xdpmeta_dev } *NAME*] [pinmaps *MAP_DIR*] [autoattach] +bpftool prog { load | loadall } *OBJ* *PATH* [type *TYPE*] [map { idx *IDX* | name *NAME* } *MAP*] [{ offload_dev | xdpmeta_dev } *NAME*] [pinmaps *MAP_DIR*] [autoattach] [kernel_btf *BTF_FILE*] Load bpf program(s) from binary *OBJ* and pin as *PATH*. **bpftool prog load** pins only the first program from the *OBJ* as *PATH*. **bpftool prog loadall** pins all programs from the *OBJ* under *PATH* directory. **type** @@ -153,6 +153,12 @@ bpftool prog { load | loadall } *OBJ* *PATH* [type *TYPE*] [map { idx *IDX* | na program does not support autoattach, bpftool falls back to regular pinning for that program instead. + The **kernel_btf** option allows specifying an external BTF file to replace + the system's own vmlinux BTF file for CO-RE relocations. Note that any + other feature relying on BTF (such as fentry/fexit programs, struct_ops) + requires the BTF file for the actual kernel running on the host, often + exposed at /sys/kernel/btf/vmlinux. + Note: *PATH* must be located in *bpffs* mount. It must not contain a dot character ('.'), which is reserved for future extensions of *bpffs*. 
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 1ce409a6cbd9..27512feb5c70 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -505,13 +505,13 @@ _bpftool() _bpftool_get_map_names return 0 ;; - pinned|pinmaps) + pinned|pinmaps|kernel_btf) _filedir return 0 ;; *) COMPREPLY=( $( compgen -W "map" -- "$cur" ) ) - _bpftool_once_attr 'type pinmaps autoattach' + _bpftool_once_attr 'type pinmaps autoattach kernel_btf' _bpftool_one_of_list 'offload_dev xdpmeta_dev' return 0 ;; diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index f010295350be..96eea8a67225 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1681,8 +1681,17 @@ offload_dev: } else if (is_prefix(*argv, "autoattach")) { auto_attach = true; NEXT_ARG(); + } else if (is_prefix(*argv, "kernel_btf")) { + NEXT_ARG(); + + if (!REQ_ARGS(1)) + goto err_free_reuse_maps; + + open_opts.btf_custom_path = GET_ARG(); } else { - p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?", + p_err("expected no more arguments, " + "'type', 'map', 'offload_dev', 'xdpmeta_dev', 'pinmaps', " + "'autoattach', or 'kernel_btf', got: '%s'?", *argv); goto err_free_reuse_maps; } @@ -2474,6 +2483,7 @@ static int do_help(int argc, char **argv) " [map { idx IDX | name NAME } MAP]\\\n" " [pinmaps MAP_DIR]\n" " [autoattach]\n" + " [kernel_btf BTF_FILE]\n" " %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n" " %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n" " %1$s %2$s run PROG \\\n" From 86bc9c742426a16b52a10ef61f5b721aecca2344 Mon Sep 17 00:00:00 2001 From: KaFai Wan Date: Mon, 26 May 2025 21:33:58 +0800 Subject: [PATCH 129/135] bpf: Avoid __bpf_prog_ret0_warn when jit fails syzkaller reported an issue: WARNING: CPU: 3 PID: 217 at kernel/bpf/core.c:2357 __bpf_prog_ret0_warn+0xa/0x20 kernel/bpf/core.c:2357 Modules linked in: CPU: 3 UID: 0 PID: 217 Comm: kworker/u32:6 Not tainted 6.15.0-rc4-syzkaller-00040-g8bac8898fe39 RIP: 0010:__bpf_prog_ret0_warn+0xa/0x20 kernel/bpf/core.c:2357 Call Trace: bpf_dispatcher_nop_func include/linux/bpf.h:1316 [inline] __bpf_prog_run include/linux/filter.h:718 [inline] bpf_prog_run include/linux/filter.h:725 [inline] cls_bpf_classify+0x74a/0x1110 net/sched/cls_bpf.c:105 ... When creating bpf program, 'fp->jit_requested' depends on bpf_jit_enable. This issue is triggered because of CONFIG_BPF_JIT_ALWAYS_ON is not set and bpf_jit_enable is set to 1, causing the arch to attempt JIT the prog, but jit failed due to FAULT_INJECTION. As a result, incorrectly treats the program as valid, when the program runs it calls `__bpf_prog_ret0_warn` and triggers the WARN_ON_ONCE(1). Reported-by: syzbot+0903f6d7f285e41cdf10@syzkaller.appspotmail.com Closes: https://lore.kernel.org/bpf/6816e34e.a70a0220.254cdc.002c.GAE@google.com Fixes: fa9dd599b4da ("bpf: get rid of pure_initcall dependency to enable jits") Signed-off-by: KaFai Wan Link: https://lore.kernel.org/r/20250526133358.2594176-1-mannkafai@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index a3e571688421..c20babbf998f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2474,7 +2474,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) /* In case of BPF to BPF calls, verifier did all the prep * work with regards to JITing, etc. 
*/ - bool jit_needed = false; + bool jit_needed = fp->jit_requested; if (fp->bpf_func) goto finalize; From d4965578267e2e81f67c86e2608481e77e9c8569 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Mon, 26 May 2025 14:25:34 +0800 Subject: [PATCH 130/135] bpf: Check rcu_read_lock_trace_held() in bpf_map_lookup_percpu_elem() bpf_map_lookup_percpu_elem() helper is also available for sleepable bpf program. When BPF JIT is disabled or under 32-bit host, bpf_map_lookup_percpu_elem() will not be inlined. Using it in a sleepable bpf program will trigger the warning in bpf_map_lookup_percpu_elem(), because the bpf program only holds rcu_read_lock_trace lock. Therefore, add the missed check. Reported-by: syzbot+dce5aae19ae4d6399986@syzkaller.appspotmail.com Closes: https://lore.kernel.org/bpf/000000000000176a130617420310@google.com/ Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20250526062534.1105938-1-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 376403707a85..b71e428ad936 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -130,7 +130,8 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = { BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) { - WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && + !rcu_read_lock_bh_held()); return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); } From 9014cf56f13d8ce21329837c148f723490950e28 Mon Sep 17 00:00:00 2001 From: Xu Kuohai Date: Tue, 27 May 2025 12:06:03 +0200 Subject: [PATCH 131/135] bpf, arm64: Support up to 12 function arguments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently ARM64 bpf trampoline supports up to 8 function arguments. According to the statistics from commit 473e3150e30a ("bpf, x86: allow function arguments up to 12 for TRACING"), there are about 200 functions accept 9 to 12 arguments, so adding support for up to 12 function arguments. Due to bpf only supporting function arguments up to 16 bytes, according to AAPCS64, starting from the first argument, each argument is first attempted to be loaded to 1 or 2 smallest registers from x0-x7, if there are no enough registers to hold the entire argument, then all remaining arguments starting from this one are pushed to the stack for passing. There are some non-trivial cases for which it is not possible to correctly read arguments from/write arguments to the stack: for example struct variables may have custom packing/alignment attributes that are invisible in BTF info. Such cases are denied for now to make sure not to read incorrect values. 
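As a worked example of this rule, here is a small stand-alone sketch of the slot accounting (it mirrors the computation done by the new calc_arg_aux() helper in this patch; the argument sizes are made up for illustration, with 16-byte arguments taking two 8-byte slots):

	#include <stdio.h>

	int main(void)
	{
		int arg_size[] = { 8, 16, 8, 8, 8, 8, 16, 8 }; /* hypothetical func model */
		int nr_args = 8, nregs = 0, i;

		for (i = 0; i < nr_args; i++) {
			int slots = (arg_size[i] + 7) / 8;

			if (nregs + slots > 8)
				break; /* x0-x7 exhausted: this and all later args go on the stack */
			nregs += slots;
		}
		/* prints: args 0..5 use x0-x6, args 6..7 go on the stack */
		printf("args 0..%d use x0-x%d, args %d..%d go on the stack\n",
		       i - 1, nregs - 1, i, nr_args - 1);
		return 0;
	}
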
Signed-off-by: Xu Kuohai Co-developed-by: Alexis Lothoré (eBPF Foundation) Signed-off-by: Alexis Lothoré (eBPF Foundation) Link: https://lore.kernel.org/r/20250527-many_args_arm64-v3-1-3faf7bb8e4a2@bootlin.com Signed-off-by: Alexei Starovoitov --- arch/arm64/net/bpf_jit_comp.c | 227 ++++++++++++++++++++++++++-------- 1 file changed, 172 insertions(+), 55 deletions(-) diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 70d7c89d3ac9..b5c3ab623536 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2064,7 +2064,7 @@ bool bpf_jit_supports_subprog_tailcalls(void) } static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, - int args_off, int retval_off, int run_ctx_off, + int bargs_off, int retval_off, int run_ctx_off, bool save_ret) { __le32 *branch; @@ -2106,7 +2106,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, branch = ctx->image + ctx->idx; emit(A64_NOP, ctx); - emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx); + emit(A64_ADD_I(1, A64_R(0), A64_SP, bargs_off), ctx); if (!p->jited) emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx); @@ -2131,7 +2131,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, } static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, - int args_off, int retval_off, int run_ctx_off, + int bargs_off, int retval_off, int run_ctx_off, __le32 **branches) { int i; @@ -2141,7 +2141,7 @@ static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, */ emit(A64_STR64I(A64_ZR, A64_SP, retval_off), ctx); for (i = 0; i < tl->nr_links; i++) { - invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off, + invoke_bpf_prog(ctx, tl->links[i], bargs_off, retval_off, run_ctx_off, true); /* if (*(u64 *)(sp + retval_off) != 0) * goto do_fexit; @@ -2155,23 +2155,125 @@ static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl, } } -static void save_args(struct jit_ctx *ctx, int args_off, int nregs) -{ - int i; +struct arg_aux { + /* how many args are passed through registers, the rest of the args are + * passed through stack + */ + int args_in_regs; + /* how many registers are used to pass arguments */ + int regs_for_args; + /* how much stack is used for additional args passed to bpf program + * that did not fit in original function registers + */ + int bstack_for_args; + /* home much stack is used for additional args passed to the + * original function when called from trampoline (this one needs + * arguments to be properly aligned) + */ + int ostack_for_args; +}; - for (i = 0; i < nregs; i++) { - emit(A64_STR64I(i, A64_SP, args_off), ctx); - args_off += 8; +static int calc_arg_aux(const struct btf_func_model *m, + struct arg_aux *a) +{ + int stack_slots, nregs, slots, i; + + /* verifier ensures m->nr_args <= MAX_BPF_FUNC_ARGS */ + for (i = 0, nregs = 0; i < m->nr_args; i++) { + slots = (m->arg_size[i] + 7) / 8; + if (nregs + slots <= 8) /* passed through register ? 
*/ + nregs += slots; + else + break; + } + + a->args_in_regs = i; + a->regs_for_args = nregs; + a->ostack_for_args = 0; + a->bstack_for_args = 0; + + /* the rest arguments are passed through stack */ + for (; i < m->nr_args; i++) { + /* We can not know for sure about exact alignment needs for + * struct passed on stack, so deny those + */ + if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) + return -ENOTSUPP; + stack_slots = (m->arg_size[i] + 7) / 8; + a->bstack_for_args += stack_slots * 8; + a->ostack_for_args = a->ostack_for_args + stack_slots * 8; + } + + return 0; +} + +static void clear_garbage(struct jit_ctx *ctx, int reg, int effective_bytes) +{ + if (effective_bytes) { + int garbage_bits = 64 - 8 * effective_bytes; +#ifdef CONFIG_CPU_BIG_ENDIAN + /* garbage bits are at the right end */ + emit(A64_LSR(1, reg, reg, garbage_bits), ctx); + emit(A64_LSL(1, reg, reg, garbage_bits), ctx); +#else + /* garbage bits are at the left end */ + emit(A64_LSL(1, reg, reg, garbage_bits), ctx); + emit(A64_LSR(1, reg, reg, garbage_bits), ctx); +#endif } } -static void restore_args(struct jit_ctx *ctx, int args_off, int nregs) +static void save_args(struct jit_ctx *ctx, int bargs_off, int oargs_off, + const struct btf_func_model *m, + const struct arg_aux *a, + bool for_call_origin) { int i; + int reg; + int doff; + int soff; + int slots; + u8 tmp = bpf2a64[TMP_REG_1]; - for (i = 0; i < nregs; i++) { - emit(A64_LDR64I(i, A64_SP, args_off), ctx); - args_off += 8; + /* store arguments to the stack for the bpf program, or restore + * arguments from stack for the original function + */ + for (reg = 0; reg < a->regs_for_args; reg++) { + emit(for_call_origin ? + A64_LDR64I(reg, A64_SP, bargs_off) : + A64_STR64I(reg, A64_SP, bargs_off), + ctx); + bargs_off += 8; + } + + soff = 32; /* on stack arguments start from FP + 32 */ + doff = (for_call_origin ? oargs_off : bargs_off); + + /* save on stack arguments */ + for (i = a->args_in_regs; i < m->nr_args; i++) { + slots = (m->arg_size[i] + 7) / 8; + /* verifier ensures arg_size <= 16, so slots equals 1 or 2 */ + while (slots-- > 0) { + emit(A64_LDR64I(tmp, A64_FP, soff), ctx); + /* if there is unused space in the last slot, clear + * the garbage contained in the space. 
+ */ + if (slots == 0 && !for_call_origin) + clear_garbage(ctx, tmp, m->arg_size[i] % 8); + emit(A64_STR64I(tmp, A64_SP, doff), ctx); + soff += 8; + doff += 8; + } + } +} + +static void restore_args(struct jit_ctx *ctx, int bargs_off, int nregs) +{ + int reg; + + for (reg = 0; reg < nregs; reg++) { + emit(A64_LDR64I(reg, A64_SP, bargs_off), ctx); + bargs_off += 8; } } @@ -2194,17 +2296,21 @@ static bool is_struct_ops_tramp(const struct bpf_tramp_links *fentry_links) */ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, struct bpf_tramp_links *tlinks, void *func_addr, - int nregs, u32 flags) + const struct btf_func_model *m, + const struct arg_aux *a, + u32 flags) { int i; int stack_size; int retaddr_off; int regs_off; int retval_off; - int args_off; - int nregs_off; + int bargs_off; + int nfuncargs_off; int ip_off; int run_ctx_off; + int oargs_off; + int nfuncargs; struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; @@ -2213,31 +2319,38 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, bool is_struct_ops = is_struct_ops_tramp(fentry); /* trampoline stack layout: - * [ parent ip ] - * [ FP ] - * SP + retaddr_off [ self ip ] - * [ FP ] + * [ parent ip ] + * [ FP ] + * SP + retaddr_off [ self ip ] + * [ FP ] * - * [ padding ] align SP to multiples of 16 + * [ padding ] align SP to multiples of 16 * - * [ x20 ] callee saved reg x20 - * SP + regs_off [ x19 ] callee saved reg x19 + * [ x20 ] callee saved reg x20 + * SP + regs_off [ x19 ] callee saved reg x19 * - * SP + retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or - * BPF_TRAMP_F_RET_FENTRY_RET + * SP + retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or + * BPF_TRAMP_F_RET_FENTRY_RET + * [ arg reg N ] + * [ ... ] + * SP + bargs_off [ arg reg 1 ] for bpf * - * [ arg reg N ] - * [ ... ] - * SP + args_off [ arg reg 1 ] + * SP + nfuncargs_off [ arg regs count ] * - * SP + nregs_off [ arg regs count ] + * SP + ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag * - * SP + ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag + * SP + run_ctx_off [ bpf_tramp_run_ctx ] * - * SP + run_ctx_off [ bpf_tramp_run_ctx ] + * [ stack arg N ] + * [ ... 
] + * SP + oargs_off [ stack arg 1 ] for original func */ stack_size = 0; + oargs_off = stack_size; + if (flags & BPF_TRAMP_F_CALL_ORIG) + stack_size += a->ostack_for_args; + run_ctx_off = stack_size; /* room for bpf_tramp_run_ctx */ stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8); @@ -2247,13 +2360,14 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, if (flags & BPF_TRAMP_F_IP_ARG) stack_size += 8; - nregs_off = stack_size; + nfuncargs_off = stack_size; /* room for args count */ stack_size += 8; - args_off = stack_size; + bargs_off = stack_size; /* room for args */ - stack_size += nregs * 8; + nfuncargs = a->regs_for_args + a->bstack_for_args / 8; + stack_size += 8 * nfuncargs; /* room for return value */ retval_off = stack_size; @@ -2300,11 +2414,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } /* save arg regs count*/ - emit(A64_MOVZ(1, A64_R(10), nregs, 0), ctx); - emit(A64_STR64I(A64_R(10), A64_SP, nregs_off), ctx); + emit(A64_MOVZ(1, A64_R(10), nfuncargs, 0), ctx); + emit(A64_STR64I(A64_R(10), A64_SP, nfuncargs_off), ctx); - /* save arg regs */ - save_args(ctx, args_off, nregs); + /* save args for bpf */ + save_args(ctx, bargs_off, oargs_off, m, a, false); /* save callee saved registers */ emit(A64_STR64I(A64_R(19), A64_SP, regs_off), ctx); @@ -2320,7 +2434,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } for (i = 0; i < fentry->nr_links; i++) - invoke_bpf_prog(ctx, fentry->links[i], args_off, + invoke_bpf_prog(ctx, fentry->links[i], bargs_off, retval_off, run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET); @@ -2330,12 +2444,13 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, if (!branches) return -ENOMEM; - invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off, + invoke_bpf_mod_ret(ctx, fmod_ret, bargs_off, retval_off, run_ctx_off, branches); } if (flags & BPF_TRAMP_F_CALL_ORIG) { - restore_args(ctx, args_off, nregs); + /* save args for original func */ + save_args(ctx, bargs_off, oargs_off, m, a, true); /* call original func */ emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx); emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx); @@ -2354,7 +2469,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } for (i = 0; i < fexit->nr_links; i++) - invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off, + invoke_bpf_prog(ctx, fexit->links[i], bargs_off, retval_off, run_ctx_off, false); if (flags & BPF_TRAMP_F_CALL_ORIG) { @@ -2368,7 +2483,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, } if (flags & BPF_TRAMP_F_RESTORE_REGS) - restore_args(ctx, args_off, nregs); + restore_args(ctx, bargs_off, a->regs_for_args); /* restore callee saved register x19 and x20 */ emit(A64_LDR64I(A64_R(19), A64_SP, regs_off), ctx); @@ -2428,14 +2543,16 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, .idx = 0, }; struct bpf_tramp_image im; + struct arg_aux aaux; int nregs, ret; nregs = btf_func_model_nregs(m); - /* the first 8 registers are used for arguments */ - if (nregs > 8) - return -ENOTSUPP; - ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, nregs, flags); + ret = calc_arg_aux(m, &aaux); + if (ret < 0) + return ret; + + ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, m, &aaux, flags); if (ret < 0) return ret; @@ -2462,9 +2579,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, u32 flags, struct bpf_tramp_links *tlinks, 
void *func_addr) { - int ret, nregs; - void *image, *tmp; u32 size = ro_image_end - ro_image; + struct arg_aux aaux; + void *image, *tmp; + int ret; /* image doesn't need to be in module memory range, so we can * use kvmalloc. @@ -2480,13 +2598,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, .write = true, }; - nregs = btf_func_model_nregs(m); - /* the first 8 registers are used for arguments */ - if (nregs > 8) - return -ENOTSUPP; jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image)); - ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags); + ret = calc_arg_aux(m, &aaux); + if (ret) + goto out; + ret = prepare_trampoline(&ctx, im, tlinks, func_addr, m, &aaux, flags); if (ret > 0 && validate_code(&ctx) < 0) { ret = -EINVAL; From 149ead9d7e3de786786ce496133325b3f358ec8a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexis=20Lothor=C3=A9=20=28eBPF=20Foundation=29?= Date: Tue, 27 May 2025 12:06:04 +0200 Subject: [PATCH 132/135] selftests/bpf: enable many-args tests for arm64 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that support for up to 12 args is enabled for tracing programs on ARM64, enable the existing tests for this feature on this architecture. Signed-off-by: Alexis Lothoré (eBPF Foundation) Link: https://lore.kernel.org/r/20250527-many_args_arm64-v3-2-3faf7bb8e4a2@bootlin.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/DENYLIST.aarch64 | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 index 6d8feda27ce9..12e99c0277a8 100644 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64 @@ -1,3 +1 @@ -fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524 -fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524 tracing_struct/struct_many_args # struct_many_args:FAIL:tracing_struct_many_args__attach unexpected error: -524 From e2d2115e56c4a02377189bfc3a9a7933552a7b0f Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 23 May 2025 21:13:35 -0700 Subject: [PATCH 133/135] bpf: Do not include stack ptr register in precision backtracking bookkeeping Yi Lai reported an issue ([1]) where the following warning appears in kernel dmesg: [ 60.643604] verifier backtracking bug [ 60.643635] WARNING: CPU: 10 PID: 2315 at kernel/bpf/verifier.c:4302 __mark_chain_precision+0x3a6c/0x3e10 [ 60.648428] Modules linked in: bpf_testmod(OE) [ 60.650471] CPU: 10 UID: 0 PID: 2315 Comm: test_progs Tainted: G OE 6.15.0-rc4-gef11287f8289-dirty #327 PREEMPT(full) [ 60.654385] Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODULE [ 60.656682] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014 [ 60.660475] RIP: 0010:__mark_chain_precision+0x3a6c/0x3e10 [ 60.662814] Code: 5a 30 84 89 ea e8 c4 d9 01 00 80 3d 3e 7d d8 04 00 0f 85 60 fa ff ff c6 05 31 7d d8 04 01 48 c7 c7 00 58 30 84 e8 c4 06 a5 ff <0f> 0b e9 46 fa ff ff 48 ... 
[ 60.668720] RSP: 0018:ffff888116cc7298 EFLAGS: 00010246 [ 60.671075] RAX: 54d70e82dfd31900 RBX: ffff888115b65e20 RCX: 0000000000000000 [ 60.673659] RDX: 0000000000000001 RSI: 0000000000000004 RDI: 00000000ffffffff [ 60.676241] RBP: 0000000000000400 R08: ffff8881f6f23bd3 R09: 1ffff1103ede477a [ 60.678787] R10: dffffc0000000000 R11: ffffed103ede477b R12: ffff888115b60ae8 [ 60.681420] R13: 1ffff11022b6cbc4 R14: 00000000fffffff2 R15: 0000000000000001 [ 60.684030] FS: 00007fc2aedd80c0(0000) GS:ffff88826fa8a000(0000) knlGS:0000000000000000 [ 60.686837] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 60.689027] CR2: 000056325369e000 CR3: 000000011088b002 CR4: 0000000000370ef0 [ 60.691623] Call Trace: [ 60.692821] [ 60.693960] ? __pfx_verbose+0x10/0x10 [ 60.695656] ? __pfx_disasm_kfunc_name+0x10/0x10 [ 60.697495] check_cond_jmp_op+0x16f7/0x39b0 [ 60.699237] do_check+0x58fa/0xab10 ... Further analysis shows the warning is at line 4302 as below: 4294 /* static subprog call instruction, which 4295 * means that we are exiting current subprog, 4296 * so only r1-r5 could be still requested as 4297 * precise, r0 and r6-r10 or any stack slot in 4298 * the current frame should be zero by now 4299 */ 4300 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { 4301 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 4302 WARN_ONCE(1, "verifier backtracking bug"); 4303 return -EFAULT; 4304 } With the below test (also in the next patch): __used __naked static void __bpf_jmp_r10(void) { asm volatile ( "r2 = 2314885393468386424 ll;" "goto +0;" "if r2 <= r10 goto +3;" "if r1 >= -1835016 goto +0;" "if r2 <= 8 goto +0;" "if r3 <= 0 goto +0;" "exit;" ::: __clobber_all); } SEC("?raw_tp") __naked void bpf_jmp_r10(void) { asm volatile ( "r3 = 0 ll;" "call __bpf_jmp_r10;" "r0 = 0;" "exit;" ::: __clobber_all); } The following is the verifier failure log: 0: (18) r3 = 0x0 ; R3_w=0 2: (85) call pc+2 caller: R10=fp0 callee: frame1: R1=ctx() R3_w=0 R10=fp0 5: frame1: R1=ctx() R3_w=0 R10=fp0 ; asm volatile (" \ @ verifier_precision.c:184 5: (18) r2 = 0x20202000256c6c78 ; frame1: R2_w=0x20202000256c6c78 7: (05) goto pc+0 8: (bd) if r2 <= r10 goto pc+3 ; frame1: R2_w=0x20202000256c6c78 R10=fp0 9: (35) if r1 >= 0xffe3fff8 goto pc+0 ; frame1: R1=ctx() 10: (b5) if r2 <= 0x8 goto pc+0 mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1 mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0 mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3 mark_precise: frame1: regs=r2,r10 stack= before 7: (05) goto pc+0 mark_precise: frame1: regs=r2,r10 stack= before 5: (18) r2 = 0x20202000256c6c78 mark_precise: frame1: regs=r10 stack= before 2: (85) call pc+2 BUG regs 400 The main failure reason is due to r10 in precision backtracking bookkeeping. Actually r10 is always precise and there is no need to add it for the precision backtracking bookkeeping. One way to fix the issue is to prevent bt_set_reg() if any src/dst reg is r10. Andrii suggested to go with push_insn_history() approach to avoid explicitly checking r10 in backtrack_insn(). This patch added push_insn_history() support for cond_jmp like 'rX rY' operations. In check_cond_jmp_op(), if any of rX or rY is a stack pointer, push_insn_history() will record such information, and later backtrack_insn() will do bt_set_reg() properly for those register(s). 
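Condensed, the two halves of the fix look as follows (full context is in the diff below): check_cond_jmp_op() records which side of the comparison is the stack pointer, and backtrack_insn() then skips marking that side precise, since r10 is always precise by construction:

	/* recording side, in check_cond_jmp_op() */
	if (src_reg->type == PTR_TO_STACK)
		insn_flags |= INSN_F_SRC_REG_STACK;
	if (dst_reg->type == PTR_TO_STACK)
		insn_flags |= INSN_F_DST_REG_STACK;
	if (insn_flags)
		err = push_insn_history(env, this_branch, insn_flags, 0);

	/* consuming side, in backtrack_insn() */
	if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK))
		bt_set_reg(bt, sreg);
	if (!hist || !(hist->flags & INSN_F_DST_REG_STACK))
		bt_set_reg(bt, dreg);
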
  [1] https://lore.kernel.org/bpf/Z%2F8q3xzpU59CIYQE@ly-workstation/

Reported-by: Yi Lai
Fixes: 407958a0e980 ("bpf: encapsulate precision backtracking bookkeeping")
Signed-off-by: Yonghong Song
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20250524041335.4046126-1-yonghong.song@linux.dev
---
 include/linux/bpf_verifier.h | 12 ++++++++----
 kernel/bpf/verifier.c        | 18 ++++++++++++++++--
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 78c97e12ea4e..256274acb1d8 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -356,7 +356,11 @@ enum {
 	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
 	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
 
-	INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
+	INSN_F_STACK_ACCESS = BIT(9),
+
+	INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
+	INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
+	/* total 12 bits are used now. */
 };
 
 static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
@@ -365,9 +369,9 @@ static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
 
 struct bpf_insn_hist_entry {
 	u32 idx;
 	/* insn idx can't be bigger than 1 million */
-	u32 prev_idx : 22;
-	/* special flags, e.g., whether insn is doing register stack spill/load */
-	u32 flags : 10;
+	u32 prev_idx : 20;
+	/* special INSN_F_xxx flags */
+	u32 flags : 12;
 	/* additional registers that need precision tracking when this
 	 * jump is backtracked, vector of six 10-bit records
 	 */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 99582e5a8c69..98c52829936e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4410,8 +4410,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
 				 * before it would be equally necessary to
 				 * propagate it to dreg.
 				 */
-				bt_set_reg(bt, dreg);
-				bt_set_reg(bt, sreg);
+				if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK))
+					bt_set_reg(bt, sreg);
+				if (!hist || !(hist->flags & INSN_F_DST_REG_STACK))
+					bt_set_reg(bt, dreg);
 			} else if (BPF_SRC(insn->code) == BPF_K) {
 				/* dreg <cond> K
 				 * Only dreg still needs precision before
@@ -16392,6 +16394,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 	struct bpf_reg_state *eq_branch_regs;
 	struct linked_regs linked_regs = {};
 	u8 opcode = BPF_OP(insn->code);
+	int insn_flags = 0;
 	bool is_jmp32;
 	int pred = -1;
 	int err;
@@ -16450,6 +16453,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 				insn->src_reg);
 			return -EACCES;
 		}
+
+		if (src_reg->type == PTR_TO_STACK)
+			insn_flags |= INSN_F_SRC_REG_STACK;
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -16461,6 +16467,14 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		__mark_reg_known(src_reg, insn->imm);
 	}
 
+	if (dst_reg->type == PTR_TO_STACK)
+		insn_flags |= INSN_F_DST_REG_STACK;
+	if (insn_flags) {
+		err = push_insn_history(env, this_branch, insn_flags, 0);
+		if (err)
+			return err;
+	}
+
 	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
 	pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32);
 	if (pred >= 0) {
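
The bit budget in the bpf_verifier.h hunks above can be sanity-checked in
isolation: prev_idx donates two of its 22 bits to flags, and 20 bits still
cover the one-million instruction bound mentioned in the struct comment.
Below is a minimal userspace sketch, not kernel code; the names mirror
include/linux/bpf_verifier.h but the struct is a stand-in:

  #include <stdint.h>
  #include <assert.h>

  #define BIT(n) (1U << (n))

  enum {
          INSN_F_STACK_ACCESS  = BIT(9),
          INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
          INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
  };

  struct hist_entry_sketch {
          uint32_t idx;
          uint32_t prev_idx : 20; /* was 22 bits; two donated to flags */
          uint32_t flags    : 12; /* was 10 bits; now holds BIT(10)/BIT(11) */
  };

  int main(void)
  {
          /* 20 bits still hold any insn idx up to 1 million */
          static_assert((1u << 20) - 1 >= 1000000, "prev_idx range");
          /* every INSN_F_* bit must fit in the 12-bit flags field */
          static_assert(INSN_F_SRC_REG_STACK < (1u << 12), "flags range");
          /* both bitfields still pack into a single u32 */
          return sizeof(struct hist_entry_sketch) == 8 ? 0 : 1;
  }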
From 5ffb537e416ee22dbfb3d552102e50da33fec7f6 Mon Sep 17 00:00:00 2001
From: Yonghong Song
Date: Fri, 23 May 2025 21:13:40 -0700
Subject: [PATCH 134/135] selftests/bpf: Add tests with stack ptr register in
 conditional jmp

Add two tests:
  - one test has 'rX <cond_op> r10' where rX is not r10, and
  - another test has 'rX <cond_op> rY' where rX and rY are not r10
    but there is an early insn 'rX = r10'.

Without the previous verifier change, both tests fail.

Signed-off-by: Yonghong Song
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20250524041340.4046304-1-yonghong.song@linux.dev
---
 kernel/bpf/verifier.c                         |  7 ++-
 .../selftests/bpf/progs/verifier_precision.c  | 53 +++++++++++++++++++
 2 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 98c52829936e..a7d6e0c5928b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -16456,6 +16456,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 
 		if (src_reg->type == PTR_TO_STACK)
 			insn_flags |= INSN_F_SRC_REG_STACK;
+		if (dst_reg->type == PTR_TO_STACK)
+			insn_flags |= INSN_F_DST_REG_STACK;
 	} else {
 		if (insn->src_reg != BPF_REG_0) {
 			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
@@ -16465,10 +16467,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 		memset(src_reg, 0, sizeof(*src_reg));
 		src_reg->type = SCALAR_VALUE;
 		__mark_reg_known(src_reg, insn->imm);
+
+		if (dst_reg->type == PTR_TO_STACK)
+			insn_flags |= INSN_F_DST_REG_STACK;
 	}
 
-	if (dst_reg->type == PTR_TO_STACK)
-		insn_flags |= INSN_F_DST_REG_STACK;
 	if (insn_flags) {
 		err = push_insn_history(env, this_branch, insn_flags, 0);
 		if (err)
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
index 2dd0d15c2678..9fe5d255ee37 100644
--- a/tools/testing/selftests/bpf/progs/verifier_precision.c
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -178,4 +178,57 @@ __naked int state_loop_first_last_equal(void)
 	);
 }
 
+__used __naked static void __bpf_cond_op_r10(void)
+{
+	asm volatile (
+	"r2 = 2314885393468386424 ll;"
+	"goto +0;"
+	"if r2 <= r10 goto +3;"
+	"if r1 >= -1835016 goto +0;"
+	"if r2 <= 8 goto +0;"
+	"if r3 <= 0 goto +0;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("8: (bd) if r2 <= r10 goto pc+3")
+__msg("9: (35) if r1 >= 0xffe3fff8 goto pc+0")
+__msg("10: (b5) if r2 <= 0x8 goto pc+0")
+__msg("mark_precise: frame1: last_idx 10 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame1: regs=r2 stack= before 9: (35) if r1 >= 0xffe3fff8 goto pc+0")
+__msg("mark_precise: frame1: regs=r2 stack= before 8: (bd) if r2 <= r10 goto pc+3")
+__msg("mark_precise: frame1: regs=r2 stack= before 7: (05) goto pc+0")
+__naked void bpf_cond_op_r10(void)
+{
+	asm volatile (
+	"r3 = 0 ll;"
+	"call __bpf_cond_op_r10;"
+	"r0 = 0;"
+	"exit;"
+	::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("3: (bf) r3 = r10")
+__msg("4: (bd) if r3 <= r2 goto pc+1")
+__msg("5: (b5) if r2 <= 0x8 goto pc+2")
+__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r2 stack= before 4: (bd) if r3 <= r2 goto pc+1")
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r3 = r10")
+__naked void bpf_cond_op_not_r10(void)
+{
+	asm volatile (
+	"r0 = 0;"
+	"r2 = 2314885393468386424 ll;"
+	"r3 = r10;"
+	"if r3 <= r2 goto +1;"
+	"if r2 <= 8 goto +2;"
+	"r0 = 2 ll;"
+	"exit;"
+	::: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
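
One detail worth spelling out from the expected-message strings above: the
BPF asm writes 'if r1 >= -1835016', but the verifier log (and thus the
__msg() pattern) renders the immediate as its unsigned 32-bit form
0xffe3fff8. A minimal sketch of that two's-complement reinterpretation in
plain C, illustrative only:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          int32_t imm = -1835016;           /* immediate from the BPF asm */
          uint32_t printed = (uint32_t)imm; /* how the verifier log shows it */

          printf("0x%x\n", printed);        /* prints 0xffe3fff8 */
          return 0;
  }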
From c5cebb241e27ed0c3f4c1d2ce63089398e0ed17e Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Tue, 27 May 2025 17:27:04 -0700
Subject: [PATCH 135/135] bpf, arm64: Remove unused-but-set function and
 variable.

Remove unused-but-set function and variable to fix the build warning:

  arch/arm64/net/bpf_jit_comp.c: In function 'arch_bpf_trampoline_size':
   2547 |         int nregs, ret;
        |             ^~~~~

Fixes: 9014cf56f13d ("bpf, arm64: Support up to 12 function arguments")
Reported-by: kernel test robot
Signed-off-by: Alexei Starovoitov
Signed-off-by: Andrii Nakryiko
Acked-by: Xu Kuohai
Link: https://lore.kernel.org/bpf/20250528002704.21197-1-alexei.starovoitov@gmail.com
Closes: https://lore.kernel.org/oe-kbuild-all/202505280643.h0qYcSCM-lkp@intel.com/
---
 arch/arm64/net/bpf_jit_comp.c | 21 ++------------------
 1 file changed, 2 insertions(+), 19 deletions(-)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b5c3ab623536..14d4c6ac4ca0 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -2520,21 +2520,6 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	return ctx->idx;
 }
 
-static int btf_func_model_nregs(const struct btf_func_model *m)
-{
-	int nregs = m->nr_args;
-	int i;
-
-	/* extra registers needed for struct argument */
-	for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
-		/* The arg_size is at most 16 bytes, enforced by the verifier. */
-		if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
-			nregs += (m->arg_size[i] + 7) / 8 - 1;
-	}
-
-	return nregs;
-}
-
 int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
 			     struct bpf_tramp_links *tlinks, void *func_addr)
 {
@@ -2543,10 +2528,8 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
 		.idx = 0,
 	};
 	struct bpf_tramp_image im;
-	struct arg_aux aaux;
-	int nregs, ret;
-
-	nregs = btf_func_model_nregs(m);
+	struct arg_aux aaux;
+	int ret;
 
 	ret = calc_arg_aux(m, &aaux);
 	if (ret < 0)
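
For context on what was deleted: btf_func_model_nregs() counted how many
argument registers a BTF function model needs, with a struct argument of up
to 16 bytes occupying (size + 7) / 8 registers instead of one. A standalone
userspace sketch of that arithmetic follows; the flag value and sample model
are illustrative, not the kernel's definitions:

  #include <stdio.h>

  #define MAX_BPF_FUNC_ARGS     12
  #define BTF_FMODEL_STRUCT_ARG (1 << 0) /* illustrative flag value */

  struct func_model_sketch {
          int nr_args;
          int arg_size[MAX_BPF_FUNC_ARGS];
          int arg_flags[MAX_BPF_FUNC_ARGS];
  };

  static int nregs_sketch(const struct func_model_sketch *m)
  {
          int nregs = m->nr_args;
          int i;

          /* each struct arg adds (size + 7) / 8 - 1 extra registers */
          for (i = 0; i < MAX_BPF_FUNC_ARGS; i++)
                  if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
                          nregs += (m->arg_size[i] + 7) / 8 - 1;
          return nregs;
  }

  int main(void)
  {
          /* two scalars plus one 16-byte struct: 2 + 1 + 1 = 4 registers */
          struct func_model_sketch m = {
                  .nr_args   = 3,
                  .arg_size  = { 8, 8, 16 },
                  .arg_flags = { 0, 0, BTF_FMODEL_STRUCT_ARG },
          };

          printf("%d\n", nregs_sketch(&m)); /* prints 4 */
          return 0;
  }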