bpf: add _impl suffix for bpf_task_work_schedule* kfuncs

Rename:
bpf_task_work_schedule_resume()->bpf_task_work_schedule_resume_impl()
bpf_task_work_schedule_signal()->bpf_task_work_schedule_signal_impl()

This aligns the task work scheduling kfuncs with the established naming
scheme for kfuncs whose bpf_prog_aux argument is provided implicitly by
the verifier. The upcoming KF_IMPLICIT_ARGS feature will build on this
convention to preserve backward compatibility for BPF programs.

Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Link: https://lore.kernel.org/r/20251104-implv2-v3-1-4772b9ae0e06@meta.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Ihor Solodrai <ihor.solodrai@linux.dev>
Authored by Mykyta Yatsenko on 2025-11-04 22:54:25 +00:00
committed by Alexei Starovoitov
parent 156c75f596
commit ea0714d61d
5 changed files with 29 additions and 25 deletions
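
For context, kfuncs following the _impl convention mentioned in the commit message are normally consumed through thin wrappers on the BPF-program side (compare bpf_wq_set_callback() wrapping bpf_wq_set_callback_impl() in the selftests' bpf_experimental.h). Below is a minimal sketch of what analogous wrappers for the renamed kfuncs could look like, assuming that wrapper-macro pattern; it is an illustration of the convention, not part of this commit:

/* Sketch only: a wrapper like this hides the verifier-provided aux__prog
 * argument of an *_impl kfunc, so BPF programs keep using the short name
 * while the kernel exports the _impl symbol.
 */
#define bpf_task_work_schedule_signal(task, tw, map__map, callback)	\
	bpf_task_work_schedule_signal_impl(task, tw, map__map, callback, NULL)

#define bpf_task_work_schedule_resume(task, tw, map__map, callback)	\
	bpf_task_work_schedule_resume_impl(task, tw, map__map, callback, NULL)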

View File

@@ -4169,7 +4169,8 @@ release_prog:
}
/**
- * bpf_task_work_schedule_signal - Schedule BPF callback using task_work_add with TWA_SIGNAL mode
+ * bpf_task_work_schedule_signal_impl - Schedule BPF callback using task_work_add with TWA_SIGNAL
+ * mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
@@ -4178,15 +4179,17 @@ release_prog:
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
-__bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct bpf_task_work *tw,
-                                              void *map__map, bpf_task_work_callback_t callback,
-                                              void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_signal_impl(struct task_struct *task,
+                                                   struct bpf_task_work *tw, void *map__map,
+                                                   bpf_task_work_callback_t callback,
+                                                   void *aux__prog)
{
return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_SIGNAL);
}
/**
- * bpf_task_work_schedule_resume - Schedule BPF callback using task_work_add with TWA_RESUME mode
+ * bpf_task_work_schedule_resume_impl - Schedule BPF callback using task_work_add with TWA_RESUME
+ * mode
* @task: Task struct for which callback should be scheduled
* @tw: Pointer to struct bpf_task_work in BPF map value for internal bookkeeping
* @map__map: bpf_map that embeds struct bpf_task_work in the values
@@ -4195,9 +4198,10 @@ __bpf_kfunc int bpf_task_work_schedule_signal(struct task_struct *task, struct b
*
* Return: 0 if task work has been scheduled successfully, negative error code otherwise
*/
-__bpf_kfunc int bpf_task_work_schedule_resume(struct task_struct *task, struct bpf_task_work *tw,
-                                              void *map__map, bpf_task_work_callback_t callback,
-                                              void *aux__prog)
+__bpf_kfunc int bpf_task_work_schedule_resume_impl(struct task_struct *task,
+                                                   struct bpf_task_work *tw, void *map__map,
+                                                   bpf_task_work_callback_t callback,
+                                                   void *aux__prog)
{
return bpf_task_work_schedule(task, tw, map__map, callback, aux__prog, TWA_RESUME);
}
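
For orientation, the program-side view of these kfuncs mirrors the kernel signatures above, and the selftest hunks further down call them directly. A hedged sketch of the declarations a BPF program would use follows; the bpf_task_work_callback_t prototype and the __ksym externs are assumptions inferred from the callers shown below, not text from this diff:

/* Assumed program-side declarations; the authoritative prototypes come from
 * the kernel headers / vmlinux.h, not from this commit.
 */
struct bpf_task_work;

/* assumed callback shape: receives the map, key and value that embed the
 * scheduled struct bpf_task_work
 */
typedef int (*bpf_task_work_callback_t)(struct bpf_map *map, void *key, void *value);

extern int bpf_task_work_schedule_signal_impl(struct task_struct *task,
                                              struct bpf_task_work *tw, void *map__map,
                                              bpf_task_work_callback_t callback,
                                              void *aux__prog) __ksym;
extern int bpf_task_work_schedule_resume_impl(struct task_struct *task,
                                              struct bpf_task_work *tw, void *map__map,
                                              bpf_task_work_callback_t callback,
                                              void *aux__prog) __ksym;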
@@ -4377,8 +4381,8 @@ BTF_ID_FLAGS(func, bpf_strnstr);
BTF_ID_FLAGS(func, bpf_cgroup_read_xattr, KF_RCU)
#endif
BTF_ID_FLAGS(func, bpf_stream_vprintk, KF_TRUSTED_ARGS)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_TRUSTED_ARGS)
-BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_signal_impl, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_task_work_schedule_resume_impl, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(common_btf_ids)
static const struct btf_kfunc_id_set common_kfunc_set = {

View File

@@ -12259,8 +12259,8 @@ enum special_kfunc_type {
KF_bpf_res_spin_lock_irqsave,
KF_bpf_res_spin_unlock_irqrestore,
KF___bpf_trap,
-KF_bpf_task_work_schedule_signal,
-KF_bpf_task_work_schedule_resume,
+KF_bpf_task_work_schedule_signal_impl,
+KF_bpf_task_work_schedule_resume_impl,
};
BTF_ID_LIST(special_kfunc_list)
@@ -12331,13 +12331,13 @@ BTF_ID(func, bpf_res_spin_unlock)
BTF_ID(func, bpf_res_spin_lock_irqsave)
BTF_ID(func, bpf_res_spin_unlock_irqrestore)
BTF_ID(func, __bpf_trap)
-BTF_ID(func, bpf_task_work_schedule_signal)
-BTF_ID(func, bpf_task_work_schedule_resume)
+BTF_ID(func, bpf_task_work_schedule_signal_impl)
+BTF_ID(func, bpf_task_work_schedule_resume_impl)
static bool is_task_work_add_kfunc(u32 func_id)
{
-return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal] ||
-       func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume];
+return func_id == special_kfunc_list[KF_bpf_task_work_schedule_signal_impl] ||
+       func_id == special_kfunc_list[KF_bpf_task_work_schedule_resume_impl];
}
static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)

View File

@@ -66,7 +66,7 @@ int oncpu_hash_map(struct pt_regs *args)
if (!work)
return 0;
-bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
+bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
return 0;
}
@@ -80,7 +80,7 @@ int oncpu_array_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
-bpf_task_work_schedule_signal(task, &work->tw, &arrmap, process_work, NULL);
+bpf_task_work_schedule_signal_impl(task, &work->tw, &arrmap, process_work, NULL);
return 0;
}
@@ -102,6 +102,6 @@ int oncpu_lru_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&lrumap, &key);
if (!work || work->data[0])
return 0;
-bpf_task_work_schedule_resume(task, &work->tw, &lrumap, process_work, NULL);
+bpf_task_work_schedule_resume_impl(task, &work->tw, &lrumap, process_work, NULL);
return 0;
}

View File

@@ -53,7 +53,7 @@ int mismatch_map(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
-bpf_task_work_schedule_resume(task, &work->tw, &hmap, process_work, NULL);
+bpf_task_work_schedule_resume_impl(task, &work->tw, &hmap, process_work, NULL);
return 0;
}
@@ -65,7 +65,7 @@ int no_map_task_work(struct pt_regs *args)
struct bpf_task_work tw;
task = bpf_get_current_task_btf();
-bpf_task_work_schedule_resume(task, &tw, &hmap, process_work, NULL);
+bpf_task_work_schedule_resume_impl(task, &tw, &hmap, process_work, NULL);
return 0;
}
@@ -76,7 +76,7 @@ int task_work_null(struct pt_regs *args)
struct task_struct *task;
task = bpf_get_current_task_btf();
-bpf_task_work_schedule_resume(task, NULL, &hmap, process_work, NULL);
+bpf_task_work_schedule_resume_impl(task, NULL, &hmap, process_work, NULL);
return 0;
}
@@ -91,6 +91,6 @@ int map_null(struct pt_regs *args)
work = bpf_map_lookup_elem(&arrmap, &key);
if (!work)
return 0;
-bpf_task_work_schedule_resume(task, &work->tw, NULL, process_work, NULL);
+bpf_task_work_schedule_resume_impl(task, &work->tw, NULL, process_work, NULL);
return 0;
}

View File

@@ -51,8 +51,8 @@ int schedule_task_work(void *ctx)
if (!work)
return 0;
}
-err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap,
-                                    process_work, NULL);
+err = bpf_task_work_schedule_signal_impl(bpf_get_current_task_btf(), &work->tw, &hmap,
+                                         process_work, NULL);
if (err)
__sync_fetch_and_add(&schedule_error, 1);
else