Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Cross-merge bpf fixes after downstream PR.

No conflicts.

Adjacent changes in:
  include/linux/bpf.h
  include/uapi/linux/bpf.h
  kernel/bpf/btf.c
  kernel/bpf/helpers.c
  kernel/bpf/syscall.c
  kernel/bpf/verifier.c
  kernel/trace/bpf_trace.c
  mm/slab_common.c
  tools/include/uapi/linux/bpf.h
  tools/testing/selftests/bpf/Makefile

Link: https://lore.kernel.org/all/20241024215724.60017-1-daniel@iogearbox.net/
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
@@ -339,10 +339,6 @@ BTF_ID(func, bpf_lsm_path_chmod)
 BTF_ID(func, bpf_lsm_path_chown)
 #endif /* CONFIG_SECURITY_PATH */
 
-#ifdef CONFIG_KEYS
-BTF_ID(func, bpf_lsm_key_free)
-#endif /* CONFIG_KEYS */
-
 BTF_ID(func, bpf_lsm_mmap_file)
 BTF_ID(func, bpf_lsm_netlink_send)
 BTF_ID(func, bpf_lsm_path_notify)
@@ -3528,7 +3528,7 @@ end:
  *   (i + 1) * elem_size
  * where i is the repeat index and elem_size is the size of an element.
  */
-static int btf_repeat_fields(struct btf_field_info *info,
+static int btf_repeat_fields(struct btf_field_info *info, int info_cnt,
 			     u32 field_cnt, u32 repeat_cnt, u32 elem_size)
 {
 	u32 i, j;
@@ -3549,6 +3549,12 @@ static int btf_repeat_fields(struct btf_field_info *info,
 		}
 	}
 
+	/* The type of struct size or variable size is u32,
+	 * so the multiplication will not overflow.
+	 */
+	if (field_cnt * (repeat_cnt + 1) > info_cnt)
+		return -E2BIG;
+
 	cur = field_cnt;
 	for (i = 0; i < repeat_cnt; i++) {
 		memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0]));
@@ -3593,7 +3599,7 @@ static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *
 		info[i].off += off;
 
 	if (nelems > 1) {
-		err = btf_repeat_fields(info, ret, nelems - 1, t->size);
+		err = btf_repeat_fields(info, info_cnt, ret, nelems - 1, t->size);
 		if (err == 0)
 			ret *= nelems;
 		else
@@ -3688,10 +3694,10 @@ static int btf_find_field_one(const struct btf *btf,
 
 	if (ret == BTF_FIELD_IGNORE)
 		return 0;
-	if (nelems > info_cnt)
+	if (!info_cnt)
 		return -E2BIG;
 	if (nelems > 1) {
-		ret = btf_repeat_fields(info, 1, nelems - 1, sz);
+		ret = btf_repeat_fields(info, info_cnt, 1, nelems - 1, sz);
 		if (ret < 0)
 			return ret;
 	}
@@ -8985,6 +8991,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
 	if (!type) {
 		bpf_log(ctx->log, "relo #%u: bad type id %u\n",
 			relo_idx, relo->type_id);
+		kfree(specs);
 		return -EINVAL;
 	}
 
@@ -333,9 +333,11 @@ static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
 
 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 				struct xdp_frame **frames, int n,
-				struct net_device *dev)
+				struct net_device *tx_dev,
+				struct net_device *rx_dev)
 {
-	struct xdp_txq_info txq = { .dev = dev };
+	struct xdp_txq_info txq = { .dev = tx_dev };
+	struct xdp_rxq_info rxq = { .dev = rx_dev };
 	struct xdp_buff xdp;
 	int i, nframes = 0;
 
@@ -346,6 +348,7 @@ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 
 		xdp_convert_frame_to_buff(xdpf, &xdp);
 		xdp.txq = &txq;
+		xdp.rxq = &rxq;
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		switch (act) {
@@ -360,7 +363,7 @@ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
 			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
 			fallthrough;
 		case XDP_ABORTED:
-			trace_xdp_exception(dev, xdp_prog, act);
+			trace_xdp_exception(tx_dev, xdp_prog, act);
 			fallthrough;
 		case XDP_DROP:
 			xdp_return_frame_rx_napi(xdpf);
@@ -388,7 +391,7 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 	}
 
 	if (bq->xdp_prog) {
-		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
+		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
 		if (!to_send)
 			goto out;
 	}
@@ -111,7 +111,7 @@ const struct bpf_func_proto bpf_map_pop_elem_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
-	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
+	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
 };
 
 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
@@ -124,7 +124,7 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_CONST_MAP_PTR,
-	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
+	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
 };
 
 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
@@ -538,7 +538,7 @@ const struct bpf_func_proto bpf_strtol_proto = {
 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 	.arg2_type	= ARG_CONST_SIZE,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
 	.arg4_size	= sizeof(s64),
 };
 
@@ -566,7 +566,7 @@ const struct bpf_func_proto bpf_strtoul_proto = {
 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
 	.arg2_type	= ARG_CONST_SIZE,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
 	.arg4_size	= sizeof(u64),
 };
 
@@ -1742,7 +1742,7 @@ static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
+	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
 };
 
 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
@@ -880,7 +880,7 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
 	const struct btf_type *enum_t;
 	const char *enum_pfx;
 	u64 *delegate_msk, msk = 0;
-	char *p;
+	char *p, *str;
 	int val;
 
 	/* ignore errors, fallback to hex */
@@ -911,7 +911,8 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		return -EINVAL;
 	}
 
-	while ((p = strsep(&param->string, ":"))) {
+	str = param->string;
+	while ((p = strsep(&str, ":"))) {
 		if (strcmp(p, "any") == 0) {
 			msk |= ~0ULL;
 		} else if (find_btf_enum_const(info.btf, enum_t, enum_pfx, p, &val)) {
@@ -688,8 +688,7 @@ static void print_reg_state(struct bpf_verifier_env *env,
 	if (t == SCALAR_VALUE && reg->precise)
 		verbose(env, "P");
 	if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
-		/* reg->off should be 0 for SCALAR_VALUE */
-		verbose_snum(env, reg->var_off.value + reg->off);
+		verbose_snum(env, reg->var_off.value);
 		return;
 	}
 
@@ -29,7 +29,7 @@ struct bpf_ringbuf {
 	u64 mask;
 	struct page **pages;
 	int nr_pages;
-	spinlock_t spinlock ____cacheline_aligned_in_smp;
+	raw_spinlock_t spinlock ____cacheline_aligned_in_smp;
 	/* For user-space producer ring buffers, an atomic_t busy bit is used
 	 * to synchronize access to the ring buffers in the kernel, rather than
 	 * the spinlock that is used for kernel-producer ring buffers. This is
@@ -173,7 +173,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
 	if (!rb)
 		return NULL;
 
-	spin_lock_init(&rb->spinlock);
+	raw_spin_lock_init(&rb->spinlock);
 	atomic_set(&rb->busy, 0);
 	init_waitqueue_head(&rb->waitq);
 	init_irq_work(&rb->work, bpf_ringbuf_notify);
@@ -421,10 +421,10 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	cons_pos = smp_load_acquire(&rb->consumer_pos);
 
 	if (in_nmi()) {
-		if (!spin_trylock_irqsave(&rb->spinlock, flags))
+		if (!raw_spin_trylock_irqsave(&rb->spinlock, flags))
 			return NULL;
 	} else {
-		spin_lock_irqsave(&rb->spinlock, flags);
+		raw_spin_lock_irqsave(&rb->spinlock, flags);
 	}
 
 	pend_pos = rb->pending_pos;
@@ -450,7 +450,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	 */
 	if (new_prod_pos - cons_pos > rb->mask ||
 	    new_prod_pos - pend_pos > rb->mask) {
-		spin_unlock_irqrestore(&rb->spinlock, flags);
+		raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 		return NULL;
 	}
 
@@ -462,7 +462,7 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
 	/* pairs with consumer's smp_load_acquire() */
 	smp_store_release(&rb->producer_pos, new_prod_pos);
 
-	spin_unlock_irqrestore(&rb->spinlock, flags);
+	raw_spin_unlock_irqrestore(&rb->spinlock, flags);
 
 	return (void *)hdr + BPF_RINGBUF_HDR_SZ;
 }
@@ -632,7 +632,7 @@ const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
 	.arg1_type	= ARG_CONST_MAP_PTR,
 	.arg2_type	= ARG_ANYTHING,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
+	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT | MEM_WRITE,
 };
 
 BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
@@ -3169,13 +3169,17 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
 {
 	const struct bpf_link *link = filp->private_data;
 	const struct bpf_prog *prog = link->prog;
+	enum bpf_link_type type = link->type;
 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
 
-	seq_printf(m,
-		   "link_type:\t%s\n"
-		   "link_id:\t%u\n",
-		   bpf_link_type_strs[link->type],
-		   link->id);
+	if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) {
+		seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]);
+	} else {
+		WARN_ONCE(1, "missing BPF_LINK_TYPE(...) for link type %u\n", type);
+		seq_printf(m, "link_type:\t<%u>\n", type);
+	}
+	seq_printf(m, "link_id:\t%u\n", link->id);
+
 	if (prog) {
 		bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
 		seq_printf(m,
@@ -3666,15 +3670,16 @@ static void bpf_perf_link_dealloc(struct bpf_link *link)
 }
 
 static int bpf_perf_link_fill_common(const struct perf_event *event,
-				     char __user *uname, u32 ulen,
+				     char __user *uname, u32 *ulenp,
 				     u64 *probe_offset, u64 *probe_addr,
 				     u32 *fd_type, unsigned long *missed)
 {
 	const char *buf;
-	u32 prog_id;
+	u32 prog_id, ulen;
 	size_t len;
 	int err;
 
+	ulen = *ulenp;
 	if (!ulen ^ !uname)
 		return -EINVAL;
 
@@ -3682,10 +3687,17 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
 			      probe_offset, probe_addr, missed);
 	if (err)
 		return err;
-	if (!uname)
-		return 0;
+
 	if (buf) {
 		len = strlen(buf);
+		*ulenp = len + 1;
+	} else {
+		*ulenp = 1;
+	}
+	if (!uname)
+		return 0;
+
+	if (buf) {
 		err = bpf_copy_to_user(uname, buf, ulen, len);
 		if (err)
 			return err;
@@ -3710,7 +3722,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
 
 	uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
 	ulen = info->perf_event.kprobe.name_len;
-	err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
+	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
 					&type, &missed);
 	if (err)
 		return err;
@@ -3718,7 +3730,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
 		info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
 	else
 		info->perf_event.type = BPF_PERF_EVENT_KPROBE;
-
+	info->perf_event.kprobe.name_len = ulen;
 	info->perf_event.kprobe.offset = offset;
 	info->perf_event.kprobe.missed = missed;
 	if (!kallsyms_show_value(current_cred()))
@@ -3740,7 +3752,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
 
 	uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
 	ulen = info->perf_event.uprobe.name_len;
-	err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
+	err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
 					&type, NULL);
 	if (err)
 		return err;
@@ -3749,6 +3761,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
 		info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
 	else
 		info->perf_event.type = BPF_PERF_EVENT_UPROBE;
+	info->perf_event.uprobe.name_len = ulen;
 	info->perf_event.uprobe.offset = offset;
 	info->perf_event.uprobe.cookie = event->bpf_cookie;
 	return 0;
@@ -3774,12 +3787,18 @@ static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
 {
 	char __user *uname;
 	u32 ulen;
+	int err;
 
 	uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
 	ulen = info->perf_event.tracepoint.name_len;
+	err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
+	if (err)
+		return err;
+
 	info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
+	info->perf_event.tracepoint.name_len = ulen;
 	info->perf_event.tracepoint.cookie = event->bpf_cookie;
-	return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
+	return 0;
 }
 
 static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
@@ -5978,7 +5997,7 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
 	.arg1_type	= ARG_PTR_TO_MEM,
 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
 	.arg3_type	= ARG_ANYTHING,
-	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
+	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
 	.arg4_size	= sizeof(u64),
 };
 
@@ -99,7 +99,7 @@ static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *co
 	rcu_read_lock();
 	pid = find_pid_ns(common->pid, common->ns);
 	if (pid) {
-		task = get_pid_task(pid, PIDTYPE_TGID);
+		task = get_pid_task(pid, PIDTYPE_PID);
 		*tid = common->pid;
 	}
 	rcu_read_unlock();
@@ -2750,10 +2750,16 @@ static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
 		b->module = mod;
 		b->offset = offset;
 
+		/* sort() reorders entries by value, so b may no longer point
+		 * to the right entry after this
+		 */
 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
 		     kfunc_btf_cmp_by_off, NULL);
+	} else {
+		btf = b->btf;
 	}
-	return b->btf;
+
+	return btf;
 }
 
 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
@@ -6360,10 +6366,10 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
 
 	/* both of s64_max/s64_min positive or negative */
 	if ((s64_max >= 0) == (s64_min >= 0)) {
-		reg->smin_value = reg->s32_min_value = s64_min;
-		reg->smax_value = reg->s32_max_value = s64_max;
-		reg->umin_value = reg->u32_min_value = s64_min;
-		reg->umax_value = reg->u32_max_value = s64_max;
+		reg->s32_min_value = reg->smin_value = s64_min;
+		reg->s32_max_value = reg->smax_value = s64_max;
+		reg->u32_min_value = reg->umin_value = s64_min;
+		reg->u32_max_value = reg->umax_value = s64_max;
 		reg->var_off = tnum_range(s64_min, s64_max);
 		return;
 	}
@@ -7459,7 +7465,8 @@ mark:
 }
 
 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
-				   int access_size, bool zero_size_allowed,
+				   int access_size, enum bpf_access_type access_type,
+				   bool zero_size_allowed,
 				   struct bpf_call_arg_meta *meta)
 {
 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
@@ -7471,7 +7478,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 		return check_packet_access(env, regno, reg->off, access_size,
 					   zero_size_allowed);
 	case PTR_TO_MAP_KEY:
-		if (meta && meta->raw_mode) {
+		if (access_type == BPF_WRITE) {
 			verbose(env, "R%d cannot write into %s\n", regno,
 				reg_type_str(env, reg->type));
 			return -EACCES;
@@ -7479,15 +7486,13 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 		return check_mem_region_access(env, regno, reg->off, access_size,
 					       reg->map_ptr->key_size, false);
 	case PTR_TO_MAP_VALUE:
-		if (check_map_access_type(env, regno, reg->off, access_size,
-					  meta && meta->raw_mode ? BPF_WRITE :
-					  BPF_READ))
+		if (check_map_access_type(env, regno, reg->off, access_size, access_type))
 			return -EACCES;
 		return check_map_access(env, regno, reg->off, access_size,
 					zero_size_allowed, ACCESS_HELPER);
 	case PTR_TO_MEM:
 		if (type_is_rdonly_mem(reg->type)) {
-			if (meta && meta->raw_mode) {
+			if (access_type == BPF_WRITE) {
 				verbose(env, "R%d cannot write into %s\n", regno,
 					reg_type_str(env, reg->type));
 				return -EACCES;
@@ -7498,7 +7503,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 					       zero_size_allowed);
 	case PTR_TO_BUF:
 		if (type_is_rdonly_mem(reg->type)) {
-			if (meta && meta->raw_mode) {
+			if (access_type == BPF_WRITE) {
 				verbose(env, "R%d cannot write into %s\n", regno,
 					reg_type_str(env, reg->type));
 				return -EACCES;
@@ -7526,7 +7531,6 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 		 * Dynamically check it now.
 		 */
 		if (!env->ops->convert_ctx_access) {
-			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
 			int offset = access_size - 1;
 
 			/* Allow zero-byte read from PTR_TO_CTX */
@@ -7534,7 +7538,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
 				return zero_size_allowed ? 0 : -EACCES;
 
 			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
-						atype, -1, false, false);
+						access_type, -1, false, false);
 		}
 
 		fallthrough;
@@ -7559,6 +7563,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
  */
 static int check_mem_size_reg(struct bpf_verifier_env *env,
 			      struct bpf_reg_state *reg, u32 regno,
+			      enum bpf_access_type access_type,
 			      bool zero_size_allowed,
 			      struct bpf_call_arg_meta *meta)
 {
@@ -7574,15 +7579,12 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
 	 */
 	meta->msize_max_value = reg->umax_value;
 
-	/* The register is SCALAR_VALUE; the access check
-	 * happens using its boundaries.
+	/* The register is SCALAR_VALUE; the access check happens using
+	 * its boundaries. For unprivileged variable accesses, disable
+	 * raw mode so that the program is required to initialize all
+	 * the memory that the helper could just partially fill up.
 	 */
 	if (!tnum_is_const(reg->var_off))
-		/* For unprivileged variable accesses, disable raw
-		 * mode so that the program is required to
-		 * initialize all the memory that the helper could
-		 * just partially fill up.
-		 */
 		meta = NULL;
 
 	if (reg->smin_value < 0) {
@@ -7602,9 +7604,8 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
 			regno);
 		return -EACCES;
 	}
-	err = check_helper_mem_access(env, regno - 1,
-				      reg->umax_value,
-				      zero_size_allowed, meta);
+	err = check_helper_mem_access(env, regno - 1, reg->umax_value,
+				      access_type, zero_size_allowed, meta);
 	if (!err)
 		err = mark_chain_precision(env, regno);
 	return err;
@@ -7615,13 +7616,11 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
 {
 	bool may_be_null = type_may_be_null(reg->type);
 	struct bpf_reg_state saved_reg;
-	struct bpf_call_arg_meta meta;
 	int err;
 
 	if (register_is_null(reg))
 		return 0;
 
-	memset(&meta, 0, sizeof(meta));
 	/* Assuming that the register contains a value check if the memory
 	 * access is safe. Temporarily save and restore the register's state as
 	 * the conversion shouldn't be visible to a caller.
@@ -7631,10 +7630,8 @@ static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg
 		mark_ptr_not_null_reg(reg);
 	}
 
-	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
-	/* Check access for BPF_WRITE */
-	meta.raw_mode = true;
-	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
+	err = check_helper_mem_access(env, regno, mem_size, BPF_READ, true, NULL);
+	err = err ?: check_helper_mem_access(env, regno, mem_size, BPF_WRITE, true, NULL);
 
 	if (may_be_null)
 		*reg = saved_reg;
@@ -7660,13 +7657,12 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg
 		mark_ptr_not_null_reg(mem_reg);
 	}
 
-	err = check_mem_size_reg(env, reg, regno, true, &meta);
-	/* Check access for BPF_WRITE */
-	meta.raw_mode = true;
-	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
+	err = check_mem_size_reg(env, reg, regno, BPF_READ, true, &meta);
+	err = err ?: check_mem_size_reg(env, reg, regno, BPF_WRITE, true, &meta);
 
 	if (may_be_null)
 		*mem_reg = saved_reg;
 
 	return err;
 }
@@ -8969,9 +8965,8 @@ skip_type_check:
 			verbose(env, "invalid map_ptr to access map->key\n");
 			return -EACCES;
 		}
-		err = check_helper_mem_access(env, regno,
-					      meta->map_ptr->key_size, false,
-					      NULL);
+		err = check_helper_mem_access(env, regno, meta->map_ptr->key_size,
+					      BPF_READ, false, NULL);
 		break;
 	case ARG_PTR_TO_MAP_VALUE:
 		if (type_may_be_null(arg_type) && register_is_null(reg))
@@ -8986,9 +8981,9 @@ skip_type_check:
 			return -EACCES;
 		}
 		meta->raw_mode = arg_type & MEM_UNINIT;
-		err = check_helper_mem_access(env, regno,
-					      meta->map_ptr->value_size, false,
-					      meta);
+		err = check_helper_mem_access(env, regno, meta->map_ptr->value_size,
					      arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ,
+					      false, meta);
 		break;
 	case ARG_PTR_TO_PERCPU_BTF_ID:
 		if (!reg->btf_id) {
@@ -9030,7 +9025,9 @@ skip_type_check:
 		 */
 		meta->raw_mode = arg_type & MEM_UNINIT;
 		if (arg_type & MEM_FIXED_SIZE) {
-			err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta);
+			err = check_helper_mem_access(env, regno, fn->arg_size[arg],
+						      arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ,
+						      false, meta);
 			if (err)
 				return err;
 			if (arg_type & MEM_ALIGNED)
@@ -9038,10 +9035,16 @@ skip_type_check:
 		}
 		break;
 	case ARG_CONST_SIZE:
-		err = check_mem_size_reg(env, reg, regno, false, meta);
+		err = check_mem_size_reg(env, reg, regno,
+					 fn->arg_type[arg - 1] & MEM_WRITE ?
+					 BPF_WRITE : BPF_READ,
+					 false, meta);
 		break;
 	case ARG_CONST_SIZE_OR_ZERO:
-		err = check_mem_size_reg(env, reg, regno, true, meta);
+		err = check_mem_size_reg(env, reg, regno,
+					 fn->arg_type[arg - 1] & MEM_WRITE ?
+					 BPF_WRITE : BPF_READ,
+					 true, meta);
 		break;
 	case ARG_PTR_TO_DYNPTR:
 		err = process_dynptr_func(env, regno, insn_idx, arg_type, 0);
@@ -14296,12 +14299,13 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	 * r1 += 0x1
 	 * if r2 < 1000 goto ...
 	 * use r1 in memory access
-	 * So remember constant delta between r2 and r1 and update r1 after
-	 * 'if' condition.
+	 * So for 64-bit alu remember constant delta between r2 and r1 and
+	 * update r1 after 'if' condition.
 	 */
-	if (env->bpf_capable && BPF_OP(insn->code) == BPF_ADD &&
-	    dst_reg->id && is_reg_const(src_reg, alu32)) {
-		u64 val = reg_const_value(src_reg, alu32);
+	if (env->bpf_capable &&
+	    BPF_OP(insn->code) == BPF_ADD && !alu32 &&
+	    dst_reg->id && is_reg_const(src_reg, false)) {
+		u64 val = reg_const_value(src_reg, false);
 
 		if ((dst_reg->id & BPF_ADD_CONST) ||
 		    /* prevent overflow in sync_linked_regs() later */
@@ -15358,8 +15362,12 @@ static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_s
 			continue;
 		if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) ||
 		    reg->off == known_reg->off) {
+			s32 saved_subreg_def = reg->subreg_def;
+
 			copy_register_state(reg, known_reg);
+			reg->subreg_def = saved_subreg_def;
 		} else {
+			s32 saved_subreg_def = reg->subreg_def;
 			s32 saved_off = reg->off;
 
 			fake_reg.type = SCALAR_VALUE;
@@ -15372,6 +15380,7 @@ static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_s
 			 * otherwise another sync_linked_regs() will be incorrect.
 			 */
 			reg->off = saved_off;
+			reg->subreg_def = saved_subreg_def;
 
 			scalar32_min_max_add(reg, &fake_reg);
 			scalar_min_max_add(reg, &fake_reg);
@@ -21230,7 +21239,7 @@ patch_map_ops_generic:
 			delta    += cnt - 1;
 			env->prog = prog = new_prog;
 			insn      = new_prog->insnsi + i + delta;
-			continue;
+			goto next_insn;
 		}
 
 		/* Implement bpf_kptr_xchg inline */
@@ -22339,7 +22348,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	/* 'struct bpf_verifier_env' can be global, but since it's not small,
 	 * allocate/free it every time bpf_check() is called
 	 */
-	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
+	env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
 	if (!env)
 		return -ENOMEM;
 
@@ -22575,6 +22584,6 @@ err_unlock:
 		mutex_unlock(&bpf_verifier_lock);
 	vfree(env->insn_aux_data);
 err_free_env:
-	kfree(env);
+	kvfree(env);
 	return ret;
 }