mirror of
https://github.com/raspberrypi/linux.git
synced 2025-12-06 01:49:46 +00:00
ftrace: bpf: Fix IPMODIFY + DIRECT in modify_ftrace_direct()
ftrace_hash_ipmodify_enable() checks IPMODIFY and DIRECT ftrace_ops on the same kernel function. When needed, ftrace_hash_ipmodify_enable() calls ops->ops_func() to prepare the direct ftrace (BPF trampoline) to share the same function as the IPMODIFY ftrace (livepatch). ftrace_hash_ipmodify_enable() is called in register_ftrace_direct() path, but not called in modify_ftrace_direct() path. As a result, the following operations will break livepatch: 1. Load livepatch to a kernel function; 2. Attach fentry program to the kernel function; 3. Attach fexit program to the kernel function. After 3, the kernel function being used will not be the livepatched version, but the original version. Fix this by adding __ftrace_hash_update_ipmodify() to __modify_ftrace_direct() and adjust some logic around the call. Signed-off-by: Song Liu <song@kernel.org> Reviewed-by: Jiri Olsa <jolsa@kernel.org> Link: https://lore.kernel.org/r/20251027175023.1521602-3-song@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
This commit is contained in: 3e9a18e1c3
Committed by: Alexei Starovoitov
Parent: 56b3c85e15
Commit: 3e9a18e1c3
@@ -1971,7 +1971,8 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops)
  */
 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
 					 struct ftrace_hash *old_hash,
-					 struct ftrace_hash *new_hash)
+					 struct ftrace_hash *new_hash,
+					 bool update_target)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec, *end = NULL;
@@ -2006,10 +2007,13 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
 		if (rec->flags & FTRACE_FL_DISABLED)
 			continue;

-		/* We need to update only differences of filter_hash */
+		/*
+		 * Unless we are updating the target of a direct function,
+		 * we only need to update differences of filter_hash
+		 */
 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
-		if (in_old == in_new)
+		if (!update_target && (in_old == in_new))
 			continue;

 		if (in_new) {
@@ -2020,7 +2024,16 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
 			if (is_ipmodify)
 				goto rollback;

-			FTRACE_WARN_ON(rec->flags & FTRACE_FL_DIRECT);
+			/*
+			 * If this is called by __modify_ftrace_direct()
+			 * then it is only changing where the direct
+			 * pointer is jumping to, and the record already
+			 * points to a direct trampoline. If it isn't,
+			 * then it is a bug to update ipmodify on a direct
+			 * caller.
+			 */
+			FTRACE_WARN_ON(!update_target &&
+				       (rec->flags & FTRACE_FL_DIRECT));

 			/*
 			 * Another ops with IPMODIFY is already
@@ -2076,7 +2089,7 @@ static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
 	if (ftrace_hash_empty(hash))
 		hash = NULL;

-	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
+	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash, false);
 }

 /* Disabling always succeeds */
@@ -2087,7 +2100,7 @@ static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
 	if (ftrace_hash_empty(hash))
 		hash = NULL;

-	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
+	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH, false);
 }

 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
@@ -2101,7 +2114,7 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
 	if (ftrace_hash_empty(new_hash))
 		new_hash = NULL;

-	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
+	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash, false);
 }

 static void print_ip_ins(const char *fmt, const unsigned char *p)
@@ -6114,7 +6127,7 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
 static int
 __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 {
-	struct ftrace_hash *hash;
+	struct ftrace_hash *hash = ops->func_hash->filter_hash;
 	struct ftrace_func_entry *entry, *iter;
 	static struct ftrace_ops tmp_ops = {
 		.func = ftrace_stub,
@@ -6134,13 +6147,21 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
 	if (err)
 		return err;

+	/*
+	 * Call __ftrace_hash_update_ipmodify() here, so that we can call
+	 * ops->ops_func for the ops. This is needed because the above
+	 * register_ftrace_function_nolock() worked on tmp_ops.
+	 */
+	err = __ftrace_hash_update_ipmodify(ops, hash, hash, true);
+	if (err)
+		goto out;
+
 	/*
 	 * Now the ftrace_ops_list_func() is called to do the direct callers.
 	 * We can safely change the direct functions attached to each entry.
 	 */
 	mutex_lock(&ftrace_lock);

-	hash = ops->func_hash->filter_hash;
 	size = 1 << hash->size_bits;
 	for (i = 0; i < size; i++) {
 		hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
@@ -6155,6 +6176,7 @@ __modify_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)

 	mutex_unlock(&ftrace_lock);

+ out:
 	/* Removing the tmp_ops will add the updated direct callers to the functions */
 	unregister_ftrace_function(&tmp_ops);
Reference in New Issue
Block a user