Merge branch 'linus' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -103,7 +103,7 @@ out:
 	preempt_enable();
 }
 
-static bool __always_inline
+static __always_inline bool
 ring_buffer_has_space(unsigned long head, unsigned long tail,
 		      unsigned long data_size, unsigned int size,
 		      bool backward)
@@ -114,7 +114,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
 	return CIRC_SPACE(tail, head, data_size) >= size;
 }
 
-static int __always_inline
+static __always_inline int
 __perf_output_begin(struct perf_output_handle *handle,
 		    struct perf_event *event, unsigned int size,
 		    bool backward)
@@ -414,7 +414,7 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
 
-static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
 {
 	if (rb->aux_overwrite)
 		return false;
@@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
 	BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
 	BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
 	BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+	BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
 };
 
 static void
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_forward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_backward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	if (unlikely(!debug_locks))
 		return;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
 	might_sleep();
 
 	__down_read(sem);
+	rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read_non_owner);
@@ -251,10 +251,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
  * respect to other threads scheduled on the same CPU, and with respect
  * to signal handlers.
  */
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 {
 	struct task_struct *t = current;
-	int ret;
+	int ret, sig;
 
 	if (unlikely(t->flags & PF_EXITING))
 		return;
@@ -268,7 +268,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
 		return;
 
 error:
-	force_sig(SIGSEGV, t);
+	sig = ksig ? ksig->sig : 0;
+	force_sigsegv(sig, t);
 }
 
 #ifdef CONFIG_DEBUG_RSEQ
@@ -139,9 +139,13 @@ static void __local_bh_enable(unsigned int cnt)
 {
 	lockdep_assert_irqs_disabled();
 
+	if (preempt_count() == cnt)
+		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 		trace_softirqs_on(_RET_IP_);
-	preempt_count_sub(cnt);
+
+	__preempt_count_sub(cnt);
 }
 
 /*
@@ -1659,7 +1659,7 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
 {
 	switch(restart->nanosleep.type) {
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
 	case TT_COMPAT:
 		if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
 			return -EFAULT;
@@ -604,7 +604,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	/*
 	 * Disarm any old timer after extracting its expiry time.
 	 */
-	lockdep_assert_irqs_disabled();
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
@@ -1049,7 +1048,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
-	lockdep_assert_irqs_disabled();
 	arm_timer(timer);
 unlock:
 	unlock_task_sighand(p, &flags);
@@ -28,6 +28,7 @@
  */
 
 #include <linux/export.h>
+#include <linux/kernel.h>
 #include <linux/timex.h>
 #include <linux/capability.h>
 #include <linux/timekeeper_internal.h>
@@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
 	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 #else
 # if BITS_PER_LONG == 32
-	return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+	return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
+	       HZ_TO_MSEC_SHR32;
 # else
-	return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+	return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
 # endif
 #endif
 }
@@ -1360,8 +1360,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct ring_buffer *buf;
-
 	if (tr->stop_count)
 		return;
 
@@ -1375,9 +1373,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&tr->max_lock);
 
-	buf = tr->trace_buffer.buffer;
-	tr->trace_buffer.buffer = tr->max_buffer.buffer;
-	tr->max_buffer.buffer = buf;
+	swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
 
 	__update_max_tr(tr, tsk, cpu);
 	arch_spin_unlock(&tr->max_lock);
@@ -78,7 +78,8 @@ static const char * ops[] = { OPS };
 	C(TOO_MANY_PREDS,	"Too many terms in predicate expression"), \
 	C(INVALID_FILTER,	"Meaningless filter expression"), \
 	C(IP_FIELD_ONLY,	"Only 'ip' field is supported for function trace"), \
-	C(INVALID_VALUE,	"Invalid value (did you forget quotes)?"),
+	C(INVALID_VALUE,	"Invalid value (did you forget quotes)?"), \
+	C(NO_FILTER,		"No filter found"),
 
 #undef C
 #define C(a, b)		FILT_ERR_##a
@@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
 		goto out_free;
 	}
 
+	if (!N) {
+		/* No program? */
+		ret = -EINVAL;
+		parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
+		goto out_free;
+	}
+
 	prog[N].pred = NULL;	/* #13 */
 	prog[N].target = 1;	/* TRUE */
 	prog[N+1].pred = NULL;