mirror of
https://github.com/raspberrypi/linux.git
synced 2025-12-06 01:49:46 +00:00
[ Upstream commit 2c8c89b958 ]
The paravirt queued spinlock slow path adds itself to the queue then
calls pv_wait to wait for the lock to become free. This is implemented
by calling H_CONFER to donate cycles.
When hcall tracing is enabled, this H_CONFER call can lead to a spin
lock being taken in the tracing code, which results in the lock being
taken again; that nested acquisition also goes to the slow path, queues
behind the first instance, and so never makes progress.
An example trace of a deadlock:
__pv_queued_spin_lock_slowpath
trace_clock_global
ring_buffer_lock_reserve
trace_event_buffer_lock_reserve
trace_event_buffer_reserve
trace_event_raw_event_hcall_exit
__trace_hcall_exit
plpar_hcall_norets_trace
__pv_queued_spin_lock_slowpath
trace_clock_global
ring_buffer_lock_reserve
trace_event_buffer_lock_reserve
trace_event_buffer_reserve
trace_event_raw_event_rcu_dyntick
rcu_irq_exit
irq_exit
__do_irq
call_do_irq
do_IRQ
hardware_interrupt_common_virt
Fix this by introducing plpar_hcall_norets_notrace(), and using it for
the SPLPAR virtual processor dispatching hcalls made by the paravirt
spinlock code.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210508101455.1578318-2-npiggin@gmail.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
104 lines
2.7 KiB
C
104 lines
2.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
DECLARE_STATIC_KEY_FALSE(shared_processor);
static inline bool is_shared_processor(void)
|
|
{
|
|
return static_branch_unlikely(&shared_processor);
|
|
}
|
|
|
|
/* If bit 0 is set, the cpu has been preempted */
|
|
static inline u32 yield_count_of(int cpu)
|
|
{
|
|
__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
|
|
return be32_to_cpu(yield_count);
|
|
}
|
|
|
|
/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it can not acquire the lock, and unlock slow path might prod if a waiter has
 * yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so then
 * recursing on that lock will cause the task to then queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */

/*
 * Confer our cycles to the hardware thread backing @cpu via H_CONFER,
 * using the untraced hcall entry point per the comment above.
 * @yield_count is the count previously sampled (e.g. via yield_count_of());
 * presumably the hypervisor uses it to ignore stale confers — see the PAPR
 * H_CONFER specification.
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
/*
 * Wake @cpu with H_PROD (e.g. after it conferred its cycles). Uses the
 * untraced hcall variant because this runs from spinlock slow paths,
 * where hcall tracing can deadlock (see comment above yield_to_preempted).
 */
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}
/*
 * Confer our cycles with no specific target (-1) and no yield count (0);
 * per PAPR this lets the hypervisor dispatch any preempted vCPU. Untraced
 * for the same recursion-deadlock reason as the other spinlock hcalls.
 */
static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
|
|
/* Without CONFIG_PPC_SPLPAR there are no shared-processor LPARs. */
static inline bool is_shared_processor(void)
{
	return false;
}
/*
 * !CONFIG_PPC_SPLPAR stub: no lppaca, so no yield count. Returning 0
 * (bit 0 clear) makes vcpu_is_preempted() below report "not preempted".
 */
static inline u32 yield_count_of(int cpu)
{
	return 0;
}
/*
 * ___bad_yield_to_preempted() appears intentionally left undefined, so any
 * !CONFIG_PPC_SPLPAR caller of yield_to_preempted() fails at link time
 * rather than silently doing nothing.
 */
extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}
/*
 * Same link-time trap as above: ___bad_yield_to_any() appears intentionally
 * undefined, so calling yield_to_any() without CONFIG_PPC_SPLPAR breaks the
 * build instead of misbehaving at runtime.
 */
extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}
/*
 * Same link-time trap as above: ___bad_prod_cpu() appears intentionally
 * undefined, so calling prod_cpu() without CONFIG_PPC_SPLPAR fails to link.
 */
extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}
#endif

#define vcpu_is_preempted vcpu_is_preempted
/*
 * Report whether the vCPU backing @cpu is currently preempted by the
 * hypervisor. Only meaningful on shared-processor LPARs; an odd yield
 * count marks a preempted target (see the comment above yield_count_of()).
 */
static inline bool vcpu_is_preempted(int cpu)
{
	if (is_shared_processor())
		return (yield_count_of(cpu) & 1) != 0;

	return false;
}
/*
 * The native spin-unlock path is used except on shared-processor LPARs
 * (where, as the name suggests, a paravirt unlock is substituted).
 */
static inline bool pv_is_native_spin_unlock(void)
{
	if (is_shared_processor())
		return false;

	return true;
}
#endif /* _ASM_POWERPC_PARAVIRT_H */