mirror of
https://github.com/raspberrypi/linux.git
synced 2025-12-08 02:49:48 +00:00
commit ea81174789 upstream. Mike reported that commit 7d1a9417 ("x86: Use generic idle loop") regressed several workloads and caused excessive reschedule interrupts. The patch in question failed to notice that the x86 code had an inverted sense of the polling state versus the new generic code (x86: default polling, generic: default !polling). Fix the two prominent x86 mwait based idle drivers and introduce a few new generic polling helpers (fixing the wrong smp_mb__after_clear_bit usage). Also switch the idle routines to using tif_need_resched() which is an immediate TIF_NEED_RESCHED test as opposed to need_resched() which will end up being slightly different. Reported-by: Mike Galbraith <bitbucket@online.de> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: lenb@kernel.org Cc: tglx@linutronix.de Link: http://lkml.kernel.org/n/tip-nc03imb0etuefmzybzj7sprf@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
135 lines
3.0 KiB
C
135 lines
3.0 KiB
C
/*
|
|
* Generic entry point for the idle threads
|
|
*/
|
|
#include <linux/sched.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/tick.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/stackprotector.h>
|
|
|
|
#include <asm/tlb.h>
|
|
|
|
#include <trace/events/power.h>
|
|
|
|
static int __read_mostly cpu_idle_force_poll;
|
|
|
|
void cpu_idle_poll_ctrl(bool enable)
|
|
{
|
|
if (enable) {
|
|
cpu_idle_force_poll++;
|
|
} else {
|
|
cpu_idle_force_poll--;
|
|
WARN_ON_ONCE(cpu_idle_force_poll < 0);
|
|
}
|
|
}
|
|
|
|
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
|
|
static int __init cpu_idle_poll_setup(char *__unused)
|
|
{
|
|
cpu_idle_force_poll = 1;
|
|
return 1;
|
|
}
|
|
__setup("nohlt", cpu_idle_poll_setup);
|
|
|
|
static int __init cpu_idle_nopoll_setup(char *__unused)
|
|
{
|
|
cpu_idle_force_poll = 0;
|
|
return 1;
|
|
}
|
|
__setup("hlt", cpu_idle_nopoll_setup);
|
|
#endif
|
|
|
|
/*
 * Polling idle: spin with interrupts enabled until a reschedule is
 * pending, instead of entering an arch specific low power state.
 *
 * RCU is told this CPU is idle around the spin (rcu_idle_enter/exit)
 * so grace periods need not wait on it, and trace events bracket the
 * idle residency for the power tracer.
 */
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	/*
	 * tif_need_resched() tests TIF_NEED_RESCHED directly — an
	 * immediate flag test, as opposed to need_resched().
	 */
	while (!tif_need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}
|
|
|
|
/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }

/*
 * Default arch_cpu_idle(): the arch provides no low power idle state,
 * so latch the idle loop into polling mode from here on and re-enable
 * interrupts (the caller invokes arch_cpu_idle() with interrupts
 * disabled and warns if they come back disabled — see cpu_idle_loop()).
 */
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}
|
|
|
|
/*
|
|
* Generic idle loop implementation
|
|
*/
|
|
/*
 * Generic idle loop implementation
 *
 * Runs forever on the idle task: each outer iteration stops the tick,
 * idles until a reschedule is needed, restarts the tick and schedules.
 * The call to schedule_preempt_disabled() implies this loop runs with
 * preemption disabled.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			/* order the need_resched() read vs. the work below */
			rmb();

			/* a CPU torn down while idling never returns from here */
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
				cpu_idle_poll();
			} else {
				/*
				 * Drop the polling flag and re-test
				 * TIF_NEED_RESCHED in one step: if a
				 * reschedule sneaked in, skip the deep
				 * idle entry entirely.
				 */
				if (!current_clr_polling_and_test()) {
					stop_critical_timings();
					rcu_idle_enter();
					/* called with irqs off; must re-enable them */
					arch_cpu_idle();
					WARN_ON_ONCE(irqs_disabled());
					rcu_idle_exit();
					start_critical_timings();
				} else {
					local_irq_enable();
				}
				/* back to polling state for the next pass */
				__current_set_polling();
			}
			arch_cpu_idle_exit();
		}
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
|
|
|
|
/*
 * Entry point for a CPU's idle task.  Never returns: after arch
 * preparation it enters cpu_idle_loop() forever.
 *
 * @state: hotplug state of the calling CPU (currently unused here;
 *         presumably kept for the arch/hotplug callers — the visible
 *         body never reads it).
 */
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we wont ever return from this function (so the invalid
	 * canaries already on the stack wont ever trigger).
	 */
	boot_init_stack_canary();
#endif
	/* start in polling state, matching the idle loop's expectation */
	__current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}
|