sched/ext: convert scx_tasks_lock to raw spinlock
Update scx_tasks_lock so that it's safe to lock/unlock in a non-sleepable context in PREEMPT_RT kernels. scx_tasks_lock is a (non-raw) spinlock used to protect the list of tasks under SCX. This list is updated from finish_task_switch(), which cannot sleep. Regular spinlocks can be locked in such a context in non-RT kernels, but are sleepable when CONFIG_PREEMPT_RT=y. Convert scx_tasks_lock into a raw spinlock, which is not sleepable even on RT kernels.

Sample backtrace:

<TASK>
 dump_stack_lvl+0x83/0xa0
 __might_resched+0x14a/0x200
 rt_spin_lock+0x61/0x1c0
 ? sched_ext_dead+0x2d/0xf0
 ? lock_release+0xc6/0x280
 sched_ext_dead+0x2d/0xf0
 ? srso_alias_return_thunk+0x5/0xfbef5
 finish_task_switch.isra.0+0x254/0x360
 __schedule+0x584/0x11d0
 ? srso_alias_return_thunk+0x5/0xfbef5
 ? srso_alias_return_thunk+0x5/0xfbef5
 ? tick_nohz_idle_exit+0x7e/0x120
 schedule_idle+0x23/0x40
 cpu_startup_entry+0x29/0x30
 start_secondary+0xf8/0x100
 common_startup_64+0x13e/0x148
</TASK>

Signed-off-by: Emil Tsalapatis <emil@etsalapatis.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
committed by Tejun Heo
parent 5f02151c41
commit c87488a123
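For readers unfamiliar with the distinction the patch turns on: under CONFIG_PREEMPT_RT=y a regular spinlock_t is backed by an rtmutex and may sleep, while a raw_spinlock_t keeps the classic non-sleeping busy-wait behavior on both RT and non-RT kernels. Below is a minimal sketch of the resulting pattern using the standard kernel spinlock API; the example_lock, example_list, and example_del names are hypothetical and not taken from the patch.

	#include <linux/list.h>
	#include <linux/spinlock.h>

	/*
	 * A raw spinlock never sleeps, on RT or non-RT kernels, so it
	 * can protect a list that is updated from a non-sleepable
	 * context such as finish_task_switch().
	 */
	static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical */
	static LIST_HEAD(example_list);			/* hypothetical */

	static void example_del(struct list_head *node)
	{
		unsigned long flags;

		/* Safe even with CONFIG_PREEMPT_RT=y: cannot sleep. */
		raw_spin_lock_irqsave(&example_lock, flags);
		list_del_init(node);
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}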
@@ -25,7 +25,7 @@ static struct scx_sched __rcu *scx_root;
  * guarantee system safety. Maintain a dedicated task list which contains every
  * task between its fork and eventual free.
  */
-static DEFINE_SPINLOCK(scx_tasks_lock);
+static DEFINE_RAW_SPINLOCK(scx_tasks_lock);
 static LIST_HEAD(scx_tasks);

 /* ops enable/disable */
@@ -476,7 +476,7 @@ static void scx_task_iter_start(struct scx_task_iter *iter)
 	BUILD_BUG_ON(__SCX_DSQ_ITER_ALL_FLAGS &
 		     ((1U << __SCX_DSQ_LNODE_PRIV_SHIFT) - 1));

-	spin_lock_irq(&scx_tasks_lock);
+	raw_spin_lock_irq(&scx_tasks_lock);

 	iter->cursor = (struct sched_ext_entity){ .flags = SCX_TASK_CURSOR };
 	list_add(&iter->cursor.tasks_node, &scx_tasks);
@@ -507,14 +507,14 @@ static void scx_task_iter_unlock(struct scx_task_iter *iter)
 	__scx_task_iter_rq_unlock(iter);
 	if (iter->list_locked) {
 		iter->list_locked = false;
-		spin_unlock_irq(&scx_tasks_lock);
+		raw_spin_unlock_irq(&scx_tasks_lock);
 	}
 }

 static void __scx_task_iter_maybe_relock(struct scx_task_iter *iter)
 {
 	if (!iter->list_locked) {
-		spin_lock_irq(&scx_tasks_lock);
+		raw_spin_lock_irq(&scx_tasks_lock);
 		iter->list_locked = true;
 	}
 }
@@ -2940,9 +2940,9 @@ void scx_post_fork(struct task_struct *p)
 		}
 	}

-	spin_lock_irq(&scx_tasks_lock);
+	raw_spin_lock_irq(&scx_tasks_lock);
 	list_add_tail(&p->scx.tasks_node, &scx_tasks);
-	spin_unlock_irq(&scx_tasks_lock);
+	raw_spin_unlock_irq(&scx_tasks_lock);

 	percpu_up_read(&scx_fork_rwsem);
 }
@@ -2966,9 +2966,9 @@ void sched_ext_free(struct task_struct *p)
 {
 	unsigned long flags;

-	spin_lock_irqsave(&scx_tasks_lock, flags);
+	raw_spin_lock_irqsave(&scx_tasks_lock, flags);
 	list_del_init(&p->scx.tasks_node);
-	spin_unlock_irqrestore(&scx_tasks_lock, flags);
+	raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);

 	/*
 	 * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
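One note on the two locking variants visible in the hunks above; this is generic kernel locking semantics rather than anything added by this patch. The _irq variants unconditionally disable interrupts on lock and re-enable them on unlock, so they are only correct where interrupts are known to be enabled on entry, whereas the _irqsave/_irqrestore pair used in sched_ext_free() saves and restores whatever interrupt state the caller had, making it safe from contexts where that state is unknown. A sketch with a hypothetical demo_lock:

	static DEFINE_RAW_SPINLOCK(demo_lock);	/* hypothetical */

	/* Only valid if IRQs are enabled on entry: unlock re-enables them. */
	static void demo_irq(void)
	{
		raw_spin_lock_irq(&demo_lock);
		/* ... critical section ... */
		raw_spin_unlock_irq(&demo_lock);
	}

	/* Valid from any context: the caller's IRQ state is restored. */
	static void demo_irqsave(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_lock, flags);
		/* ... critical section ... */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}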