Merge branch 'sched/urgent' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>

@@ -3862,10 +3862,8 @@ static void _free_event(struct perf_event *event)
 	if (event->ctx)
 		put_ctx(event->ctx);
 
-	if (event->pmu) {
-		exclusive_event_destroy(event);
-		module_put(event->pmu->module);
-	}
+	exclusive_event_destroy(event);
+	module_put(event->pmu->module);
 
 	call_rcu(&event->rcu_head, free_event_rcu);
 }

@@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
-	struct page *page;
+	struct page *page, *tail;
 	struct address_space *mapping;
 	int err, ro = 0;
 

@@ -530,7 +530,15 @@ again:
 	 * considered here and page lock forces unnecessarily serialization
 	 * From this point on, mapping will be re-verified if necessary and
 	 * page lock will be acquired only if it is unavoidable
+	 *
+	 * Mapping checks require the head page for any compound page so the
+	 * head page and mapping is looked up now. For anonymous pages, it
+	 * does not matter if the page splits in the future as the key is
+	 * based on the address. For filesystem-backed pages, the tail is
+	 * required as the index of the page determines the key. For
+	 * base pages, there is no tail page and tail == page.
 	 */
+	tail = page;
 	page = compound_head(page);
 	mapping = READ_ONCE(page->mapping);
 

@@ -654,7 +662,7 @@ again:
 
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 		key->shared.inode = inode;
-		key->shared.pgoff = basepage_index(page);
+		key->shared.pgoff = basepage_index(tail);
 		rcu_read_unlock();
 	}
 

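The comment added above explains the head/tail split: mapping checks want the compound head, but the key for a file-backed futex must come from the index of the base page actually mapped at the faulting address, which is what basepage_index(tail) provides. Below is a minimal user-space sketch of that distinction; struct fake_page, fake_compound_head() and fake_basepage_index() are made-up stand-ins for struct page, compound_head() and basepage_index(), not kernel APIs, and the layout is deliberately simplified.

/*
 * Sketch only: models one compound page whose base pages map different
 * offsets of the same file, and shows why a key derived from the head's
 * index would collide while a key derived from the tail's index does not.
 */
#include <assert.h>
#include <stdio.h>

struct fake_page {
	struct fake_page *head;	/* compound head (== itself for base pages) */
	unsigned long index;	/* file offset of this base page, in pages */
};

/* hypothetical stand-in for compound_head() */
static struct fake_page *fake_compound_head(struct fake_page *p)
{
	return p->head;
}

/* hypothetical stand-in for basepage_index() */
static unsigned long fake_basepage_index(struct fake_page *p)
{
	return p->index;
}

int main(void)
{
	/* one compound page covering file offsets 0..3 (in base pages) */
	struct fake_page head  = { .head = &head, .index = 0 };
	struct fake_page tail2 = { .head = &head, .index = 2 };
	struct fake_page tail3 = { .head = &head, .index = 3 };

	/* mapping checks need the head ... */
	assert(fake_compound_head(&tail2) == &head);

	/* ... but a key based on the head's index cannot tell two futexes
	 * at different file offsets apart, while the tail's index can. */
	printf("key via head: %lu vs %lu (collision)\n",
	       fake_compound_head(&tail2)->index,
	       fake_compound_head(&tail3)->index);
	printf("key via tail: %lu vs %lu (distinct)\n",
	       fake_basepage_index(&tail2),
	       fake_basepage_index(&tail3));
	return 0;
}
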
@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 	if (!hold_ctx)
 		return 0;
 
-	if (unlikely(ctx == hold_ctx))
-		return -EALREADY;
-
 	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
 	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
 #ifdef CONFIG_DEBUG_MUTEXES

@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	unsigned long flags;
 	int ret;
 
+	if (use_ww_ctx) {
+		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
+			return -EALREADY;
+	}
+
 	preempt_disable();
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 

@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
+ *
+ *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourseves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more intersting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+	u32 val;
+
+	for (;;) {
+		val = atomic_read(&lock->val);
+
+		if (!val) /* not locked, we're done */
+			goto done;
+
+		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+			break;
+
+		/* not locked, but pending, wait until we observe the lock */
+		cpu_relax();
+	}
+
+	/* any unlock is good */
+	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+		cpu_relax();
+
+done:
+	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**

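The comment block added above argues that although the locked-byte store may be unordered, some non-zero state is always visible first, so queued_spin_unlock_wait() can wait in two phases: first for the locked byte to appear once the word is non-zero, then for any unlock. The sketch below is a user-space model of that control flow using C11 atomics; model_qspinlock, model_unlock_wait() and MODEL_LOCKED_MASK are made-up names, and the trailing acquire fence only approximates the kernel's smp_rmb()-based CTRL + RMB -> ACQUIRE ordering rather than reproducing it.

/*
 * Sketch only: mirrors the two-phase wait of queued_spin_unlock_wait()
 * outside the kernel. Not the kernel implementation.
 */
#include <stdatomic.h>
#include <sched.h>
#include <stdio.h>

#define MODEL_LOCKED_MASK	0xffU	/* stand-in for _Q_LOCKED_MASK */

struct model_qspinlock {
	atomic_uint val;	/* locked byte | pending | tail, in one word */
};

static void model_unlock_wait(struct model_qspinlock *lock)
{
	unsigned int val;

	for (;;) {
		val = atomic_load_explicit(&lock->val, memory_order_relaxed);

		if (!val)			/* not locked, we're done */
			goto done;

		if (val & MODEL_LOCKED_MASK)	/* locked, go wait for unlock */
			break;

		/* pending but not yet locked: wait for the lock store */
		sched_yield();
	}

	/* any unlock is good */
	while (atomic_load_explicit(&lock->val, memory_order_relaxed) &
	       MODEL_LOCKED_MASK)
		sched_yield();

done:
	atomic_thread_fence(memory_order_acquire);	/* order later accesses */
}

int main(void)
{
	struct model_qspinlock lock;

	atomic_init(&lock.val, 0);	/* zero word: unlocked, no waiters */
	model_unlock_wait(&lock);	/* returns immediately via the !val path */
	printf("lock observed free\n");
	return 0;
}
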
@@ -614,6 +614,7 @@ free_bufs:
 
 	kref_put(&chan->kref, relay_destroy_channel);
 	mutex_unlock(&relay_channels_mutex);
+	kfree(chan);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(relay_open);

@@ -2535,10 +2535,9 @@ void wake_up_new_task(struct task_struct *p)
 	 */
 	set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
-	/* Post initialize new task's util average when its cfs_rq is set */
+	rq = __task_rq_lock(p, &rf);
 	post_init_entity_util_avg(&p->se);
 
-	rq = __task_rq_lock(p, &rf);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);

@@ -3170,7 +3169,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 static inline void schedule_debug(struct task_struct *prev)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
-	BUG_ON(task_stack_end_corrupted(prev));
+	if (task_stack_end_corrupted(prev))
+		panic("corrupted stack end detected inside scheduler\n");
 #endif
 
 	if (unlikely(in_atomic_preempt_off())) {

@@ -8468,8 +8468,9 @@ void free_fair_sched_group(struct task_group *tg)
 
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
-	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
+	struct cfs_rq *cfs_rq;
+	struct rq *rq;
 	int i;
 
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);

@@ -8484,6 +8485,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	init_cfs_bandwidth(tg_cfs_bandwidth(tg));
 
 	for_each_possible_cpu(i) {
+		rq = cpu_rq(i);
+
 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
 				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)

@@ -8497,7 +8500,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 		init_entity_runnable_average(se);
 
+		raw_spin_lock_irq(&rq->lock);
+		post_init_entity_util_avg(se);
+		raw_spin_unlock_irq(&rq->lock);
 	}
 
 	return 1;

@@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 static void cpuidle_idle_call(void)
 {
-	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_device *dev = cpuidle_get_device();
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
 	int next_state, entered_state;
 

@@ -198,7 +198,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
-	file = (struct file *)array->ptrs[index];
+	file = READ_ONCE(array->ptrs[index]);
 	if (unlikely(!file))
 		return -ENOENT;
 

@@ -247,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size)
 	if (unlikely(index >= array->map.max_entries))
 		return -E2BIG;
 
-	file = (struct file *)array->ptrs[index];
+	file = READ_ONCE(array->ptrs[index]);
 	if (unlikely(!file))
 		return -ENOENT;
 