linux/drivers/gpu/drm/msm/msm_atomic.c
Daniel Vetter 0c0c5972ca drm/atomic-helpers: remove legacy_cursor_update hacks
The stuff never really worked, and leads to lots of fun because it
frees atomic states out of order, which upsets KASAN, among other
things.

For async updates we now have a more solid solution with the
->atomic_async_check and ->atomic_async_update hooks. Support for
these has landed in msm and vc4. nouveau and i915 have their own
commit routines, doing something similar.
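
For reference, a minimal sketch of how a driver wires these hooks up
(the foo_* names are hypothetical; the vtable fields are the real DRM
plane helper ones, invoked by the core via
drm_atomic_helper_async_check() and drm_atomic_helper_async_commit()):

static int foo_plane_atomic_async_check(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* Only fast-path updates that keep the plane on the same CRTC */
	if (!new_state->crtc || new_state->crtc != plane->state->crtc)
		return -EINVAL;

	return 0;
}

static void foo_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* Apply the new fb/position directly, without a full commit */
	swap(plane->state->fb, new_state->fb);
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;

	/* ... write the plane's scanout registers here ... */
}

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.atomic_async_check = foo_plane_atomic_async_check,
	.atomic_async_update = foo_plane_atomic_async_update,
};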

For everyone else it's probably better to remove the use-after-free
bug and encourage folks to use the async support instead. The affected
drivers, which register a legacy cursor plane and use neither the new
async support nor their own commit routine, are: amdgpu, atmel,
mediatek, qxl, rockchip, sti, sun4i, tegra, virtio, and vmwgfx.

Inspired by an amdgpu bug report.

v2: Drop RFC. I think with amdgpu converted over to use
atomic_async_check/commit in

commit 674e78acae
Author: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Date:   Wed Dec 5 14:59:07 2018 -0500

    drm/amd/display: Add fast path for cursor plane updates

we no longer have any driver where userspace expects solid legacy
cursor support _and_ the driver uses the atomic helpers in their full
glory. So we can retire this.

v3: Paper over the msm and i915 regressions. The complete_all() is the
only thing missing afaict.

v4: Rebased on recent kernel, added extra link for vc4 bug.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=199425
Link: https://lore.kernel.org/all/20220221134155.125447-9-maxime@cerno.tech/
Cc: mikita.lipski@amd.com
Cc: Michel Dänzer <michel@daenzer.net>
Cc: harry.wentland@amd.com
Cc: Rob Clark <robdclark@gmail.com>
Cc: "Kazlauskas, Nicholas" <nicholas.kazlauskas@amd.com>
Tested-by: Maxime Ripard <maxime@cerno.tech>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
2024-11-25 12:32:54 +00:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_vblank.h>

#include "msm_atomic_trace.h"
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"

/*
 * Helpers to control vblanks while we flush.. basically just to ensure
 * that vblank accounting is switched on, so we get valid seqn/timestamp
 * on pageflip events (if requested)
 */
static void vblank_get(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_get(crtc);
	}
}

static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		if (!crtc->state->active)
			continue;
		drm_crtc_vblank_put(crtc);
	}
}
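
/*
 * Note: the per-crtc commit locks are always taken in ascending
 * crtc_index order (for_each_crtc_mask() walks the CRTC list in
 * registration order), and the index doubles as the lockdep subclass
 * for mutex_lock_nested() below, so holding several of these locks at
 * once doesn't trip lockdep.
 */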
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	int crtc_index;
	struct drm_crtc *crtc;

	for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
		crtc_index = drm_crtc_index(crtc);
		mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
	}
}

static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
	struct drm_crtc *crtc;

	for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask)
		mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]);
}

static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx)
{
	unsigned crtc_mask = BIT(crtc_idx);

	trace_msm_atomic_async_commit_start(crtc_mask);

	lock_crtcs(kms, crtc_mask);

	if (!(kms->pending_crtc_mask & crtc_mask)) {
		unlock_crtcs(kms, crtc_mask);
		goto out;
	}

	kms->pending_crtc_mask &= ~crtc_mask;

	kms->funcs->enable_commit(kms);

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);
	kms->funcs->disable_commit(kms);

out:
	trace_msm_atomic_async_commit_finish(crtc_mask);
}

static void msm_atomic_pending_work(struct kthread_work *work)
{
	struct msm_pending_timer *timer = container_of(work,
			struct msm_pending_timer, work.work);

	msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}

int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
		struct msm_kms *kms, int crtc_idx)
{
	timer->kms = kms;
	timer->crtc_idx = crtc_idx;

	timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
	if (IS_ERR(timer->worker)) {
		int ret = PTR_ERR(timer->worker);

		timer->worker = NULL;
		return ret;
	}
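	/*
	 * The worker flushes just ahead of vblank, so run it at RT FIFO
	 * priority to keep scheduling latency from eating the deadline.
	 */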
	sched_set_fifo(timer->worker->task);

	msm_hrtimer_work_init(&timer->work, timer->worker,
			      msm_atomic_pending_work,
			      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);

	return 0;
}

void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer)
{
	if (timer->worker)
		kthread_destroy_worker(timer->worker);
}
static bool can_do_async(struct drm_atomic_state *state,
		struct drm_crtc **async_crtc)
{
	struct drm_connector_state *connector_state;
	struct drm_connector *connector;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, num_crtcs = 0;

	if (!(state->legacy_cursor_update || state->async_update))
		return false;

	/* any connector change, means slow path: */
	for_each_new_connector_in_state(state, connector, connector_state, i)
		return false;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (drm_atomic_crtc_needs_modeset(crtc_state))
			return false;
		if (!crtc_state->active)
			return false;
		if (++num_crtcs > 1)
			return false;
		*async_crtc = crtc;
	}

	return true;
}

/* Get bitmask of crtcs that will need to be flushed.  The bitmask
 * can be used with for_each_crtc_mask() iterator, to iterate
 * affected crtcs without needing to preserve the atomic state.
 */
static unsigned get_crtc_mask(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned i, mask = 0;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
		mask |= drm_crtc_mask(crtc);

	return mask;
}
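
/*
 * Note: enabling or disabling a CTM is deliberately forced down the
 * full modeset path below rather than treated as a fast-path update.
 */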
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		if ((old_crtc_state->ctm && !new_crtc_state->ctm) ||
		    (!old_crtc_state->ctm && new_crtc_state->ctm)) {
			new_crtc_state->mode_changed = true;
			state->allow_modeset = true;
		}
	}

	return drm_atomic_helper_check(dev, state);
}

void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct drm_crtc *async_crtc = NULL;
	unsigned crtc_mask = get_crtc_mask(state);
	bool async = can_do_async(state, &async_crtc);

	trace_msm_atomic_commit_tail_start(async, crtc_mask);

	kms->funcs->enable_commit(kms);

	/*
	 * Ensure any previous (potentially async) commit has
	 * completed:
	 */
	lock_crtcs(kms, crtc_mask);
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	/*
	 * Now that there is no in-progress flush, prepare the
	 * current update:
	 */
	if (kms->funcs->prepare_commit)
		kms->funcs->prepare_commit(kms, state);

	/*
	 * Push atomic updates down to hardware:
	 */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, state);
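
	/*
	 * For async commits the flush is deferred: flip_done is signalled
	 * right away and the actual hardware flush happens later from the
	 * per-crtc kthread worker, woken by an hrtimer shortly before
	 * vblank (see msm_atomic_pending_work() above).
	 */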
	if (async) {
		struct msm_pending_timer *timer =
			&kms->pending_timers[drm_crtc_index(async_crtc)];

		/* async updates are limited to single-crtc updates: */
		WARN_ON(crtc_mask != drm_crtc_mask(async_crtc));

		complete_all(&async_crtc->state->commit->flip_done);

		/*
		 * Start timer if we don't already have an update pending
		 * on this crtc:
		 */
		if (!(kms->pending_crtc_mask & crtc_mask)) {
			ktime_t vsync_time, wakeup_time;

			kms->pending_crtc_mask |= crtc_mask;
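
			/*
			 * Aim the wakeup ~1ms before the next vblank so
			 * coalesced async updates get flushed once per
			 * frame; if the next vblank time can't be
			 * computed, fall back to a synchronous flush.
			 */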
			if (drm_crtc_next_vblank_start(async_crtc, &vsync_time))
				goto fallback;

			wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));

			msm_hrtimer_queue_work(&timer->work, wakeup_time,
					HRTIMER_MODE_ABS);
		}

		kms->funcs->disable_commit(kms);
		unlock_crtcs(kms, crtc_mask);

		/*
		 * At this point, from drm core's perspective, we
		 * are done with the atomic update, so we can just
		 * go ahead and signal that it is done:
		 */
		drm_atomic_helper_commit_hw_done(state);
		drm_atomic_helper_cleanup_planes(dev, state);

		trace_msm_atomic_commit_tail_finish(async, crtc_mask);

		return;
	}

fallback:
	/*
	 * If there is any async flush pending on updated crtcs, fold
	 * them into the current flush.
	 */
	kms->pending_crtc_mask &= ~crtc_mask;

	vblank_get(kms, crtc_mask);

	/*
	 * Flush hardware updates:
	 */
	trace_msm_atomic_flush_commit(crtc_mask);
	kms->funcs->flush_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);

	/*
	 * Wait for flush to complete:
	 */
	trace_msm_atomic_wait_flush_start(crtc_mask);
	kms->funcs->wait_flush(kms, crtc_mask);
	trace_msm_atomic_wait_flush_finish(crtc_mask);

	vblank_put(kms, crtc_mask);

	lock_crtcs(kms, crtc_mask);
	kms->funcs->complete_commit(kms, crtc_mask);
	unlock_crtcs(kms, crtc_mask);

	kms->funcs->disable_commit(kms);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_cleanup_planes(dev, state);

	trace_msm_atomic_commit_tail_finish(async, crtc_mask);
}