linux/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
Arvind Yadav f15d4e92f7 drm/amdgpu: Fix display freeze lockup error
A deadlock situation has arisen between the userq
signal IOCTL and the eviction fence. In this scenario,
amdgpu_userq_signal_ioctl() has acquired the reservation
lock on the read/write buffer objects (BOs) through drm_exec.
It then calls amdgpu_userqueue_ensure_ev_fence(), which
waits for the userq resume work to complete.
Meanwhile, the userq suspend worker has initiated the userq
resume work (amdgpu_userqueue_resume_worker). This resume work
attempts to validate the vm->done BOs, so
amdgpu_userqueue_validate_bos ends up trying to take the
reservation lock on the same write BO that is already held
by amdgpu_userq_signal_ioctl.
As a result, the resume work stalls and
amdgpu_userqueue_ensure_ev_fence remains blocked waiting for it.
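
Simplified, the wait cycle looks like this (illustration only, not the
exact call chain):

  amdgpu_userq_signal_ioctl()           userq resume work
    holds BO reservation (drm_exec)       amdgpu_userqueue_validate_bos()
    amdgpu_userqueue_ensure_ev_fence()      -> blocks on the same BO
      -> waits for the resume work             reservation lock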

Call Trace:
[  242.836469] INFO: task gnome-shel:cs0:1288 blocked for more than 120 seconds.
[  242.836486]       Tainted: G           OE      6.12.0-rc2rebased-oct-24+ #4
[  242.836491] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[  242.836494] task:gnome-shel:cs0  state:D stack:0     pid:1288  tgid:1282  ppid:1180   flags:0x00000002
[  242.836503] Call Trace:
[  242.836508]  <TASK>
[  242.836517]  __schedule+0x3e0/0xb10
[  242.836530]  ? srso_return_thunk+0x5/0x5f
[  242.836541]  schedule+0x31/0x120
[  242.836546]  schedule_timeout+0x150/0x160
[  242.836551]  ? srso_return_thunk+0x5/0x5f
[  242.836555]  ? sysvec_call_function+0x69/0xd0
[  242.836562]  ? srso_return_thunk+0x5/0x5f
[  242.836567]  ? preempt_count_add+0x7f/0xd0
[  242.836577]  __wait_for_common+0x91/0x180
[  242.836582]  ? __pfx_schedule_timeout+0x10/0x10
[  242.836590]  wait_for_completion+0x28/0x30
[  242.836595]  __flush_work+0x16c/0x290
[  242.836602]  ? __pfx_wq_barrier_func+0x10/0x10
[  242.836611]  flush_delayed_work+0x3a/0x60
[  242.836621]  amdgpu_userqueue_ensure_ev_fence+0x2d/0xb0 [amdgpu]
[  242.836966]  amdgpu_userq_signal_ioctl+0x959/0xec0 [amdgpu]
[  242.837171]  ? __pfx_amdgpu_userq_signal_ioctl+0x10/0x10 [amdgpu]
[  242.837365]  drm_ioctl_kernel+0xae/0x100 [drm]
[  242.837398]  drm_ioctl+0x2a1/0x500 [drm]
[  242.837420]  ? __pfx_amdgpu_userq_signal_ioctl+0x10/0x10 [amdgpu]
[  242.837622]  ? srso_return_thunk+0x5/0x5f
[  242.837627]  ? srso_return_thunk+0x5/0x5f
[  242.837630]  ? _raw_spin_unlock_irqrestore+0x2b/0x50
[  242.837635]  amdgpu_drm_ioctl+0x4f/0x90 [amdgpu]
[  242.837811]  __x64_sys_ioctl+0x99/0xd0
[  242.837820]  x64_sys_call+0x1209/0x20d0
[  242.837825]  do_syscall_64+0x51/0x120
[  242.837830]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
[  242.837835] RIP: 0033:0x7f2f33f1a94f
[  242.837838] RSP: 002b:00007f2f24ffea30 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[  242.837842] RAX: ffffffffffffffda RBX: 00007f2f24ffebd0 RCX: 00007f2f33f1a94f
[  242.837845] RDX: 00007f2f24ffebd0 RSI: 00000000c0306457 RDI: 000000000000000d
[  242.837847] RBP: 00007f2f24ffeab0 R08: 0000000000000000 R09: 0000000000000000
[  242.837849] R10: 00007f2f24ffecd0 R11: 0000000000000246 R12: 00007f2f25000640
[  242.837851] R13: 00000000c0306457 R14: 000000000000000d R15: 00007fff3b39c1e0
[  242.837858]  </TASK>
[  242.837865] INFO: task Xwayland:cs0:1517 blocked for more than 120 seconds.
[  242.837869]       Tainted: G           OE      6.12.0-rc2rebased-oct-24+ #4
[  242.837872] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[  242.837874] task:Xwayland:cs0    state:D stack:0     pid:1517  tgid:1338  ppid:1282   flags:0x00004002
[  242.837878] Call Trace:
[  242.837880]  <TASK>
[  242.837883]  __schedule+0x3e0/0xb10
[  242.837890]  schedule+0x31/0x120
[  242.837894]  schedule_preempt_disabled+0x1c/0x30
[  242.837897]  __mutex_lock.constprop.0+0x386/0x6e0
[  242.837902]  ? srso_return_thunk+0x5/0x5f
[  242.837905]  ? __timer_delete_sync+0x81/0xe0
[  242.837911]  __mutex_lock_slowpath+0x13/0x20
[  242.837915]  mutex_lock+0x3b/0x50
[  242.837919]  amdgpu_userqueue_ensure_ev_fence+0x35/0xb0 [amdgpu]
[  242.838138]  amdgpu_userq_signal_ioctl+0x959/0xec0 [amdgpu]
[  242.838340]  ? __pfx_amdgpu_userq_signal_ioctl+0x10/0x10 [amdgpu]
[  242.838531]  drm_ioctl_kernel+0xae/0x100 [drm]
[  242.838559]  drm_ioctl+0x2a1/0x500 [drm]
[  242.838580]  ? __pfx_amdgpu_userq_signal_ioctl+0x10/0x10 [amdgpu]
[  242.838778]  ? srso_return_thunk+0x5/0x5f
[  242.838783]  ? srso_return_thunk+0x5/0x5f
[  242.838786]  ? _raw_spin_unlock_irqrestore+0x2b/0x50
[  242.838791]  amdgpu_drm_ioctl+0x4f/0x90 [amdgpu]
[  242.838967]  __x64_sys_ioctl+0x99/0xd0
[  242.838972]  x64_sys_call+0x1209/0x20d0
[  242.838975]  do_syscall_64+0x51/0x120
[  242.838979]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
[  242.838982] RIP: 0033:0x7f9118b1a94f
[  242.838985] RSP: 002b:00007f910cdff760 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[  242.838989] RAX: ffffffffffffffda RBX: 00007f910cdff910 RCX: 00007f9118b1a94f
[  242.838991] RDX: 00007f910cdff910 RSI: 00000000c0306457 RDI: 000000000000000c
[  242.838993] RBP: 00007f910cdff7e0 R08: 0000000000000000 R09: 0000000000000001
[  242.838995] R10: 00007f910cdff9d4 R11: 0000000000000246 R12: 00007f910ce00640
[  242.838997] R13: 00000000c0306457 R14: 000000000000000c R15: 00007fff9dd11d10
[  242.839004]  </TASK>

v2: Addressed review comments from Christian.
v3/v4: Addressed review comments from Christian.
   - Move the drm_exec loop after userq fence creation.
   - Clean up the newly created userq fence in case of error.
v5: Addressed review comments from Christian.
   - Create a new amdgpu_userq_fence_alloc() function for allocation.
   - Call dma_fence_put() in the cleanup procedure.
   - Make amdgpu_userq_fence_create() static.
   - Call drm_exec_init() after mutex_unlock() (see the sketch below).
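
With these changes, amdgpu_userq_signal_ioctl() roughly follows the
ordering below (a simplified sketch of the flow in the file further
down, not the literal code):

  amdgpu_userq_fence_read_wptr(queue, &wptr);
  amdgpu_userq_fence_alloc(&userq_fence);        /* no locks held yet */
  amdgpu_userqueue_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
                                  /* may wait for the resume work */
  amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
  mutex_unlock(&userq_mgr->userq_mutex);
  drm_exec_init(&exec, ...);      /* BO reservation locks are taken
                                     only after the wait above */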

Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Shashank Sharma <shashank.sharma@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Arvind Yadav <arvind.yadav@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
2025-04-08 16:48:20 -04:00

// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/dma-fence-unwrap.h>
#include <drm/drm_exec.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_userq_fence.h"
static const struct dma_fence_ops amdgpu_userq_fence_ops;
static struct kmem_cache *amdgpu_userq_fence_slab;
int amdgpu_userq_fence_slab_init(void)
{
amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
sizeof(struct amdgpu_userq_fence),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!amdgpu_userq_fence_slab)
return -ENOMEM;
return 0;
}
void amdgpu_userq_fence_slab_fini(void)
{
rcu_barrier();
kmem_cache_destroy(amdgpu_userq_fence_slab);
}
static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
{
if (!f || f->ops != &amdgpu_userq_fence_ops)
return NULL;
return container_of(f, struct amdgpu_userq_fence, base);
}
static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
{
return le64_to_cpu(*fence_drv->cpu_addr);
}
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq)
{
struct amdgpu_userq_fence_driver *fence_drv;
unsigned long flags;
int r;
fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
if (!fence_drv) {
DRM_ERROR("Failed to allocate memory for fence driver\n");
r = -ENOMEM;
goto free_fence_drv;
}
/* Acquire seq64 memory */
r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
&fence_drv->cpu_addr);
if (r)
goto free_fence_drv;
memset(fence_drv->cpu_addr, 0, sizeof(u64));
kref_init(&fence_drv->refcount);
INIT_LIST_HEAD(&fence_drv->fences);
spin_lock_init(&fence_drv->fence_list_lock);
fence_drv->adev = adev;
fence_drv->fence_drv_xa_ptr = &userq->fence_drv_xa;
fence_drv->context = dma_fence_context_alloc(1);
get_task_comm(fence_drv->timeline_name, current);
xa_lock_irqsave(&adev->userq_xa, flags);
r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
fence_drv, GFP_KERNEL));
xa_unlock_irqrestore(&adev->userq_xa, flags);
if (r)
goto free_seq64;
userq->fence_drv = fence_drv;
return 0;
free_seq64:
amdgpu_seq64_free(adev, fence_drv->va);
free_fence_drv:
kfree(fence_drv);
return r;
}
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
{
struct amdgpu_userq_fence *userq_fence, *tmp;
struct dma_fence *fence;
u64 rptr;
int i;
if (!fence_drv)
return;
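/*
 * The completed sequence number is written to the seq64 slot; every
 * pending fence with a seqno at or below that value can be signaled
 * and removed from the list.
 */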
rptr = amdgpu_userq_fence_read(fence_drv);
spin_lock(&fence_drv->fence_list_lock);
list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
fence = &userq_fence->base;
if (rptr < fence->seqno)
break;
dma_fence_signal(fence);
for (i = 0; i < userq_fence->fence_drv_array_count; i++)
amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
list_del(&userq_fence->link);
dma_fence_put(fence);
}
spin_unlock(&fence_drv->fence_list_lock);
}
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
{
struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
struct amdgpu_userq_fence_driver,
refcount);
struct amdgpu_userq_fence_driver *xa_fence_drv;
struct amdgpu_device *adev = fence_drv->adev;
struct amdgpu_userq_fence *fence, *tmp;
struct xarray *xa = &adev->userq_xa;
unsigned long index, flags;
struct dma_fence *f;
spin_lock(&fence_drv->fence_list_lock);
list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
f = &fence->base;
if (!dma_fence_is_signaled(f)) {
dma_fence_set_error(f, -ECANCELED);
dma_fence_signal(f);
}
list_del(&fence->link);
dma_fence_put(f);
}
spin_unlock(&fence_drv->fence_list_lock);
xa_lock_irqsave(xa, flags);
xa_for_each(xa, index, xa_fence_drv)
if (xa_fence_drv == fence_drv)
__xa_erase(xa, index);
xa_unlock_irqrestore(xa, flags);
/* Free seq64 memory */
amdgpu_seq64_free(adev, fence_drv->va);
kfree(fence_drv);
}
void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
{
kref_get(&fence_drv->refcount);
}
void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
{
kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
}
#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
{
*userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
return *userq_fence ? 0 : -ENOMEM;
}
static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
struct amdgpu_userq_fence *userq_fence,
u64 seq, struct dma_fence **f)
{
struct amdgpu_userq_fence_driver *fence_drv;
struct dma_fence *fence;
unsigned long flags;
fence_drv = userq->fence_drv;
if (!fence_drv)
return -EINVAL;
spin_lock_init(&userq_fence->lock);
INIT_LIST_HEAD(&userq_fence->link);
fence = &userq_fence->base;
userq_fence->fence_drv = fence_drv;
dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
fence_drv->context, seq);
amdgpu_userq_fence_driver_get(fence_drv);
dma_fence_get(fence);
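/*
 * Take over the fence driver references that the wait IOCTL stashed in
 * userq->fence_drv_xa; they are dropped again when this fence is
 * signaled in amdgpu_userq_fence_driver_process().
 */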
if (!xa_empty(&userq->fence_drv_xa)) {
struct amdgpu_userq_fence_driver *stored_fence_drv;
unsigned long index, count = 0;
int i = 0;
xa_lock(&userq->fence_drv_xa);
xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
count++;
userq_fence->fence_drv_array =
kvmalloc_array(count,
sizeof(struct amdgpu_userq_fence_driver *),
GFP_ATOMIC);
if (userq_fence->fence_drv_array) {
xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
userq_fence->fence_drv_array[i] = stored_fence_drv;
__xa_erase(&userq->fence_drv_xa, index);
i++;
}
}
userq_fence->fence_drv_array_count = i;
xa_unlock(&userq->fence_drv_xa);
} else {
userq_fence->fence_drv_array = NULL;
userq_fence->fence_drv_array_count = 0;
}
/* Check if hardware has already processed the job */
spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
if (!dma_fence_is_signaled_locked(fence))
list_add_tail(&userq_fence->link, &fence_drv->fences);
else
dma_fence_put(fence);
spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
*f = fence;
return 0;
}
#endif
static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
{
return "amdgpu_userqueue_fence";
}
static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
return fence->fence_drv->timeline_name;
}
static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
{
struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
u64 rptr, wptr;
rptr = amdgpu_userq_fence_read(fence_drv);
wptr = fence->base.seqno;
if (rptr >= wptr)
return true;
return false;
}
static void amdgpu_userq_fence_free(struct rcu_head *rcu)
{
struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
/* Release the fence driver reference */
amdgpu_userq_fence_driver_put(fence_drv);
kvfree(userq_fence->fence_drv_array);
kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
}
static void amdgpu_userq_fence_release(struct dma_fence *f)
{
call_rcu(&f->rcu, amdgpu_userq_fence_free);
}
static const struct dma_fence_ops amdgpu_userq_fence_ops = {
.use_64bit_seqno = true,
.get_driver_name = amdgpu_userq_fence_get_driver_name,
.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
.signaled = amdgpu_userq_fence_signaled,
.release = amdgpu_userq_fence_release,
};
#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
/**
* amdgpu_userq_fence_read_wptr - Read the userq wptr value
*
* @queue: user mode queue structure pointer
* @wptr: write pointer value
*
* Read the wptr value from userq's MQD. The userq signal IOCTL
* creates a dma_fence for the shared buffers that expects the
* RPTR value written to seq64 memory >= WPTR.
*
* Returns 0 on success (with the wptr value copied to @wptr) or a
* negative error code on failure.
*/
static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
u64 *wptr)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
u64 addr, *ptr;
int r;
r = amdgpu_bo_reserve(queue->vm->root.bo, false);
if (r)
return r;
addr = queue->userq_prop->wptr_gpu_addr;
addr &= AMDGPU_GMC_HOLE_MASK;
mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
if (!mapping) {
amdgpu_bo_unreserve(queue->vm->root.bo);
DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
return -EINVAL;
}
bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
amdgpu_bo_unreserve(queue->vm->root.bo);
r = amdgpu_bo_reserve(bo, true);
if (r) {
DRM_ERROR("Failed to reserve userqueue wptr bo");
return r;
}
r = amdgpu_bo_kmap(bo, (void **)&ptr);
if (r) {
DRM_ERROR("Failed mapping the userqueue wptr bo");
goto map_error;
}
*wptr = le64_to_cpu(*ptr);
amdgpu_bo_kunmap(bo);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return 0;
map_error:
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
}
static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
{
dma_fence_put(fence);
}
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
struct drm_amdgpu_userq_signal *args = data;
struct drm_gem_object **gobj_write = NULL;
struct drm_gem_object **gobj_read = NULL;
struct amdgpu_usermode_queue *queue;
struct amdgpu_userq_fence *userq_fence;
struct drm_syncobj **syncobj = NULL;
u32 *bo_handles_write, num_write_bo_handles;
u32 *syncobj_handles, num_syncobj_handles;
u32 *bo_handles_read, num_read_bo_handles;
int r, i, entry, rentry, wentry;
struct dma_fence *fence;
struct drm_exec exec;
u64 wptr;
num_syncobj_handles = args->num_syncobj_handles;
syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
sizeof(u32) * num_syncobj_handles);
if (IS_ERR(syncobj_handles))
return PTR_ERR(syncobj_handles);
/* Array of pointers to the looked up syncobjs */
syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
if (!syncobj) {
r = -ENOMEM;
goto free_syncobj_handles;
}
for (entry = 0; entry < num_syncobj_handles; entry++) {
syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
if (!syncobj[entry]) {
r = -ENOENT;
goto free_syncobj;
}
}
num_read_bo_handles = args->num_bo_read_handles;
bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
sizeof(u32) * num_read_bo_handles);
if (IS_ERR(bo_handles_read)) {
r = PTR_ERR(bo_handles_read);
goto free_syncobj;
}
/* Array of pointers to the GEM read objects */
gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
if (!gobj_read) {
r = -ENOMEM;
goto free_bo_handles_read;
}
for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
if (!gobj_read[rentry]) {
r = -ENOENT;
goto put_gobj_read;
}
}
num_write_bo_handles = args->num_bo_write_handles;
bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
sizeof(u32) * num_write_bo_handles);
if (IS_ERR(bo_handles_write)) {
r = PTR_ERR(bo_handles_write);
goto put_gobj_read;
}
/* Array of pointers to the GEM write objects */
gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
if (!gobj_write) {
r = -ENOMEM;
goto free_bo_handles_write;
}
for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
if (!gobj_write[wentry]) {
r = -ENOENT;
goto put_gobj_write;
}
}
/* Retrieve the user queue */
queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
if (!queue) {
r = -ENOENT;
goto put_gobj_write;
}
r = amdgpu_userq_fence_read_wptr(queue, &wptr);
if (r)
goto put_gobj_write;
r = amdgpu_userq_fence_alloc(&userq_fence);
if (r)
goto put_gobj_write;
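/*
 * Note: the userq fence is pre-allocated and the drm_exec BO locking
 * below happens only after userq_mutex is dropped, so waiting for the
 * eviction fence (and the resume work it may flush) cannot deadlock
 * on a BO reservation this IOCTL already holds.
 */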
/* We are here means UQ is active, make sure the eviction fence is valid */
amdgpu_userqueue_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
/* Create a new fence */
r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
if (r) {
mutex_unlock(&userq_mgr->userq_mutex);
kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
goto put_gobj_write;
}
dma_fence_put(queue->last_fence);
queue->last_fence = dma_fence_get(fence);
mutex_unlock(&userq_mgr->userq_mutex);
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
(num_read_bo_handles + num_write_bo_handles));
/* Lock all BOs with retry handling */
drm_exec_until_all_locked(&exec) {
r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
drm_exec_retry_on_contention(&exec);
if (r) {
amdgpu_userq_fence_cleanup(fence);
goto exec_fini;
}
r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
drm_exec_retry_on_contention(&exec);
if (r) {
amdgpu_userq_fence_cleanup(fence);
goto exec_fini;
}
}
for (i = 0; i < num_read_bo_handles; i++) {
if (!gobj_read || !gobj_read[i]->resv)
continue;
dma_resv_add_fence(gobj_read[i]->resv, fence,
DMA_RESV_USAGE_READ);
}
for (i = 0; i < num_write_bo_handles; i++) {
if (!gobj_write || !gobj_write[i]->resv)
continue;
dma_resv_add_fence(gobj_write[i]->resv, fence,
DMA_RESV_USAGE_WRITE);
}
/* Add the created fence to syncobj/BO's */
for (i = 0; i < num_syncobj_handles; i++)
drm_syncobj_replace_fence(syncobj[i], fence);
/* drop the reference acquired in fence creation function */
dma_fence_put(fence);
exec_fini:
drm_exec_fini(&exec);
put_gobj_write:
while (wentry-- > 0)
drm_gem_object_put(gobj_write[wentry]);
kfree(gobj_write);
free_bo_handles_write:
kfree(bo_handles_write);
put_gobj_read:
while (rentry-- > 0)
drm_gem_object_put(gobj_read[rentry]);
kfree(gobj_read);
free_bo_handles_read:
kfree(bo_handles_read);
free_syncobj:
while (entry-- > 0)
if (syncobj[entry])
drm_syncobj_put(syncobj[entry]);
kfree(syncobj);
free_syncobj_handles:
kfree(syncobj_handles);
return r;
}
#else
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
return -ENOTSUPP;
}
#endif
#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
struct drm_amdgpu_userq_fence_info *fence_info = NULL;
struct drm_amdgpu_userq_wait *wait_info = data;
struct drm_gem_object **gobj_write;
struct drm_gem_object **gobj_read;
struct dma_fence **fences = NULL;
u16 num_points, num_fences = 0;
int r, i, rentry, wentry, cnt;
struct drm_exec exec;
num_read_bo_handles = wait_info->num_bo_read_handles;
bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
sizeof(u32) * num_read_bo_handles);
if (IS_ERR(bo_handles_read))
return PTR_ERR(bo_handles_read);
num_write_bo_handles = wait_info->num_bo_write_handles;
bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
sizeof(u32) * num_write_bo_handles);
if (IS_ERR(bo_handles_write)) {
r = PTR_ERR(bo_handles_write);
goto free_bo_handles_read;
}
num_syncobj = wait_info->num_syncobj_handles;
syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
sizeof(u32) * num_syncobj);
if (IS_ERR(syncobj_handles)) {
r = PTR_ERR(syncobj_handles);
goto free_bo_handles_write;
}
num_points = wait_info->num_syncobj_timeline_handles;
timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
sizeof(u32) * num_points);
if (IS_ERR(timeline_handles)) {
r = PTR_ERR(timeline_handles);
goto free_syncobj_handles;
}
timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
sizeof(u32) * num_points);
if (IS_ERR(timeline_points)) {
r = PTR_ERR(timeline_points);
goto free_timeline_handles;
}
gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
if (!gobj_read) {
r = -ENOMEM;
goto free_timeline_points;
}
for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
if (!gobj_read[rentry]) {
r = -ENOENT;
goto put_gobj_read;
}
}
gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
if (!gobj_write) {
r = -ENOMEM;
goto put_gobj_read;
}
for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
if (!gobj_write[wentry]) {
r = -ENOENT;
goto put_gobj_write;
}
}
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
(num_read_bo_handles + num_write_bo_handles));
/* Lock all BOs with retry handling */
drm_exec_until_all_locked(&exec) {
r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
drm_exec_retry_on_contention(&exec);
if (r) {
drm_exec_fini(&exec);
goto put_gobj_write;
}
r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
drm_exec_retry_on_contention(&exec);
if (r) {
drm_exec_fini(&exec);
goto put_gobj_write;
}
}
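/*
 * When userspace passes num_fences == 0 this is a query: only count
 * the relevant fences and report the count back; otherwise fill
 * fence_info with up to num_fences entries.
 */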
if (!wait_info->num_fences) {
if (num_points) {
struct dma_fence_unwrap iter;
struct dma_fence *fence;
struct dma_fence *f;
for (i = 0; i < num_points; i++) {
r = drm_syncobj_find_fence(filp, timeline_handles[i],
timeline_points[i],
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
&fence);
if (r)
goto exec_fini;
dma_fence_unwrap_for_each(f, &iter, fence)
num_fences++;
dma_fence_put(fence);
}
}
/* Count syncobj's fence */
for (i = 0; i < num_syncobj; i++) {
struct dma_fence *fence;
r = drm_syncobj_find_fence(filp, syncobj_handles[i],
0,
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
&fence);
if (r)
goto exec_fini;
num_fences++;
dma_fence_put(fence);
}
/* Count GEM objects fence */
for (i = 0; i < num_read_bo_handles; i++) {
struct dma_resv_iter resv_cursor;
struct dma_fence *fence;
dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
DMA_RESV_USAGE_READ, fence)
num_fences++;
}
for (i = 0; i < num_write_bo_handles; i++) {
struct dma_resv_iter resv_cursor;
struct dma_fence *fence;
dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
DMA_RESV_USAGE_WRITE, fence)
num_fences++;
}
/*
* Passing num_fences = 0 means that userspace doesn't want to
* retrieve userq_fence_info. If num_fences = 0 we skip filling
* userq_fence_info and return the actual number of fences on
* args->num_fences.
*/
wait_info->num_fences = num_fences;
} else {
/* Array of fence info */
fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
if (!fence_info) {
r = -ENOMEM;
goto exec_fini;
}
/* Array of fences */
fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences) {
r = -ENOMEM;
goto free_fence_info;
}
/* Retrieve GEM read objects fence */
for (i = 0; i < num_read_bo_handles; i++) {
struct dma_resv_iter resv_cursor;
struct dma_fence *fence;
dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
DMA_RESV_USAGE_READ, fence) {
if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
r = -EINVAL;
goto free_fences;
}
fences[num_fences++] = fence;
dma_fence_get(fence);
}
}
/* Retrieve GEM write objects fence */
for (i = 0; i < num_write_bo_handles; i++) {
struct dma_resv_iter resv_cursor;
struct dma_fence *fence;
dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
DMA_RESV_USAGE_WRITE, fence) {
if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
r = -EINVAL;
goto free_fences;
}
fences[num_fences++] = fence;
dma_fence_get(fence);
}
}
if (num_points) {
struct dma_fence_unwrap iter;
struct dma_fence *fence;
struct dma_fence *f;
for (i = 0; i < num_points; i++) {
r = drm_syncobj_find_fence(filp, timeline_handles[i],
timeline_points[i],
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
&fence);
if (r)
goto free_fences;
dma_fence_unwrap_for_each(f, &iter, fence) {
if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
r = -EINVAL;
goto free_fences;
}
dma_fence_get(f);
fences[num_fences++] = f;
}
dma_fence_put(fence);
}
}
/* Retrieve syncobj's fence */
for (i = 0; i < num_syncobj; i++) {
struct dma_fence *fence;
r = drm_syncobj_find_fence(filp, syncobj_handles[i],
0,
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
&fence);
if (r)
goto free_fences;
if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
r = -EINVAL;
goto free_fences;
}
fences[num_fences++] = fence;
}
for (i = 0, cnt = 0; i < num_fences; i++) {
struct amdgpu_userq_fence_driver *fence_drv;
struct amdgpu_userq_fence *userq_fence;
u32 index;
userq_fence = to_amdgpu_userq_fence(fences[i]);
if (!userq_fence) {
/*
* Just waiting on other driver fences should
* be good for now
*/
r = dma_fence_wait(fences[i], true);
if (r) {
dma_fence_put(fences[i]);
goto free_fences;
}
dma_fence_put(fences[i]);
continue;
}
fence_drv = userq_fence->fence_drv;
/*
* We need to make sure the user queue release their reference
* to the fence drivers at some point before queue destruction.
* Otherwise, we would gather those references until we don't
* have any more space left and crash.
*/
if (fence_drv->fence_drv_xa_ptr) {
r = xa_alloc(fence_drv->fence_drv_xa_ptr, &index, fence_drv,
xa_limit_32b, GFP_KERNEL);
if (r)
goto free_fences;
amdgpu_userq_fence_driver_get(fence_drv);
}
/* Store drm syncobj's gpu va address and value */
fence_info[cnt].va = fence_drv->va;
fence_info[cnt].value = fences[i]->seqno;
dma_fence_put(fences[i]);
/* Increment the actual userq fence count */
cnt++;
}
wait_info->num_fences = cnt;
/* Copy userq fence info to user space */
if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
fence_info, wait_info->num_fences * sizeof(*fence_info))) {
r = -EFAULT;
goto free_fences;
}
kfree(fences);
kfree(fence_info);
}
drm_exec_fini(&exec);
for (i = 0; i < num_read_bo_handles; i++)
drm_gem_object_put(gobj_read[i]);
kfree(gobj_read);
for (i = 0; i < num_write_bo_handles; i++)
drm_gem_object_put(gobj_write[i]);
kfree(gobj_write);
kfree(timeline_points);
kfree(timeline_handles);
kfree(syncobj_handles);
kfree(bo_handles_write);
kfree(bo_handles_read);
return 0;
free_fences:
while (num_fences-- > 0)
dma_fence_put(fences[num_fences]);
kfree(fences);
free_fence_info:
kfree(fence_info);
exec_fini:
drm_exec_fini(&exec);
put_gobj_write:
while (wentry-- > 0)
drm_gem_object_put(gobj_write[wentry]);
kfree(gobj_write);
put_gobj_read:
while (rentry-- > 0)
drm_gem_object_put(gobj_read[rentry]);
kfree(gobj_read);
free_timeline_points:
kfree(timeline_points);
free_timeline_handles:
kfree(timeline_handles);
free_syncobj_handles:
kfree(syncobj_handles);
free_bo_handles_write:
kfree(bo_handles_write);
free_bo_handles_read:
kfree(bo_handles_read);
return r;
}
#else
int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
return -ENOTSUPP;
}
#endif