linux/drivers/gpu/drm/xe/xe_svm.h
Shuicheng Lin 9d80698bcd drm/xe: Add config control for svm flush work
Without CONFIG_DRM_XE_GPUSVM set, GPU SVM is never initialized, so
flushing its garbage-collector work triggers the warning below. Guard
the flush-work code with the config option to avoid it:
"
[  453.132028] ------------[ cut here ]------------
[  453.132527] WARNING: CPU: 9 PID: 4491 at kernel/workqueue.c:4205 __flush_work+0x379/0x3a0
[  453.133355] Modules linked in: xe drm_ttm_helper ttm gpu_sched drm_buddy drm_suballoc_helper drm_gpuvm drm_exec
[  453.134352] CPU: 9 UID: 0 PID: 4491 Comm: xe_exec_mix_mod Tainted: G     U  W           6.15.0-rc3+ #7 PREEMPT(full)
[  453.135405] Tainted: [U]=USER, [W]=WARN
...
[  453.136921] RIP: 0010:__flush_work+0x379/0x3a0
[  453.137417] Code: 8b 45 00 48 8b 55 08 89 c7 48 c1 e8 04 83 e7 08 83 e0 0f 83 cf 02 89 c6 48 0f ba 6d 00 03 e9 d5 fe ff ff 0f 0b e9 db fd ff ff <0f> 0b 45 31 e4 e9 d1 fd ff ff 0f 0b e9 03 ff ff ff 0f 0b e9 d6 fe
[  453.139250] RSP: 0018:ffffc90000c67b18 EFLAGS: 00010246
[  453.139782] RAX: 0000000000000000 RBX: ffff888108a24000 RCX: 0000000000002000
[  453.140521] RDX: 0000000000000001 RSI: 0000000000000000 RDI: ffff8881016d61c8
[  453.141253] RBP: ffff8881016d61c8 R08: 0000000000000000 R09: 0000000000000000
[  453.141985] R10: 0000000000000000 R11: 0000000008a24000 R12: 0000000000000001
[  453.142709] R13: 0000000000000002 R14: 0000000000000000 R15: ffff888107db8c00
[  453.143450] FS:  00007f44853d4c80(0000) GS:ffff8882f469b000(0000) knlGS:0000000000000000
[  453.144276] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[  453.144853] CR2: 00007f4487629228 CR3: 00000001016aa000 CR4: 00000000000406f0
[  453.145594] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[  453.146320] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[  453.147061] Call Trace:
[  453.147336]  <TASK>
[  453.147579]  ? tick_nohz_tick_stopped+0xd/0x30
[  453.148067]  ? xas_load+0x9/0xb0
[  453.148435]  ? xa_load+0x6f/0xb0
[  453.148781]  __xe_vm_bind_ioctl+0xbd5/0x1500 [xe]
[  453.149338]  ? dev_printk_emit+0x48/0x70
[  453.149762]  ? _dev_printk+0x57/0x80
[  453.150148]  ? drm_ioctl+0x17c/0x440
[  453.150544]  ? __drm_dev_vprintk+0x36/0x90
[  453.150983]  ? __pfx_xe_vm_bind_ioctl+0x10/0x10 [xe]
[  453.151575]  ? drm_ioctl_kernel+0x9f/0xf0
[  453.151998]  ? __pfx_xe_vm_bind_ioctl+0x10/0x10 [xe]
[  453.152560]  drm_ioctl_kernel+0x9f/0xf0
[  453.152968]  drm_ioctl+0x20f/0x440
[  453.153332]  ? __pfx_xe_vm_bind_ioctl+0x10/0x10 [xe]
[  453.153893]  ? ioctl_has_perm.constprop.0.isra.0+0xae/0x100
[  453.154489]  ? memory_bm_test_bit+0x5/0x60
[  453.154935]  xe_drm_ioctl+0x47/0x70 [xe]
[  453.155419]  __x64_sys_ioctl+0x8d/0xc0
[  453.155824]  do_syscall_64+0x47/0x110
[  453.156228]  entry_SYSCALL_64_after_hwframe+0x76/0x7e
"

v2 (Matt):
    refine the commit message with more details
    add a Fixes tag
    move the code to xe_svm.h, which already has the config check
    remove a blank line per code-style suggestion

Fixes: 63f6e480d1 ("drm/xe: Add SVM garbage collector")
Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20250502170052.1787973-1-shuicheng.lin@intel.com
2025-05-07 11:11:15 -07:00


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef _XE_SVM_H_
#define _XE_SVM_H_

#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)

#include <drm/drm_pagemap.h>
#include <drm/drm_gpusvm.h>

#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER

struct xe_bo;
struct xe_gt;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vram_region;

/** struct xe_svm_range - SVM range */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/**
	 * @garbage_collector_link: Link into VM's garbage collect SVM range
	 * list. Protected by VM's garbage collect lock.
	 */
	struct list_head garbage_collector_link;
	/**
	 * @tile_present: Tile mask of binding is present for this range.
	 * Protected by GPU SVM notifier lock.
	 */
	u8 tile_present;
	/**
	 * @tile_invalidated: Tile mask of binding is invalidated for this
	 * range. Protected by GPU SVM notifier lock.
	 */
	u8 tile_invalidated;
	/**
	 * @skip_migrate: Skip migration to VRAM, protected by GPU fault handler
	 * locking.
	 */
	u8 skip_migrate :1;
};

/**
 * xe_svm_range_pages_valid() - SVM range pages valid
 * @range: SVM range
 *
 * Return: True if SVM range pages are valid, False otherwise
 */
static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);

int xe_svm_init(struct xe_vm *vm);

void xe_svm_fini(struct xe_vm *vm);

void xe_svm_close(struct xe_vm *vm);

int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic);

bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);

int xe_svm_bo_evict(struct xe_bo *bo);

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);

/**
 * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
 * @range: SVM range
 *
 * Return: True if SVM range has a DMA mapping, False otherwise
 */
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
	lockdep_assert_held(&range->base.gpusvm->notifier_lock);
	return range->base.flags.has_dma_mapping;
}

#define xe_svm_assert_in_notifier(vm__) \
	lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)

#define xe_svm_notifier_lock(vm__) \
	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)

#define xe_svm_notifier_unlock(vm__) \
	drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)

void xe_svm_flush(struct xe_vm *vm);

#else
#include <linux/interval_tree.h>

struct drm_pagemap_device_addr;
struct xe_bo;
struct xe_gt;
struct xe_vm;
struct xe_vma;
struct xe_tile;
struct xe_vram_region;

#define XE_INTERCONNECT_VRAM 1

struct xe_svm_range {
	struct {
		struct interval_tree_node itree;
		const struct drm_pagemap_device_addr *dma_addr;
	} base;
	u32 tile_present;
	u32 tile_invalidated;
};

static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
{
	return false;
}

static inline
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}

static inline
int xe_svm_init(struct xe_vm *vm)
{
	return 0;
}

static inline
void xe_svm_fini(struct xe_vm *vm)
{
}

static inline
void xe_svm_close(struct xe_vm *vm)
{
}

static inline
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	return 0;
}

static inline
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return false;
}

static inline
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return 0;
}

static inline
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}

#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false

static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}

static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}

static inline void xe_svm_flush(struct xe_vm *vm)
{
}

#endif
#endif
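
Because the #else branch stubs every entry point, callers need no
config checks of their own. The hypothetical caller below is
illustrative only (example_bind() is not from the driver); it compiles
identically with or without CONFIG_DRM_XE_GPUSVM:

    /* Hypothetical caller: xe_svm_flush() is a no-op stub when
     * CONFIG_DRM_XE_GPUSVM is disabled, so no #ifdef is needed here.
     */
    static int example_bind(struct xe_vm *vm)
    {
            xe_svm_flush(vm);
            /* ... proceed with the bind operation ... */
            return 0;
    }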