linux/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
commit fc5d96670e ("drm/ttm: Move swapped objects off the manager's LRU list")
Author: Thomas Hellström
Resources of swapped objects remain on the TTM_PL_SYSTEM manager's
LRU list, which is bad for LRU walk efficiency.

Rename the device-wide "pinned" list to "unevictable" and also move
resources of swapped-out objects to that list, as sketched below.

An alternative would be to create an "UNEVICTABLE" priority, keeping
pinned and swapped objects on their respective managers' LRU lists
without affecting LRU walk efficiency.
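
To make the bookkeeping concrete, here is a minimal sketch, assuming
TTM's per-device lru_lock, the ttm_lru_item embedded in struct
ttm_resource, and the renamed bdev->unevictable list; the helper name
and call site are illustrative, not the committed code:

/* Illustrative sketch: park the resource of a pinned or swapped-out
 * object on the device-wide unevictable list, so that eviction walks
 * of the manager's LRU never have to skip over it.
 */
static void ttm_resource_park_unevictable(struct ttm_resource *res,
					  struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	list_move_tail(&res->lru.link, &bo->bdev->unevictable);
	spin_unlock(&bo->bdev->lru_lock);
}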

v2:
- Remove a bogus WARN_ON (Christian König)
- Update ttm_resource_[add|del] bulk move (Christian König)
- Fix TTM KUNIT tests (Intel CI)
v3:
- Check for non-NULL bo->resource in ttm_bo_populate().
v4:
- Don't move to LRU tail during swapout until the resource
  is properly swapped or there was a swapout failure.
  (Intel CI)
- Add a newline after checkpatch check.
v5:
- Introduce ttm_resource_is_swapped() to avoid a corner case where
  a newly created resource was considered swapped; see the sketch
  after this changelog. (Intel CI)
v6:
- Move an assert.
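
For reference, a minimal sketch of what the v5 helper could look like,
assuming a ttm_tt_is_swapped() query on the ttm_tt; the first check is
what avoids the corner case, since a newly created resource is not yet
the bo's current one:

/* Sketch, not the committed code: only the bo's current resource with
 * a swapped-out ttm_tt counts as swapped; a freshly created resource
 * that has never been assigned to the bo cannot be.
 */
bool ttm_resource_is_swapped(struct ttm_resource *res,
			     struct ttm_buffer_object *bo)
{
	if (bo->resource != res || !bo->ttm)
		return false;

	dma_resv_assert_held(bo->base.resv);
	return ttm_tt_is_swapped(bo->ttm);
}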

Cc: Christian König <christian.koenig@amd.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240911121859.85387-2-thomas.hellstrom@linux.intel.com
Date: 2024-10-09 13:41:30 +02:00


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_move.h"
#include "gem/i915_gem_ttm_pm.h"

/**
 * i915_ttm_backup_free - Free any backup attached to this object
 * @obj: The object whose backup is to be freed.
 */
void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
{
	if (obj->ttm.backup) {
		i915_gem_object_put(obj->ttm.backup);
		obj->ttm.backup = NULL;
	}
}

/**
 * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for restore
 * @base: The i915_gem_apply_to_region we derive from.
 * @allow_gpu: Whether using the gpu blitter is allowed.
 * @backup_pinned: On backup, backup also pinned objects.
 */
struct i915_gem_ttm_pm_apply {
	struct i915_gem_apply_to_region base;
	bool allow_gpu : 1;
	bool backup_pinned : 1;
};

static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
			   struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct ttm_buffer_object *backup_bo;
	struct drm_i915_private *i915 =
		container_of(bo->bdev, typeof(*i915), bdev);
	struct drm_i915_gem_object *backup;
	struct ttm_operation_ctx ctx = {};
	unsigned int flags;
	int err = 0;

	if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
		return 0;

	if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
		return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);

	if (!pm_apply->backup_pinned ||
	    (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
		return 0;

	if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
		return 0;

	/*
	 * It seems that we might have some framebuffers still pinned at this
	 * stage, but for such objects we might also need to deal with the CCS
	 * aux state. Make sure we force the save/restore of the CCS state,
	 * otherwise we might observe display corruption, when returning from
	 * suspend.
	 */
	flags = 0;
	if (i915_gem_object_needs_ccs_pages(obj)) {
		WARN_ON_ONCE(!i915_gem_object_is_framebuffer(obj));
		WARN_ON_ONCE(!pm_apply->allow_gpu);

		flags = I915_BO_ALLOC_CCS_AUX;
	}
	backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
					       obj->base.size, 0, flags);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		goto out_no_lock;

	backup_bo = i915_gem_to_ttm(backup);
	err = ttm_bo_populate(backup_bo, &ctx);
	if (err)
		goto out_no_populate;

	err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
	if (err) {
		drm_err(&i915->drm,
			"Unable to copy from device to system memory, err:%pe\n",
			ERR_PTR(err));
		goto out_no_populate;
	}
	ttm_bo_wait_ctx(backup_bo, &ctx);

	obj->ttm.backup = backup;
	return 0;

out_no_populate:
	i915_gem_ww_unlock_single(backup);
out_no_lock:
	i915_gem_object_put(backup);

	return err;
}

static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	i915_ttm_backup_free(obj);
	return 0;
}

/**
 * i915_ttm_recover_region - Free the backup of all objects of a region
 * @mr: The memory region
 *
 * Checks all objects of a region if there is backup attached and if so
 * frees that backup. Typically this is called to recover after a partially
 * performed backup.
 */
void i915_ttm_recover_region(struct intel_memory_region *mr)
{
	static const struct i915_gem_apply_to_region_ops recover_ops = {
		.process_obj = i915_ttm_recover,
	};
	struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
	int ret;

	ret = i915_gem_process_region(mr, &apply);
	GEM_WARN_ON(ret);
}

/**
 * i915_ttm_backup_region - Back up all objects of a region to smem.
 * @mr: The memory region
 * @flags: TTM backup flags
 *
 * Loops over all objects of a region and either evicts them if they are
 * evictable or backs them up using a backup object if they are pinned.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops backup_ops = {
		.process_obj = i915_ttm_backup,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &backup_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
		.backup_pinned = flags & I915_TTM_BACKUP_PINNED,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}

static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
			    struct drm_i915_gem_object *obj)
{
	struct i915_gem_ttm_pm_apply *pm_apply =
		container_of(apply, typeof(*pm_apply), base);
	struct drm_i915_gem_object *backup = obj->ttm.backup;
	struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
	struct ttm_operation_ctx ctx = {};
	int err;

	if (!backup)
		return 0;

	if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
		return 0;

	err = i915_gem_object_lock(backup, apply->ww);
	if (err)
		return err;

	/* Content may have been swapped. */
	if (!backup_bo->resource)
		err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
	if (!err)
		err = ttm_bo_populate(backup_bo, &ctx);
	if (!err) {
		err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
					    false);
		GEM_WARN_ON(err);
		ttm_bo_wait_ctx(backup_bo, &ctx);

		obj->ttm.backup = NULL;
		err = 0;
	}

	i915_gem_ww_unlock_single(backup);

	if (!err)
		i915_gem_object_put(backup);

	return err;
}

/**
 * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
 * @mr: The memory region
 * @flags: TTM backup flags
 *
 * Loops over all objects of a region and if they are backed-up, restores
 * them from smem.
 *
 * Return: Zero on success. Negative error code on error.
 */
int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
{
	static const struct i915_gem_apply_to_region_ops restore_ops = {
		.process_obj = i915_ttm_restore,
	};
	struct i915_gem_ttm_pm_apply pm_apply = {
		.base = {.ops = &restore_ops},
		.allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
	};

	return i915_gem_process_region(mr, &pm_apply.base);
}
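
/*
 * Usage sketch, not taken from the i915 suspend code: a hypothetical
 * caller backs up every local-memory region before suspend, allowing
 * GPU copies while the device is still awake. for_each_memory_region()
 * and the I915_TTM_BACKUP_* flags are the existing i915 ones; the
 * function itself is made up for illustration.
 */
static int lmem_backup_all(struct drm_i915_private *i915)
{
	struct intel_memory_region *mr;
	int ret = 0, id;

	for_each_memory_region(mr, i915, id) {
		if (mr->type != INTEL_MEMORY_LOCAL)
			continue;

		ret = i915_ttm_backup_region(mr, I915_TTM_BACKUP_ALLOW_GPU |
					     I915_TTM_BACKUP_PINNED);
		if (ret)
			break; /* i915_ttm_recover_region() frees partial backups. */
	}

	return ret;
}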