drm/msm: Split out map/unmap ops

With async VM_BIND, the actual pgtable updates are deferred: a list of
map/unmap ops is generated synchronously, but the pgtable changes
themselves happen later. To support that, split out op handlers and
change the existing non-VM_BIND paths to use them.

Note in particular that the vma itself may already be destroyed/freed
by the time an UNMAP op runs (or even a MAP op, if there is a later
queued UNMAP). For this reason, the op handlers cannot reference the
vma pointer.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Rob Clark <robin.clark@oss.qualcomm.com>
Tested-by: Antonino Maniscalco <antomani103@gmail.com>
Reviewed-by: Antonino Maniscalco <antomani103@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/661516/
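The constraint called out above -- op handlers must not dereference the
vma -- falls out of the deferred-execution model. The sketch below is
illustrative only (the msm_vm_op container, the pending list, and the
run_pending_ops() helper are hypothetical names, not part of this
patch): each op snapshots plain values at submit time, so replaying it
later is safe even if the vma has since been freed.

/* Illustrative sketch only -- not from this patch.  With async VM_BIND,
 * ops are recorded synchronously and replayed later, possibly after the
 * originating vma has been destroyed, so each op carries copied values
 * (iova, range, sgt, prot) rather than a vma pointer.
 */
struct msm_vm_op {			/* hypothetical container */
	struct list_head node;
	bool is_map;
	union {
		struct msm_vm_map_op map;
		struct msm_vm_unmap_op unmap;
	};
};

static void
run_pending_ops(struct msm_gem_vm *vm, struct list_head *pending)
{
	struct msm_vm_op *op, *tmp;

	list_for_each_entry_safe(op, tmp, pending, node) {
		if (op->is_map)
			vm_map_op(vm, &op->map);	/* error handling elided */
		else
			vm_unmap_op(vm, &op->unmap);
		list_del(&op->node);
		kfree(op);
	}
}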
@@ -8,6 +8,34 @@
 #include "msm_gem.h"
 #include "msm_mmu.h"
 
+#define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
+
+/**
+ * struct msm_vm_map_op - create new pgtable mapping
+ */
+struct msm_vm_map_op {
+	/** @iova: start address for mapping */
+	uint64_t iova;
+	/** @range: size of the region to map */
+	uint64_t range;
+	/** @offset: offset into @sgt to map */
+	uint64_t offset;
+	/** @sgt: pages to map, or NULL for a PRR mapping */
+	struct sg_table *sgt;
+	/** @prot: the mapping protection flags */
+	int prot;
+};
+
+/**
+ * struct msm_vm_unmap_op - unmap a range of pages from pgtable
+ */
+struct msm_vm_unmap_op {
+	/** @iova: start address for unmap */
+	uint64_t iova;
+	/** @range: size of region to unmap */
+	uint64_t range;
+};
+
 static void
 msm_gem_vm_free(struct drm_gpuvm *gpuvm)
 {
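As the kerneldoc above notes, @sgt doubles as a mode selector: NULL
requests a PRR mapping instead of mapping real pages. A hedged
illustration of building the two flavors (the iova values, sizes, and
bo_sgt are made up; IOMMU_READ/IOMMU_WRITE are the usual prot flags
from linux/iommu.h):

/* Illustrative values only -- not from this patch. */
struct msm_vm_map_op map_op = {
	.iova   = 0x100000000ull,	/* hypothetical GPU VA */
	.range  = SZ_2M,
	.offset = 0,
	.sgt    = bo_sgt,		/* hypothetical backing pages */
	.prot   = IOMMU_READ | IOMMU_WRITE,
};

/* sgt == NULL selects a PRR mapping, per the kerneldoc above. */
struct msm_vm_map_op prr_op = {
	.iova  = 0x100200000ull,
	.range = SZ_2M,
	.sgt   = NULL,
	.prot  = IOMMU_READ,
};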
@@ -21,18 +49,36 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm)
 	kfree(vm);
 }
 
+static void
+vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
+{
+	vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
+
+	vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
+}
+
+static int
+vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
+{
+	vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range);
+
+	return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
+				   op->range, op->prot);
+}
+
 /* Actually unmap memory for the vma */
 void msm_gem_vma_unmap(struct drm_gpuva *vma)
 {
 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
-	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
-	unsigned size = vma->va.range;
 
 	/* Don't do anything if the memory isn't mapped */
 	if (!msm_vma->mapped)
 		return;
 
-	vm->mmu->funcs->unmap(vm->mmu, vma->va.addr, size);
+	vm_unmap_op(to_msm_vm(vma->vm), &(struct msm_vm_unmap_op){
+		.iova = vma->va.addr,
+		.range = vma->va.range,
+	});
 
 	msm_vma->mapped = false;
 }
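Worth noting in the hunk above: the op is built with a C99 compound
literal, &(struct msm_vm_unmap_op){ ... }, giving a stack-allocated
temporary with no heap allocation. The same pattern in isolation (the
example() wrapper and values are hypothetical):

/* The compound-literal pattern in isolation; values are made up. */
static void example(struct msm_gem_vm *vm)
{
	/* The unnamed struct lives on example()'s stack until the end
	 * of the enclosing block, so passing its address is safe for a
	 * synchronous callee that does not retain the pointer.
	 */
	vm_unmap_op(vm, &(struct msm_vm_unmap_op){
		.iova  = 0x1000,
		.range = 0x2000,
	});
}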
@@ -42,7 +88,6 @@ int
 msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
 {
 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
-	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
 	int ret;
 
 	if (GEM_WARN_ON(!vma->va.addr))
@@ -62,9 +107,13 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
 	 * Revisit this if we can come up with a scheme to pre-alloc pages
 	 * for the pgtable in map/unmap ops.
 	 */
-	ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt,
-				  vma->gem.offset, vma->va.range,
-				  prot);
-
+	ret = vm_map_op(to_msm_vm(vma->vm), &(struct msm_vm_map_op){
+		.iova = vma->va.addr,
+		.range = vma->va.range,
+		.offset = vma->gem.offset,
+		.sgt = sgt,
+		.prot = prot,
+	});
 	if (ret) {
 		msm_vma->mapped = false;
 	}
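Taken together, the non-VM_BIND paths keep their old synchronous
semantics: a map failure resets `mapped`, so a later
msm_gem_vma_unmap() is a no-op. A rough caller-side sketch under those
assumptions (the wrapper name and prot flags are illustrative, not from
this patch):

/* Rough caller-side sketch; not from this patch. */
static int map_then_unmap_example(struct drm_gpuva *vma, struct sg_table *sgt)
{
	int ret;

	ret = msm_gem_vma_map(vma, IOMMU_READ | IOMMU_WRITE, sgt);
	if (ret)
		return ret;	/* mapped was reset to false on failure */

	/* ... use the mapping ... */

	msm_gem_vma_unmap(vma);	/* early-returns if nothing is mapped */
	return 0;
}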