drm/nouveau: separate buffer object backing memory from nvkm structures
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
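The hunks below all follow one pattern: code that used to cast reg->mm_node straight to the nvkm-level struct nvkm_mem now goes through a driver-owned struct nouveau_mem (the new nouveau_mem.c/h files below) and only reaches the nvkm structure through its _mem pointer, while the VMA and page-size fields move out of nvkm_mem into the wrapper. A rough, self-contained sketch of that accessor pattern (simplified stand-in types for illustration, not the kernel declarations themselves):

/* Simplified stand-ins; the real definitions are in nouveau_mem.h and the nvkm headers. */
struct nvkm_mem { unsigned long long size; };      /* nvkm-level backing allocation */
struct nvkm_vma { unsigned long long addr; };      /* GPU virtual address range     */

struct nouveau_mem {                               /* new driver-level wrapper      */
        unsigned char kind, comp;                  /* tiling/compression kind       */
        struct { unsigned char page; } mem;        /* page shift, replaces page_shift */
        struct nvkm_vma vma[2];                    /* copy-engine mappings          */
        struct nvkm_mem __mem;                     /* embedded storage (host/GART)  */
        struct nvkm_mem *_mem;                     /* what nvkm_vm_map() is handed  */
};

struct ttm_mem_reg { void *mm_node; };

/* New style: recover the wrapper from the TTM region... */
static inline struct nouveau_mem *nouveau_mem(struct ttm_mem_reg *reg)
{
        return reg->mm_node;
}

/* ...instead of the old direct cast to the nvkm structure. */
static inline struct nvkm_mem *old_style(struct ttm_mem_reg *reg)
{
        return (struct nvkm_mem *)reg->mm_node;
}

Everything TTM-facing (vma[], bar_vma, the page shift, kind/comp) ends up in the wrapper, which is what the second hunk below reflects by dropping those fields from struct nvkm_mem.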
@@ -30,6 +30,7 @@ nouveau-y += nouveau_vga.o
 # DRM - memory management
 nouveau-y += nouveau_bo.o
 nouveau-y += nouveau_gem.o
+nouveau-y += nouveau_mem.o
 nouveau-y += nouveau_prime.o
 nouveau-y += nouveau_sgdma.o
 nouveau-y += nouveau_ttm.o
@@ -22,12 +22,6 @@
 #define NV_MEM_COMP_VM 0x03
 
 struct nvkm_mem {
-        struct drm_device *dev;
-
-        struct nvkm_vma bar_vma;
-        struct nvkm_vma vma[2];
-        u8 page_shift;
-
         struct nvkm_mm_node *tag;
         struct nvkm_mm_node *mem;
         dma_addr_t *pages;
@@ -20,7 +20,10 @@ struct nvkm_vma {
         int refcount;
         struct nvkm_vm *vm;
         struct nvkm_mm_node *node;
-        u64 offset;
+        union {
+                u64 offset;
+                u64 addr;
+        };
         u32 access;
 };
 
@@ -37,6 +37,7 @@
 #include "nouveau_bo.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
 
 /*
  * NV10-NV40 tiling helpers
@@ -670,14 +671,14 @@ static int
 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
         int ret = RING_SPACE(chan, 10);
         if (ret == 0) {
                 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-                OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+                OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
                 OUT_RING (chan, PAGE_SIZE);
                 OUT_RING (chan, PAGE_SIZE);
                 OUT_RING (chan, PAGE_SIZE);
@@ -702,9 +703,9 @@ static int
 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
-        u64 src_offset = mem->vma[0].offset;
-        u64 dst_offset = mem->vma[1].offset;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
+        u64 src_offset = mem->vma[0].addr;
+        u64 dst_offset = mem->vma[1].addr;
         u32 page_count = new_reg->num_pages;
         int ret;
 
@@ -740,9 +741,9 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
-        u64 src_offset = mem->vma[0].offset;
-        u64 dst_offset = mem->vma[1].offset;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
+        u64 src_offset = mem->vma[0].addr;
+        u64 dst_offset = mem->vma[1].addr;
         u32 page_count = new_reg->num_pages;
         int ret;
 
@@ -779,9 +780,9 @@ static int
 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
-        u64 src_offset = mem->vma[0].offset;
-        u64 dst_offset = mem->vma[1].offset;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
+        u64 src_offset = mem->vma[0].addr;
+        u64 dst_offset = mem->vma[1].addr;
         u32 page_count = new_reg->num_pages;
         int ret;
 
@@ -817,14 +818,14 @@ static int
 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
         int ret = RING_SPACE(chan, 7);
         if (ret == 0) {
                 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-                OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+                OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
                 OUT_RING (chan, 0x00000000 /* COPY */);
                 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
         }
@@ -835,15 +836,15 @@ static int
 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
         int ret = RING_SPACE(chan, 7);
         if (ret == 0) {
                 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
                 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
-                OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
-                OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
-                OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
+                OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
+                OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
+                OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
                 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
         }
         return ret;
@@ -869,12 +870,12 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
 {
-        struct nvkm_mem *mem = old_reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(old_reg);
         u64 length = (new_reg->num_pages << PAGE_SHIFT);
-        u64 src_offset = mem->vma[0].offset;
-        u64 dst_offset = mem->vma[1].offset;
-        int src_tiled = !!mem->memtype;
-        int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
+        u64 src_offset = mem->vma[0].addr;
+        u64 dst_offset = mem->vma[1].addr;
+        int src_tiled = !!mem->kind;
+        int dst_tiled = !!nouveau_mem(new_reg)->kind;
         int ret;
 
         while (length) {
@@ -1011,25 +1012,34 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
                      struct ttm_mem_reg *reg)
 {
-        struct nvkm_mem *old_mem = bo->mem.mm_node;
-        struct nvkm_mem *new_mem = reg->mm_node;
+        struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+        struct nouveau_mem *new_mem = nouveau_mem(reg);
+        struct nvkm_vm *vmm = drm->client.vm;
         u64 size = (u64)reg->num_pages << PAGE_SHIFT;
         int ret;
 
-        ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
-                          NV_MEM_ACCESS_RW, &old_mem->vma[0]);
+        ret = nvkm_vm_get(vmm, size, old_mem->mem.page, NV_MEM_ACCESS_RW,
+                          &old_mem->vma[0]);
         if (ret)
                 return ret;
 
-        ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
-                          NV_MEM_ACCESS_RW, &old_mem->vma[1]);
+        ret = nvkm_vm_get(vmm, size, new_mem->mem.page, NV_MEM_ACCESS_RW,
+                          &old_mem->vma[1]);
         if (ret) {
                 nvkm_vm_put(&old_mem->vma[0]);
                 return ret;
         }
 
-        nvkm_vm_map(&old_mem->vma[0], old_mem);
-        nvkm_vm_map(&old_mem->vma[1], new_mem);
+        ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+        if (ret)
+                goto done;
+
+        ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
+        if (ret) {
+                nvkm_vm_put(&old_mem->vma[1]);
+                nvkm_vm_put(&old_mem->vma[0]);
+        }
         return 0;
 }
 
@@ -1211,8 +1221,8 @@ static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
                      struct ttm_mem_reg *new_reg)
 {
+        struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nvkm_mem *mem = new_reg ? new_reg->mm_node : NULL;
         struct nvkm_vma *vma;
 
         /* ttm can now (stupidly) pass the driver bos it didn't create... */
@@ -1220,9 +1230,9 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
                 return;
 
         if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
-            mem->page_shift == nvbo->page) {
+            mem->mem.page == nvbo->page) {
                 list_for_each_entry(vma, &nvbo->vma_list, head) {
-                        nvkm_vm_map(vma, mem);
+                        nvkm_vm_map(vma, mem->_mem);
                 }
         } else {
                 list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -1343,7 +1353,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
         struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
         struct nouveau_drm *drm = nouveau_bdev(bdev);
         struct nvkm_device *device = nvxx_device(&drm->client.device);
-        struct nvkm_mem *mem = reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(reg);
         int ret;
 
         reg->bus.addr = NULL;
@@ -1365,7 +1375,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                 reg->bus.is_iomem = !drm->agp.cma;
         }
 #endif
-        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
+        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->kind)
                 /* untiled */
                 break;
         /* fallthrough, tiled memory */
@@ -1377,14 +1387,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
                 struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
                 int page_shift = 12;
                 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
-                        page_shift = mem->page_shift;
+                        page_shift = mem->mem.page;
 
-                ret = nvkm_vm_get(bar, mem->size << 12, page_shift,
-                                  NV_MEM_ACCESS_RW, &mem->bar_vma);
+                ret = nvkm_vm_get(bar, mem->_mem->size << 12,
+                                  page_shift, NV_MEM_ACCESS_RW,
+                                  &mem->bar_vma);
                 if (ret)
                         return ret;
 
-                nvkm_vm_map(&mem->bar_vma, mem);
+                nvkm_vm_map(&mem->bar_vma, mem->_mem);
                 reg->bus.offset = mem->bar_vma.offset;
         }
         break;
@@ -1397,7 +1408,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
 {
-        struct nvkm_mem *mem = reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(reg);
 
         if (!mem->bar_vma.node)
                 return;
@@ -1606,7 +1617,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
                    struct nvkm_vma *vma)
 {
         const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-        struct nvkm_mem *mem = nvbo->bo.mem.mm_node;
+        struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
         int ret;
 
         ret = nvkm_vm_get(vm, size, nvbo->page, NV_MEM_ACCESS_RW, vma);
@@ -1614,8 +1625,8 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
                 return ret;
 
         if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
-            mem->page_shift == nvbo->page)
-                nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
+            mem->mem.page == nvbo->page)
+                nvkm_vm_map(vma, mem->_mem);
 
         list_add_tail(&vma->head, &nvbo->vma_list);
         vma->refcount = 1;
drivers/gpu/drm/nouveau/nouveau_mem.c (new file, 114 lines)
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nouveau_mem.h"
+#include "nouveau_drv.h"
+#include "nouveau_bo.h"
+
+#include <drm/ttm/ttm_bo_driver.h>
+
+int
+nouveau_mem_map(struct nouveau_mem *mem,
+                struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+        nvkm_vm_map(vma, mem->_mem);
+        return 0;
+}
+
+void
+nouveau_mem_fini(struct nouveau_mem *mem)
+{
+        if (mem->vma[1].node) {
+                nvkm_vm_unmap(&mem->vma[1]);
+                nvkm_vm_put(&mem->vma[1]);
+        }
+        if (mem->vma[0].node) {
+                nvkm_vm_unmap(&mem->vma[0]);
+                nvkm_vm_put(&mem->vma[0]);
+        }
+}
+
+int
+nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt)
+{
+        struct nouveau_mem *mem = nouveau_mem(reg);
+        struct nouveau_cli *cli = mem->cli;
+
+        if (mem->kind && cli->device.info.chipset == 0x50)
+                mem->comp = mem->kind = 0;
+        if (mem->comp) {
+                if (cli->device.info.chipset >= 0xc0)
+                        mem->kind = gf100_pte_storage_type_map[mem->kind];
+                mem->comp = 0;
+        }
+
+        mem->__mem.size = (reg->num_pages << PAGE_SHIFT) >> 12;
+        mem->__mem.memtype = (mem->comp << 7) | mem->kind;
+        if (tt->ttm.sg) mem->__mem.sg    = tt->ttm.sg;
+        else            mem->__mem.pages = tt->dma_address;
+        mem->_mem = &mem->__mem;
+        mem->mem.page = 12;
+        return 0;
+}
+
+int
+nouveau_mem_vram(struct ttm_mem_reg *reg, bool contig, u8 page)
+{
+        struct nouveau_mem *mem = nouveau_mem(reg);
+        struct nvkm_ram *ram = nvxx_fb(&mem->cli->device)->ram;
+        u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page);
+        int ret;
+
+        mem->mem.page = page;
+
+        ret = ram->func->get(ram, size, 1 << page, contig ? 0 : 1 << page,
+                             (mem->comp << 8) | mem->kind, &mem->_mem);
+        if (ret)
+                return ret;
+
+        reg->start = mem->_mem->offset >> PAGE_SHIFT;
+        return ret;
+}
+
+void
+nouveau_mem_del(struct ttm_mem_reg *reg)
+{
+        struct nouveau_mem *mem = nouveau_mem(reg);
+        nouveau_mem_fini(mem);
+        kfree(reg->mm_node);
+        reg->mm_node = NULL;
+}
+
+int
+nouveau_mem_new(struct nouveau_cli *cli, u8 kind, u8 comp,
+                struct ttm_mem_reg *reg)
+{
+        struct nouveau_mem *mem;
+
+        if (!(mem = kzalloc(sizeof(*mem), GFP_KERNEL)))
+                return -ENOMEM;
+        mem->cli = cli;
+        mem->kind = kind;
+        mem->comp = comp;
+
+        reg->mm_node = mem;
+        return 0;
+}
drivers/gpu/drm/nouveau/nouveau_mem.h (new file, 35 lines)
@@ -0,0 +1,35 @@
+#ifndef __NOUVEAU_MEM_H__
+#define __NOUVEAU_MEM_H__
+#include <subdev/fb.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+struct ttm_dma_tt;
+
+static inline struct nouveau_mem *
+nouveau_mem(struct ttm_mem_reg *reg)
+{
+        return reg->mm_node;
+}
+
+struct nouveau_mem {
+        struct nouveau_cli *cli;
+        u8 kind;
+        u8 comp;
+        struct {
+                u8 page;
+        } mem;
+        struct nvkm_vma vma[2];
+
+        struct nvkm_mem __mem;
+        struct nvkm_mem *_mem;
+        struct nvkm_vma bar_vma;
+};
+
+int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
+                    struct ttm_mem_reg *);
+void nouveau_mem_del(struct ttm_mem_reg *);
+int nouveau_mem_vram(struct ttm_mem_reg *, bool contig, u8 page);
+int nouveau_mem_host(struct ttm_mem_reg *, struct ttm_dma_tt *);
+void nouveau_mem_fini(struct nouveau_mem *);
+int nouveau_mem_map(struct nouveau_mem *, struct nvkm_vmm *, struct nvkm_vma *);
+#endif
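One detail of the new header worth spelling out (my reading of nouveau_mem.c above, not wording from the commit): __mem is embedded storage that nouveau_mem_host() points _mem at for system/GART backing, while nouveau_mem_vram() lets the ram allocator hand back its own nvkm_mem through &mem->_mem; the rest of the driver only ever follows _mem. A standalone sketch of that split, again with simplified stand-in types:

/* Simplified illustration of the __mem/_mem split; types are stand-ins. */
struct nvkm_mem { int placeholder; };

struct nouveau_mem {
        struct nvkm_mem  __mem;  /* embedded backing used for host (GART) memory */
        struct nvkm_mem *_mem;   /* pointer the rest of the driver dereferences  */
};

/* Host path: back the object with the embedded structure (cf. nouveau_mem_host). */
static void host_backing(struct nouveau_mem *mem)
{
        mem->_mem = &mem->__mem;
}

/* VRAM path: the allocator fills in its own nvkm_mem (cf. ram->func->get(..., &mem->_mem)). */
static void vram_backing(struct nouveau_mem *mem, struct nvkm_mem *from_ram)
{
        mem->_mem = from_ram;
}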
@@ -2,6 +2,7 @@
 #include <linux/slab.h>
 
 #include "nouveau_drv.h"
+#include "nouveau_mem.h"
 #include "nouveau_ttm.h"
 
 struct nouveau_sgdma_be {
@@ -9,7 +10,7 @@ struct nouveau_sgdma_be {
          * nouve_bo.c works properly, otherwise have to move them here
          */
         struct ttm_dma_tt ttm;
-        struct nvkm_mem *node;
+        struct nouveau_mem *mem;
 };
 
 static void
@@ -27,19 +28,20 @@ static int
 nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-        struct nvkm_mem *node = reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(reg);
+        int ret;
 
-        if (ttm->sg) {
-                node->sg = ttm->sg;
-                node->pages = NULL;
-        } else {
-                node->sg = NULL;
-                node->pages = nvbe->ttm.dma_address;
+        ret = nouveau_mem_host(reg, &nvbe->ttm);
+        if (ret)
+                return ret;
+
+        ret = nouveau_mem_map(mem, mem->cli->vm, &mem->vma[0]);
+        if (ret) {
+                nouveau_mem_fini(mem);
+                return ret;
         }
-        node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 
-        nvkm_vm_map(&node->vma[0], node);
-        nvbe->node = node;
+        nvbe->mem = mem;
         return 0;
 }
 
@@ -47,7 +49,7 @@ static int
 nv04_sgdma_unbind(struct ttm_tt *ttm)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-        nvkm_vm_unmap(&nvbe->node->vma[0]);
+        nouveau_mem_fini(nvbe->mem);
         return 0;
 }
 
@@ -61,30 +63,20 @@ static int
 nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
         struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-        struct nvkm_mem *node = reg->mm_node;
+        struct nouveau_mem *mem = nouveau_mem(reg);
+        int ret;
 
         /* noop: bound in move_notify() */
-        if (ttm->sg) {
-                node->sg = ttm->sg;
-                node->pages = NULL;
-        } else {
-                node->sg = NULL;
-                node->pages = nvbe->ttm.dma_address;
-        }
-        node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
-        return 0;
-}
+        ret = nouveau_mem_host(reg, &nvbe->ttm);
+        if (ret)
+                return ret;
 
-static int
-nv50_sgdma_unbind(struct ttm_tt *ttm)
-{
-        /* noop: unbound in move_notify() */
+        nvbe->mem = mem;
         return 0;
 }
 
 static struct ttm_backend_func nv50_sgdma_backend = {
         .bind = nv50_sgdma_bind,
-        .unbind = nv50_sgdma_unbind,
+        .unbind = nv04_sgdma_unbind,
         .destroy = nouveau_sgdma_destroy
 };
 
@@ -23,10 +23,10 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
-
 #include "nouveau_drv.h"
-#include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_ttm.h"
 
 #include <drm/drm_legacy.h>
 
@@ -44,34 +44,27 @@ nouveau_manager_fini(struct ttm_mem_type_manager *man)
         return 0;
 }
 
 static void
+nouveau_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
+{
+        nouveau_mem_del(reg);
+}
+
+static void
 nouveau_manager_debug(struct ttm_mem_type_manager *man,
                       struct drm_printer *printer)
 {
 }
 
-static inline void
-nvkm_mem_node_cleanup(struct nvkm_mem *node)
-{
-        if (node->vma[0].node) {
-                nvkm_vm_unmap(&node->vma[0]);
-                nvkm_vm_put(&node->vma[0]);
-        }
-
-        if (node->vma[1].node) {
-                nvkm_vm_unmap(&node->vma[1]);
-                nvkm_vm_put(&node->vma[1]);
-        }
-}
-
 static void
 nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                          struct ttm_mem_reg *reg)
 {
         struct nouveau_drm *drm = nouveau_bdev(man->bdev);
         struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
-        nvkm_mem_node_cleanup(reg->mm_node);
-        ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
+        struct nvkm_mem *mem = nouveau_mem(reg)->_mem;
+        nouveau_mem_del(reg);
+        ram->func->put(ram, &mem);
 }
 
 static int
@@ -80,31 +73,29 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                          const struct ttm_place *place,
                          struct ttm_mem_reg *reg)
 {
-        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-        struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nvkm_mem *node;
-        u32 size_nc = 0;
+        struct nouveau_drm *drm = nvbo->cli->drm;
+        struct nouveau_mem *mem;
         int ret;
 
         if (drm->client.device.info.ram_size == 0)
                 return -ENOMEM;
 
-        if (!nvbo->contig)
-                size_nc = 1 << nvbo->page;
+        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+        mem = nouveau_mem(reg);
+        if (ret)
+                return ret;
 
-        ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
-                             reg->page_alignment << PAGE_SHIFT, size_nc,
-                             nvbo->comp << 8 | nvbo->kind, &node);
+        ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
         if (ret) {
-                reg->mm_node = NULL;
-                return (ret == -ENOSPC) ? 0 : ret;
+                nouveau_mem_del(reg);
+                if (ret == -ENOSPC) {
+                        reg->mm_node = NULL;
+                        return 0;
+                }
+                return ret;
         }
 
-        node->page_shift = nvbo->page;
-
-        reg->mm_node = node;
-        reg->start = node->offset >> PAGE_SHIFT;
         return 0;
 }
 
@@ -116,54 +107,24 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
         .debug = nouveau_manager_debug,
 };
 
-static void
-nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-                         struct ttm_mem_reg *reg)
-{
-        nvkm_mem_node_cleanup(reg->mm_node);
-        kfree(reg->mm_node);
-        reg->mm_node = NULL;
-}
-
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                          struct ttm_buffer_object *bo,
                          const struct ttm_place *place,
                          struct ttm_mem_reg *reg)
 {
-        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nvkm_mem *node;
+        struct nouveau_drm *drm = nvbo->cli->drm;
+        struct nouveau_mem *mem;
         int ret;
 
-        node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (!node)
-                return -ENOMEM;
+        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+        mem = nouveau_mem(reg);
+        if (ret)
+                return ret;
 
-        node->page_shift = 12;
-
-        switch (drm->client.device.info.family) {
-        case NV_DEVICE_INFO_V0_TNT:
-        case NV_DEVICE_INFO_V0_CELSIUS:
-        case NV_DEVICE_INFO_V0_KELVIN:
-        case NV_DEVICE_INFO_V0_RANKINE:
-        case NV_DEVICE_INFO_V0_CURIE:
-                break;
-        case NV_DEVICE_INFO_V0_TESLA:
-        case NV_DEVICE_INFO_V0_FERMI:
-        case NV_DEVICE_INFO_V0_KEPLER:
-        case NV_DEVICE_INFO_V0_MAXWELL:
-        case NV_DEVICE_INFO_V0_PASCAL:
-                if (drm->client.device.info.chipset != 0x50)
-                        node->memtype = nvbo->kind;
-                break;
-        default:
-                NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
-                        drm->client.device.info.family);
-                break;
-        }
-
-        reg->mm_node = node;
-        reg->start = 0;
+        mem->_mem = &mem->__mem;
+        reg->start = 0;
         return 0;
 }
 
@@ -171,50 +132,40 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
         .init = nouveau_manager_init,
         .takedown = nouveau_manager_fini,
         .get_node = nouveau_gart_manager_new,
-        .put_node = nouveau_gart_manager_del,
+        .put_node = nouveau_manager_del,
         .debug = nouveau_manager_debug
 };
 
-static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
-{
-        struct nvkm_mem *node = reg->mm_node;
-        if (node->vma[0].node)
-                nvkm_vm_put(&node->vma[0]);
-        kfree(reg->mm_node);
-        reg->mm_node = NULL;
-}
-
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                       struct ttm_buffer_object *bo,
                       const struct ttm_place *place,
                       struct ttm_mem_reg *reg)
 {
-        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+        struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct nouveau_drm *drm = nvbo->cli->drm;
+        struct nouveau_mem *mem;
         struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
-        struct nvkm_mem *node;
         int ret;
 
-        node = kzalloc(sizeof(*node), GFP_KERNEL);
-        if (!node)
-                return -ENOMEM;
+        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
+        mem = nouveau_mem(reg);
+        if (ret)
+                return ret;
 
-        node->page_shift = 12;
-
-        ret = nvkm_vm_get(mmu->vmm, reg->num_pages << 12, node->page_shift,
-                          NV_MEM_ACCESS_RW, &node->vma[0]);
+        ret = nvkm_vm_get(mmu->vmm, reg->num_pages << 12, 12,
+                          NV_MEM_ACCESS_RW, &mem->vma[0]);
         if (ret) {
+                nouveau_mem_del(reg);
                 if (ret == -ENOSPC) {
                         reg->mm_node = NULL;
-                        ret = 0;
+                        return 0;
                 }
-                kfree(node);
                 return ret;
         }
 
-        reg->mm_node = node;
-        reg->start = node->vma[0].offset >> PAGE_SHIFT;
+        mem->_mem = &mem->__mem;
+        reg->start = mem->vma[0].addr >> PAGE_SHIFT;
         return 0;
 }
 
@@ -222,7 +173,7 @@ const struct ttm_mem_type_manager_func nv04_gart_manager = {
         .init = nouveau_manager_init,
         .takedown = nouveau_manager_fini,
         .get_node = nv04_gart_manager_new,
-        .put_node = nv04_gart_manager_del,
+        .put_node = nouveau_manager_del,
         .debug = nouveau_manager_debug
 };
 
@@ -530,7 +530,6 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
         /* present memory for being mapped using small pages */
         node->mem.size = size >> 12;
         node->mem.memtype = 0;
-        node->mem.page_shift = 12;
 
         nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
                    size, align, node->mem.offset);
 
@@ -356,7 +356,6 @@ nv50_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
         if (ret)
                 return ret;
 
-        iobj->mem->page_shift = 12;
         return 0;
 }
 