Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 99
1 file changed, 91 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 70fb003a6666..f26562eafffc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -58,7 +58,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
@@ -109,7 +109,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 zeta)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -144,6 +144,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+ if (bo->base.import_attach)
+ drm_prime_gem_destroy(&bo->base, bo->sg);
+
/*
* If nouveau_bo_new() allocated this buffer, the GEM object was never
* initialized, so don't attempt to release it.
@@ -398,6 +401,83 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
return 0;
}
+void
+nouveau_bo_unpin_del(struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo = *pnvbo;
+
+ if (!nvbo)
+ return;
+
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ nouveau_bo_fini(nvbo);
+
+ *pnvbo = NULL;
+}
+
+int
+nouveau_bo_new_pin(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_pin(nvbo, domain, false);
+ if (ret) {
+ nouveau_bo_fini(nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_pin(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_bo_map(nvbo);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
+int
+nouveau_bo_new_map_gpu(struct nouveau_cli *cli, u32 domain, u32 size,
+ struct nouveau_bo **pnvbo, struct nouveau_vma **pvma)
+{
+ struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
+ struct nouveau_bo *nvbo;
+ int ret;
+
+ ret = nouveau_bo_new_map(cli, domain, size, &nvbo);
+ if (ret)
+ return ret;
+
+ ret = nouveau_vma_new(nvbo, vmm, pvma);
+ if (ret) {
+ nouveau_bo_unpin_del(&nvbo);
+ return ret;
+ }
+
+ *pnvbo = nvbo;
+ return 0;
+}
+
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
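The four helpers added above fold nouveau's usual alloc/pin/map/teardown boilerplate into single calls. A minimal caller sketch, assuming a valid struct nouveau_cli; the function name, buffer size and NOUVEAU_GEM_DOMAIN_GART domain are illustrative and not taken from this patch:

/* Hypothetical caller: allocate, pin and CPU-map a 4 KiB GART buffer
 * in one call, then tear it down with the matching helper. */
static int example_scratch_buffer(struct nouveau_cli *cli)
{
	struct nouveau_bo *bo = NULL;
	int ret;

	ret = nouveau_bo_new_map(cli, NOUVEAU_GEM_DOMAIN_GART, 0x1000, &bo);
	if (ret)
		return ret;

	/* ... write through the CPU mapping (bo->kmap) ... */

	/* Unmaps, unpins, drops the object and NULLs the caller's
	 * pointer; also safe to call when the pointer is already NULL. */
	nouveau_bo_unpin_del(&bo);
	return 0;
}

Note that nouveau_bo_unpin_del() is the single teardown path for all three constructors, which is what lets the error paths above stay one line each.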
@@ -849,7 +929,7 @@ done:
nvif_vmm_put(vmm, &old_mem->vma[1]);
nvif_vmm_put(vmm, &old_mem->vma[0]);
}
- return 0;
+ return ret;
}
static int
@@ -859,7 +939,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
struct nouveau_fence *fence;
int ret;
@@ -920,6 +1000,9 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_resource *, struct ttm_resource *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xcab5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1171,7 +1254,7 @@ static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
struct nouveau_mem *mem = nouveau_mem(reg);
struct nvif_mmu *mmu = &drm->client.mmu;
int ret;
@@ -1201,7 +1284,7 @@ retry:
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
reg->bus.offset = (reg->start << PAGE_SHIFT) +
- device->func->resource_addr(device, 1);
+ device->func->resource_addr(device, NVKM_BAR1_FB);
reg->bus.is_iomem = true;
/* Some BARs do not support being ioremapped WC */
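Here and in the fault-notify hunk below, the bare BAR index 1 is replaced by NVKM_BAR1_FB, so resource lookups name the aperture they touch. A sketch of the shape such an identifier enum could take, assuming the usual NVIDIA BAR layout; the authoritative definition lives in the nvkm device headers:

/* Sketch only: BAR identifiers under the conventional NVIDIA layout. */
enum nvkm_bar_id {
	NVKM_BAR0_PRI,	/* register (PRI) aperture */
	NVKM_BAR1_FB,	/* framebuffer aperture */
	NVKM_BAR2_INST,	/* instance memory aperture */
};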
@@ -1291,8 +1374,8 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
- u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
+ struct nvkm_device *device = nvxx_device(drm);
+ u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT;
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got