Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 158
1 file changed, 79 insertions(+), 79 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 00cc7d1abaa3..db961eade225 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -58,7 +58,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nvkm_fb_tile *tile = &fb->tile.region[i];
nouveau_fence_unref(&reg->fence);
@@ -109,7 +109,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 zeta)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+ struct nvkm_fb *fb = nvxx_fb(drm);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
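
Both hunks above swap the old accessor form nvxx_fb(&drm->client.device) for nvxx_fb(drm); later hunks make the same change for nvxx_device(). A minimal sketch of what the reworked accessor plausibly reduces to, assuming it simply chains through nvxx_device() (the real definition lives in the nvif headers, not in this diff):

	/* illustrative only; assumes nvkm_device keeps its fb member */
	static inline struct nvkm_fb *
	example_nvxx_fb(struct nouveau_drm *drm)
	{
		return nvxx_device(drm)->fb;
	}
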
@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
}
nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
- if (!nouveau_cli_uvmm(cli) || internal) {
- /* for BO noVM allocs, don't assign kinds */
- if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
- nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
- nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
- } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- nvbo->kind = (tile_flags & 0x00007f00) >> 8;
- nvbo->comp = (tile_flags & 0x00030000) >> 16;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
- } else {
- nvbo->zeta = (tile_flags & 0x00000007);
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
}
- nvbo->mode = tile_mode;
+ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+ } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+ nvbo->comp = (tile_flags & 0x00030000) >> 16;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ nvbo->zeta = (tile_flags & 0x00000007);
+ }
+ nvbo->mode = tile_mode;
+
+ if (!nouveau_cli_uvmm(cli) || internal) {
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
}
nvbo->page = vmm->page[pi].shift;
} else {
- /* reject other tile flags when in VM mode. */
- if (tile_mode)
- return ERR_PTR(-EINVAL);
- if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
- return ERR_PTR(-EINVAL);
-
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
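
The restructure above hoists the kind/comp/zeta decode out of the '!nouveau_cli_uvmm(cli) || internal' branch so it now runs for every allocation, and drops the old rejection of tile flags in VM mode; only the page-size selection stays conditional. A worked illustration of the Fermi-era decode kept by the hunk (masks copied from the code; the helper name is made up):

	/* hypothetical helper: tile_flags 0x0000fe00 -> kind 0xfe */
	static u8 example_kind_from_tile_flags(u32 tile_flags)
	{
		return (tile_flags & 0x0000ff00) >> 8;
	}
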
@@ -405,27 +399,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
-{
- *n = 0;
-
- if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
- pl[*n].mem_type = TTM_PL_VRAM;
- pl[*n].flags = 0;
- (*n)++;
- }
- if (domain & NOUVEAU_GEM_DOMAIN_GART) {
- pl[*n].mem_type = TTM_PL_TT;
- pl[*n].flags = 0;
- (*n)++;
- }
- if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
- pl[*n].mem_type = TTM_PL_SYSTEM;
- pl[(*n)++].flags = 0;
- }
-}
-
-static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
@@ -452,10 +425,6 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
nvbo->placements[i].fpfn = fpfn;
nvbo->placements[i].lpfn = lpfn;
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = fpfn;
- nvbo->busy_placements[i].lpfn = lpfn;
- }
}
}
@@ -463,29 +432,43 @@ void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
uint32_t busy)
{
- struct ttm_placement *pl = &nvbo->placement;
+ unsigned int *n = &nvbo->placement.num_placement;
+ struct ttm_place *pl = nvbo->placements;
- pl->placement = nvbo->placements;
- set_placement_list(nvbo->placements, &pl->num_placement, domain);
+ domain |= busy;
- pl->busy_placement = nvbo->busy_placements;
- set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- domain | busy);
+ *n = 0;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+ pl[*n].mem_type = TTM_PL_VRAM;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_VRAM ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+ pl[*n].mem_type = TTM_PL_TT;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_GART ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+ pl[*n].mem_type = TTM_PL_SYSTEM;
+ pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_CPU ?
+ TTM_PL_FLAG_FALLBACK : 0;
+ (*n)++;
+ }
+ nvbo->placement.placement = nvbo->placements;
set_placement_range(nvbo, domain);
}
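
With set_placement_list() gone, nouveau_bo_placement_set() builds a single placement array: domains present only in 'busy' are no longer kept in a second list but become regular entries flagged TTM_PL_FLAG_FALLBACK, which TTM only tries once the preferred placements cannot be satisfied. A usage sketch under that reading (the call is real; the comment spells out the array it now produces):

	/* prefer VRAM, accept GART under pressure; yields
	 *   { TTM_PL_VRAM, flags = 0 },
	 *   { TTM_PL_TT,   flags = TTM_PL_FLAG_FALLBACK }
	 */
	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM,
				 NOUVEAU_GEM_DOMAIN_GART);
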
-int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
bool force = false, evict = false;
- int ret;
+ int ret = 0;
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (ret)
- return ret;
+ dma_resv_assert_held(bo->base.resv);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
@@ -548,20 +531,15 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
out:
if (force && ret)
nvbo->contig = false;
- ttm_bo_unreserve(bo);
return ret;
}
-int
-nouveau_bo_unpin(struct nouveau_bo *nvbo)
+void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret;
- ret = ttm_bo_reserve(bo, false, false, NULL);
- if (ret)
- return ret;
+ dma_resv_assert_held(bo->base.resv);
ttm_bo_unpin(&nvbo->bo);
if (!nvbo->bo.pin_count) {
@@ -576,8 +554,33 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
break;
}
}
+}
+int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+{
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret)
+ return ret;
+ ret = nouveau_bo_pin_locked(nvbo, domain, contig);
+ ttm_bo_unreserve(bo);
+
+ return ret;
+}
+
+int nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+ struct ttm_buffer_object *bo = &nvbo->bo;
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret)
+ return ret;
+ nouveau_bo_unpin_locked(nvbo);
ttm_bo_unreserve(bo);
+
return 0;
}
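
nouveau_bo_pin()/nouveau_bo_unpin() are now thin wrappers that take the reservation around the new _locked variants, which instead assert the dma_resv lock via dma_resv_assert_held(). That lets paths that already hold the reservation, plausibly the GEM/dma-buf pin hooks, call the _locked forms directly. A sketch of such a caller (the hook and the locking context are illustrative assumptions, not part of this diff):

	static int example_gem_pin(struct drm_gem_object *obj)
	{
		struct nouveau_bo *nvbo = nouveau_gem_object(obj);

		/* caller (e.g. dma-buf core) already holds obj->resv */
		return nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
	}
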
@@ -856,7 +859,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
- struct nouveau_cli *cli = (void *)chan->user.client;
+ struct nouveau_cli *cli = chan->cli;
struct nouveau_fence *fence;
int ret;
@@ -1168,7 +1171,7 @@ static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
struct nouveau_mem *mem = nouveau_mem(reg);
struct nvif_mmu *mmu = &drm->client.mmu;
int ret;
@@ -1264,6 +1267,8 @@ out:
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+ nvbo->bo.resource->bus.offset = 0;
+ nvbo->bo.resource->bus.addr = NULL;
goto retry;
}
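
After the eviction path frees the mapping with nouveau_ttm_io_mem_free_locked(), the two added lines also clear the resource's cached bus binding, plausibly so the retry (and any later io_mem_reserve) cannot mistake the stale offset/addr for a live mapping. The invariant being restored, sketched with a hypothetical predicate:

	/* illustrative only: after eviction no bus binding should remain */
	static bool example_bus_is_mapped(const struct ttm_resource *res)
	{
		return res->bus.addr != NULL || res->bus.offset != 0;
	}
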
@@ -1286,7 +1291,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nvkm_device *device = nvxx_device(&drm->client.device);
+ struct nvkm_device *device = nvxx_device(drm);
u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
int i, ret;
@@ -1314,11 +1319,6 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->placements[i].lpfn = mappable;
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = 0;
- nvbo->busy_placements[i].lpfn = mappable;
- }
-
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
}
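
Since nouveau_bo_placement_set() now writes fallback entries into the same placements[] array, the single loop retained above already clamps both preferred and fallback placements to the BAR1-mappable range, which is why the separate busy_placements loop could be dropped. The clamp, restated for reference (it mirrors the retained loop; 'mappable' is the BAR1 size in pages computed earlier in the function):

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}
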