Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	387
1 file changed, 266 insertions(+), 121 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 288eebc70a67..f26562eafffc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -58,7 +58,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+	struct nvkm_fb *fb = nvxx_fb(drm);
 	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
@@ -109,7 +109,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 		   u32 size, u32 pitch, u32 zeta)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
+	struct nvkm_fb *fb = nvxx_fb(drm);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
@@ -144,14 +144,24 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	nouveau_bo_del_io_reserve_lru(bo);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 
+	if (bo->base.import_attach)
+		drm_prime_gem_destroy(&bo->base, bo->sg);
+
 	/*
 	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
 	 * initialized, so don't attempt to release it.
 	 */
-	if (bo->base.dev)
+	if (bo->base.dev) {
+		/* Gem objects not being shared with other VMs get their
+		 * dma_resv from a root GEM object.
+		 */
+		if (nvbo->no_share)
+			drm_gem_object_put(nvbo->r_obj);
+
 		drm_gem_object_release(&bo->base);
-	else
+	} else {
 		dma_resv_fini(&bo->base._resv);
+	}
 
 	kfree(nvbo);
 }
@@ -199,12 +209,12 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
 
 struct nouveau_bo *
 nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
-		 u32 tile_mode, u32 tile_flags)
+		 u32 tile_mode, u32 tile_flags, bool internal)
 {
 	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
 	struct nvif_mmu *mmu = &cli->mmu;
-	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
+	struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
 	int i, pi = -1;
 
 	if (!*size) {
@@ -215,6 +225,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
 		return ERR_PTR(-ENOMEM);
+
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
@@ -232,6 +243,8 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		nvbo->force_coherent = true;
 	}
 
+	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
+
 	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
 		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
@@ -240,8 +253,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		}
 
 		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
-	} else
-	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+	} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
 		nvbo->comp = (tile_flags & 0x00030000) >> 16;
 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
@@ -252,48 +264,77 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		nvbo->zeta = (tile_flags & 0x00000007);
 	}
 	nvbo->mode = tile_mode;
-	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
 
-	/* Determine the desirable target GPU page size for the buffer. */
-	for (i = 0; i < vmm->page_nr; i++) {
-		/* Because we cannot currently allow VMM maps to fail
-		 * during buffer migration, we need to determine page
-		 * size for the buffer up-front, and pre-allocate its
-		 * page tables.
-		 *
-		 * Skip page sizes that can't support needed domains.
-		 */
-		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
-		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
-			continue;
-		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
-		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
-			continue;
+	if (!nouveau_cli_uvmm(cli) || internal) {
+		/* Determine the desirable target GPU page size for the buffer. */
+		for (i = 0; i < vmm->page_nr; i++) {
+			/* Because we cannot currently allow VMM maps to fail
+			 * during buffer migration, we need to determine page
+			 * size for the buffer up-front, and pre-allocate its
+			 * page tables.
+			 *
+			 * Skip page sizes that can't support needed domains.
+			 */
+			if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+			    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+				continue;
+			if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+			    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+				continue;
 
-		/* Select this page size if it's the first that supports
-		 * the potential memory domains, or when it's compatible
-		 * with the requested compression settings.
-		 */
-		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
-			pi = i;
+			/* Select this page size if it's the first that supports
+			 * the potential memory domains, or when it's compatible
+			 * with the requested compression settings.
+			 */
+			if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+				pi = i;
 
-		/* Stop once the buffer is larger than the current page size. */
-		if (*size >= 1ULL << vmm->page[i].shift)
-			break;
-	}
+			/* Stop once the buffer is larger than the current page size. */
+			if (*size >= 1ULL << vmm->page[i].shift)
+				break;
+		}
 
-	if (WARN_ON(pi < 0)) {
-		kfree(nvbo);
-		return ERR_PTR(-EINVAL);
-	}
+		if (WARN_ON(pi < 0)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+
+		/* Disable compression if suitable settings couldn't be found. */
+		if (nvbo->comp && !vmm->page[pi].comp) {
+			if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+				nvbo->kind = mmu->kind[nvbo->kind];
+			nvbo->comp = 0;
+		}
+		nvbo->page = vmm->page[pi].shift;
+	} else {
+		/* Determine the desirable target GPU page size for the buffer. */
+		for (i = 0; i < vmm->page_nr; i++) {
+			/* Because we cannot currently allow VMM maps to fail
+			 * during buffer migration, we need to determine page
+			 * size for the buffer up-front, and pre-allocate its
+			 * page tables.
+			 *
+			 * Skip page sizes that can't support needed domains.
+			 */
+			if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+				continue;
+			if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+			    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+				continue;
 
-	/* Disable compression if suitable settings couldn't be found. */
-	if (nvbo->comp && !vmm->page[pi].comp) {
-		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
-			nvbo->kind = mmu->kind[nvbo->kind];
-		nvbo->comp = 0;
+			/* pick the last one as it will be smallest. */
+			pi = i;
+
+			/* Stop once the buffer is larger than the current page size. */
+			if (*size >= 1ULL << vmm->page[i].shift)
+				break;
+		}
+		if (WARN_ON(pi < 0)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+		nvbo->page = vmm->page[pi].shift;
 	}
-	nvbo->page = vmm->page[pi].shift;
 
 	nouveau_bo_fixup_align(nvbo, align, size);
 
@@ -306,18 +347,26 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
 {
 	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
 	int ret;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false,
+		.resv = robj,
+	};
 
 	nouveau_bo_placement_set(nvbo, domain, 0);
 	INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
-	ret = ttm_bo_init_validate(nvbo->bo.bdev, &nvbo->bo, type,
-				   &nvbo->placement, align >> PAGE_SHIFT, false,
+	ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
+				   &nvbo->placement, align >> PAGE_SHIFT, &ctx,
 				   sg, robj, nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
 
+	if (!robj)
+		ttm_bo_unreserve(&nvbo->bo);
+
 	return 0;
 }
 
@@ -331,7 +380,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	int ret;
 
 	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
-				tile_flags);
+				tile_flags, true);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
 
@@ -339,6 +388,11 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	dma_resv_init(&nvbo->bo.base._resv);
 	drm_vma_node_reset(&nvbo->bo.base.vma_node);
 
+	/* This must be called before ttm_bo_init_reserved(). Subsequent
+	 * bo_move() callbacks might already iterate the GEMs GPUVA list.
+	 */
+	drm_gem_gpuva_init(&nvbo->bo.base);
+
 	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
 	if (ret)
 		return ret;
@@ -347,25 +401,81 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 	return 0;
 }
 
-static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
+void
+nouveau_bo_unpin_del(struct nouveau_bo **pnvbo)
 {
-	*n = 0;
+	struct nouveau_bo *nvbo = *pnvbo;
 
-	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
-		pl[*n].mem_type = TTM_PL_VRAM;
-		pl[*n].flags = 0;
-		(*n)++;
+	if (!nvbo)
+		return;
+
+	nouveau_bo_unmap(nvbo);
+	nouveau_bo_unpin(nvbo);
+	nouveau_bo_fini(nvbo);
+
+	*pnvbo = NULL;
+}
+
+int
+nouveau_bo_new_pin(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
+	if (ret)
+		return ret;
+
+	ret = nouveau_bo_pin(nvbo, domain, false);
+	if (ret) {
+		nouveau_bo_fini(nvbo);
+		return ret;
 	}
-	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
-		pl[*n].mem_type = TTM_PL_TT;
-		pl[*n].flags = 0;
-		(*n)++;
+
+	*pnvbo = nvbo;
+	return 0;
+}
+
+int
+nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	ret = nouveau_bo_new_pin(cli, domain, size, &nvbo);
+	if (ret)
+		return ret;
+
+	ret = nouveau_bo_map(nvbo);
+	if (ret) {
+		nouveau_bo_unpin_del(&nvbo);
+		return ret;
 	}
-	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
-		pl[*n].mem_type = TTM_PL_SYSTEM;
-		pl[(*n)++].flags = 0;
+
+	*pnvbo = nvbo;
+	return 0;
+}
+
+int
+nouveau_bo_new_map_gpu(struct nouveau_cli *cli, u32 domain, u32 size,
+		       struct nouveau_bo **pnvbo, struct nouveau_vma **pvma)
+{
+	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	ret = nouveau_bo_new_map(cli, domain, size, &nvbo);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vma_new(nvbo, vmm, pvma);
+	if (ret) {
+		nouveau_bo_unpin_del(&nvbo);
+		return ret;
 	}
+
+	*pnvbo = nvbo;
+	return 0;
 }
 
 static void
@@ -395,10 +505,6 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
 			nvbo->placements[i].fpfn = fpfn;
 			nvbo->placements[i].lpfn = lpfn;
 		}
-		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
-			nvbo->busy_placements[i].fpfn = fpfn;
-			nvbo->busy_placements[i].lpfn = lpfn;
-		}
 	}
 }
 
@@ -406,29 +512,43 @@ void
 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
			 uint32_t busy)
 {
-	struct ttm_placement *pl = &nvbo->placement;
+	unsigned int *n = &nvbo->placement.num_placement;
+	struct ttm_place *pl = nvbo->placements;
 
-	pl->placement = nvbo->placements;
-	set_placement_list(nvbo->placements, &pl->num_placement, domain);
+	domain |= busy;
 
-	pl->busy_placement = nvbo->busy_placements;
-	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
-			   domain | busy);
+	*n = 0;
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+		pl[*n].mem_type = TTM_PL_VRAM;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_VRAM ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+		pl[*n].mem_type = TTM_PL_TT;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_GART ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+		pl[*n].mem_type = TTM_PL_SYSTEM;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_CPU ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
+	nvbo->placement.placement = nvbo->placements;
 
 	set_placement_range(nvbo, domain);
 }
 
-int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	bool force = false, evict = false;
-	int ret;
+	int ret = 0;
 
-	ret = ttm_bo_reserve(bo, false, false, NULL);
-	if (ret)
-		return ret;
+	dma_resv_assert_held(bo->base.resv);
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
 	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
@@ -491,20 +611,15 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 out:
 	if (force && ret)
 		nvbo->contig = false;
-	ttm_bo_unreserve(bo);
 	return ret;
 }
 
-int
-nouveau_bo_unpin(struct nouveau_bo *nvbo)
+void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
-	int ret;
 
-	ret = ttm_bo_reserve(bo, false, false, NULL);
-	if (ret)
-		return ret;
+	dma_resv_assert_held(bo->base.resv);
 
 	ttm_bo_unpin(&nvbo->bo);
 	if (!nvbo->bo.pin_count) {
@@ -519,8 +634,33 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 			break;
 		}
 	}
+}
 
+int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+{
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, false, NULL);
+	if (ret)
+		return ret;
+	ret = nouveau_bo_pin_locked(nvbo, domain, contig);
+	ttm_bo_unreserve(bo);
+
+	return ret;
+}
+
+int nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, false, NULL);
+	if (ret)
+		return ret;
+	nouveau_bo_unpin_locked(nvbo);
 	ttm_bo_unreserve(bo);
+
 	return 0;
 }
 
@@ -789,7 +929,7 @@ done:
 		nvif_vmm_put(vmm, &old_mem->vma[1]);
 		nvif_vmm_put(vmm, &old_mem->vma[0]);
 	}
-	return 0;
+	return ret;
 }
 
 static int
@@ -799,7 +939,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
-	struct nouveau_cli *cli = (void *)chan->user.client;
+	struct nouveau_cli *cli = chan->cli;
 	struct nouveau_fence *fence;
 	int ret;
 
@@ -817,29 +957,33 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 		mutex_lock(&cli->mutex);
 	else
 		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+
 	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
-	if (ret == 0) {
-		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
-		if (ret == 0) {
-			ret = nouveau_fence_new(chan, false, &fence);
-			if (ret == 0) {
-				/* TODO: figure out a better solution here
-				 *
-				 * wait on the fence here explicitly as going through
-				 * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
-				 *
-				 * Without this the operation can timeout and we'll fallback to a
-				 * software copy, which might take several minutes to finish.
-				 */
-				nouveau_fence_wait(fence, false, false);
-				ret = ttm_bo_move_accel_cleanup(bo,
-								&fence->base,
-								evict, false,
-								new_reg);
-				nouveau_fence_unref(&fence);
-			}
-		}
-	}
+	if (ret)
+		goto out_unlock;
+
+	ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
+	if (ret)
+		goto out_unlock;
+
+	ret = nouveau_fence_new(&fence, chan);
+	if (ret)
+		goto out_unlock;
+
+	/* TODO: figure out a better solution here
+	 *
+	 * wait on the fence here explicitly as going through
	 * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+	 *
+	 * Without this the operation can timeout and we'll fallback to a
+	 * software copy, which might take several minutes to finish.
+	 */
+	nouveau_fence_wait(fence, false, false);
+	ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
+					new_reg);
+	nouveau_fence_unref(&fence);
+
+out_unlock:
 	mutex_unlock(&cli->mutex);
 	return ret;
 }
@@ -856,6 +1000,9 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 			    struct ttm_resource *, struct ttm_resource *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
+		{  "COPY", 4, 0xcab5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{  "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{  "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{  "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -935,6 +1082,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 		list_for_each_entry(vma, &nvbo->vma_list, head) {
 			nouveau_vma_map(vma, mem);
 		}
+		nouveau_uvmm_bo_map_all(nvbo, mem);
 	} else {
 		list_for_each_entry(vma, &nvbo->vma_list, head) {
 			ret = dma_resv_wait_timeout(bo->base.resv,
@@ -943,6 +1091,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
 			WARN_ON(ret <= 0);
 			nouveau_vma_unmap(vma);
 		}
+		nouveau_uvmm_bo_unmap_all(nvbo);
 	}
 
 	if (new_reg)
@@ -999,25 +1148,23 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct drm_gem_object *obj = &bo->base;
 	struct ttm_resource *old_reg = bo->resource;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
-
 	if (new_reg->mem_type == TTM_PL_TT) {
 		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
 		if (ret)
 			return ret;
 	}
 
+	drm_gpuvm_bo_gem_evict(obj, evict);
 	nouveau_bo_move_ntfy(bo, new_reg);
 	ret = ttm_bo_wait_ctx(bo, ctx);
 	if (ret)
 		goto out_ntfy;
 
-	if (nvbo->bo.pin_count)
-		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
-
 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
 		if (ret)
@@ -1077,6 +1224,7 @@ out:
 out_ntfy:
 	if (ret) {
 		nouveau_bo_move_ntfy(bo, bo->resource);
+		drm_gpuvm_bo_gem_evict(obj, !evict);
 	}
 	return ret;
 }
@@ -1106,7 +1254,7 @@ static int
 nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
+	struct nvkm_device *device = nvxx_device(drm);
 	struct nouveau_mem *mem = nouveau_mem(reg);
 	struct nvif_mmu *mmu = &drm->client.mmu;
 	int ret;
@@ -1136,7 +1284,7 @@ retry:
 		fallthrough;	/* tiled memory */
 	case TTM_PL_VRAM:
 		reg->bus.offset = (reg->start << PAGE_SHIFT) +
-			device->func->resource_addr(device, 1);
+			device->func->resource_addr(device, NVKM_BAR1_FB);
 		reg->bus.is_iomem = true;
 
 		/* Some BARs do not support being ioremapped WC */
@@ -1202,6 +1350,8 @@ out:
 			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
 					   bdev->dev_mapping);
 			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+			nvbo->bo.resource->bus.offset = 0;
+			nvbo->bo.resource->bus.addr = NULL;
 			goto retry;
 		}
 
@@ -1224,8 +1374,8 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvkm_device *device = nvxx_device(&drm->client.device);
-	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
+	struct nvkm_device *device = nvxx_device(drm);
+	u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT;
 	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1252,11 +1402,6 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 		nvbo->placements[i].fpfn = 0;
 		nvbo->placements[i].lpfn = mappable;
 	}
 
-	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
-		nvbo->busy_placements[i].fpfn = 0;
-		nvbo->busy_placements[i].lpfn = mappable;
-	}
-
 	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
 }
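
Note (not part of the diff above): the new nouveau_bo_new_pin()/nouveau_bo_new_map()/nouveau_bo_new_map_gpu() helpers bundle the common create + pin + map pattern, and nouveau_bo_unpin_del() tears it down again. A minimal usage sketch, assuming a caller that already has a struct nouveau_cli; the function name example_alloc_scratch() and the nouveau_vma_del() call for dropping the GPU mapping are illustrative assumptions, not taken from this diff.

#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include "nouveau_vmm.h"

/* Hypothetical helper: allocate a page-sized, pinned, CPU- and GPU-mapped
 * GART buffer with the new helpers, then release it again.
 */
static int example_alloc_scratch(struct nouveau_cli *cli)
{
	struct nouveau_bo *nvbo;
	struct nouveau_vma *vma;
	int ret;

	/* create + pin + kmap + GPU map in one call */
	ret = nouveau_bo_new_map_gpu(cli, NOUVEAU_GEM_DOMAIN_GART, PAGE_SIZE,
				     &nvbo, &vma);
	if (ret)
		return ret;

	/* ... use the CPU mapping and vma->addr on the GPU ... */

	nouveau_vma_del(&vma);		/* drop the GPU mapping first */
	nouveau_bo_unpin_del(&nvbo);	/* unmap, unpin and free the BO */
	return 0;
}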
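Note (not part of the diff above): nouveau_bo_pin()/nouveau_bo_unpin() are now thin wrappers that take the reservation around nouveau_bo_pin_locked()/nouveau_bo_unpin_locked(). A rough sketch of how a caller that already holds the BO's dma_resv would use the _locked variant; example_pin_under_resv() is a hypothetical name, and the plain dma_resv_lock() stands in for whatever locking context (e.g. drm_exec) the real caller uses.

#include <linux/dma-resv.h>
#include "nouveau_bo.h"

/* Hypothetical caller that already needs the reservation for other work
 * and therefore uses the _locked pin variant instead of nouveau_bo_pin(),
 * which would try to reserve the BO a second time.
 */
static int example_pin_under_resv(struct nouveau_bo *nvbo)
{
	int ret;

	ret = dma_resv_lock(nvbo->bo.base.resv, NULL);
	if (ret)
		return ret;

	ret = nouveau_bo_pin_locked(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);

	dma_resv_unlock(nvbo->bo.base.resv);
	return ret;
}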
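Note (not part of the diff above): with set_placement_list() folded into nouveau_bo_placement_set(), the separate busy_placements array is gone; a domain bit that only appears in the busy mask becomes a single placement entry carrying TTM_PL_FLAG_FALLBACK. As a rough illustration, a call like nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, NOUVEAU_GEM_DOMAIN_GART) ends up with a list equivalent to the sketch below (example_placements is just an illustrative name):

#include <drm/ttm/ttm_placement.h>

/* VRAM is tried first; the GART entry is only considered as a fallback,
 * which replaces the former separate busy_placements pass.
 */
static const struct ttm_place example_placements[] = {
	{ .mem_type = TTM_PL_VRAM, .flags = 0 },
	{ .mem_type = TTM_PL_TT,   .flags = TTM_PL_FLAG_FALLBACK },
};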
