Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c'):

 drivers/gpu/drm/nouveau/nouveau_bo.c | 1963 ++++++++++++++++----------------
 1 file changed, 952 insertions(+), 1011 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4b1afb131380..f26562eafffc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,20 +27,26 @@
  *	Jeremy Kolb <jkolb@brandeis.edu>
  */
 
-#include <core/engine.h>
-#include <linux/swiotlb.h>
+#include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_tt.h>
 
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-
-#include "nouveau_drm.h"
-#include "nouveau_dma.h"
+#include "nouveau_drv.h"
+#include "nouveau_chan.h"
 #include "nouveau_fence.h"
 #include "nouveau_bo.h"
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
+#include "nouveau_mem.h"
+#include "nouveau_vmm.h"
+
+#include <nvif/class.h>
+#include <nvif/if500b.h>
+#include <nvif/if900b.h>
+
+static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
+			       struct ttm_resource *reg);
+static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
 
 /*
  * NV10-NV40 tiling helpers
@@ -52,24 +58,18 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
-	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
-	struct nouveau_engine *engine;
+	struct nvkm_fb *fb = nvxx_fb(drm);
+	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
 
 	if (tile->pitch)
-		pfb->tile.fini(pfb, i, tile);
+		nvkm_fb_tile_fini(fb, i, tile);
 
 	if (pitch)
-		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
+		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
 
-	pfb->tile.prog(pfb, i, tile);
-
-	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
-		engine->tile_prog(engine, i);
-	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
-		engine->tile_prog(engine, i);
+	nvkm_fb_tile_prog(fb, i, tile);
 }
 
 static struct nouveau_drm_tile *
@@ -92,18 +92,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
 static void
 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
-			struct nouveau_fence *fence)
+			struct dma_fence *fence)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		if (fence) {
-			/* Mark it as pending. */
-			tile->fence = fence;
-			nouveau_fence_ref(fence);
-		}
-
+		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
@@ -111,21 +106,21 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
 
 static struct nouveau_drm_tile *
 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
-		   u32 size, u32 pitch, u32 flags)
+		   u32 size, u32 pitch, u32 zeta)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
+	struct nvkm_fb *fb = nvxx_fb(drm);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
-	for (i = 0; i < pfb->tile.regions; i++) {
+	for (i = 0; i < fb->tile.regions; i++) {
 		tile = nv10_bo_get_tile_region(dev, i);
 
 		if (pitch && !found) {
 			found = tile;
 			continue;
 
-		} else if (tile && pfb->tile.region[i].pitch) {
+		} else if (tile && fb->tile.region[i].pitch) {
 			/* Kill an unused tile region. */
 			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
 		}
@@ -134,8 +129,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 	}
 
 	if (found)
-		nv10_bo_update_tile_region(dev, found, addr, size,
-					   pitch, flags);
+		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
 	return found;
 }
 
@@ -146,240 +140,541 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	if (unlikely(nvbo->gem))
-		DRM_ERROR("bo %p still attached to GEM object\n", bo);
+	WARN_ON(nvbo->bo.pin_count > 0);
+	nouveau_bo_del_io_reserve_lru(bo);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+
+	if (bo->base.import_attach)
+		drm_prime_gem_destroy(&bo->base, bo->sg);
+
+	/*
+	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
+	 * initialized, so don't attempt to release it.
+	 */
+	if (bo->base.dev) {
+		/* Gem objects not being shared with other VMs get their
+		 * dma_resv from a root GEM object.
+		 */
+		if (nvbo->no_share)
+			drm_gem_object_put(nvbo->r_obj);
+
+		drm_gem_object_release(&bo->base);
+	} else {
+		dma_resv_fini(&bo->base._resv);
+	}
+
 	kfree(nvbo);
 }
 
+static inline u64
+roundup_64(u64 x, u32 y)
+{
+	x += y - 1;
+	do_div(x, y);
+	return x * y;
+}
+
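The new roundup_64() helper above exists because buffer sizes are now u64: a plain 64-by-32-bit '/' or '%' is not available on all 32-bit targets, so the kernel's do_div() macro (which divides in place) does the division. A minimal user-space sketch of the same rounding logic, with ordinary division standing in for do_div():

    #include <stdint.h>

    /* Round a 64-bit size x up to a multiple of a 32-bit alignment y. */
    static inline uint64_t roundup_64_sketch(uint64_t x, uint32_t y)
    {
            x += y - 1;     /* bump past the previous multiple */
            x /= y;         /* chop off the remainder (do_div() in-kernel) */
            return x * y;   /* scale back up */
    }
    /* e.g. roundup_64_sketch(0x10001, 0x10000) == 0x20000 */
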
 static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
-		       int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nouveau_device *device = nv_device(drm->device);
+	struct nvif_device *device = &drm->client.device;
 
-	if (device->card_type < NV_50) {
-		if (nvbo->tile_mode) {
-			if (device->chipset >= 0x40) {
+	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
+		if (nvbo->mode) {
+			if (device->info.chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
-			} else if (device->chipset >= 0x30) {
+			} else if (device->info.chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
-			} else if (device->chipset >= 0x20) {
+			} else if (device->info.chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup(*size, 64 * nvbo->tile_mode);
+				*size = roundup_64(*size, 64 * nvbo->mode);
 
-			} else if (device->chipset >= 0x10) {
+			} else if (device->info.chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup(*size, 32 * nvbo->tile_mode);
+				*size = roundup_64(*size, 32 * nvbo->mode);
 			}
 		}
 	} else {
-		*size = roundup(*size, (1 << nvbo->page_shift));
-		*align = max((1 << nvbo->page_shift), *align);
+		*size = roundup_64(*size, (1 << nvbo->page));
+		*align = max((1 << nvbo->page), *align);
 	}
 
-	*size = roundup(*size, PAGE_SIZE);
+	*size = roundup_64(*size, PAGE_SIZE);
 }
 
-int
-nouveau_bo_new(struct drm_device *dev, int size, int align,
-	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-	       struct sg_table *sg,
-	       struct nouveau_bo **pnvbo)
+struct nouveau_bo *
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+		 u32 tile_mode, u32 tile_flags, bool internal)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
-	size_t acc_size;
-	int ret;
-	int type = ttm_bo_type_device;
+	struct nvif_mmu *mmu = &cli->mmu;
+	struct nvif_vmm *vmm = &nouveau_cli_vmm(cli)->vmm;
+	int i, pi = -1;
 
-	if (sg)
-		type = ttm_bo_type_sg;
+	if (!*size) {
+		NV_WARN(drm, "skipped size %016llx\n", *size);
+		return ERR_PTR(-EINVAL);
+	}
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
+
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
-	nvbo->tile_mode = tile_mode;
-	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
-	nvbo->page_shift = 12;
-	if (drm->client.base.vm) {
-		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
+
+	/* This is confusing, and doesn't actually mean we want an uncached
+	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
+	 * into in nouveau_gem_new().
+	 */
+	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
+		/* Determine if we can get a cache-coherent map, forcing
+		 * uncached mapping if we can't.
+		 */
+		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
+			nvbo->force_coherent = true;
 	}
 
-	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
-	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
-	nouveau_bo_placement_set(nvbo, flags, 0);
+	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
 
-	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
-				       sizeof(struct nouveau_bo));
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
 
-	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
-			  type, &nvbo->placement,
-			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
-			  nouveau_bo_del_ttm);
+		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+	} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
+
+	if (!nouveau_cli_uvmm(cli) || internal) {
+		/* Determine the desirable target GPU page size for the buffer. */
+		for (i = 0; i < vmm->page_nr; i++) {
+			/* Because we cannot currently allow VMM maps to fail
+			 * during buffer migration, we need to determine page
+			 * size for the buffer up-front, and pre-allocate its
+			 * page tables.
+			 *
+			 * Skip page sizes that can't support needed domains.
+			 */
+			if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
+			    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+				continue;
+			if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+			    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+				continue;
+
+			/* Select this page size if it's the first that supports
+			 * the potential memory domains, or when it's compatible
+			 * with the requested compression settings.
+			 */
+			if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
+				pi = i;
+
+			/* Stop once the buffer is larger than the current page size. */
+			if (*size >= 1ULL << vmm->page[i].shift)
+				break;
+		}
+
+		if (WARN_ON(pi < 0)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+
+		/* Disable compression if suitable settings couldn't be found. */
+		if (nvbo->comp && !vmm->page[pi].comp) {
+			if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
+				nvbo->kind = mmu->kind[nvbo->kind];
+			nvbo->comp = 0;
+		}
+		nvbo->page = vmm->page[pi].shift;
+	} else {
+		/* Determine the desirable target GPU page size for the buffer. */
+		for (i = 0; i < vmm->page_nr; i++) {
+			/* Because we cannot currently allow VMM maps to fail
+			 * during buffer migration, we need to determine page
+			 * size for the buffer up-front, and pre-allocate its
+			 * page tables.
+			 *
+			 * Skip page sizes that can't support needed domains.
+			 */
+			if ((domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
+				continue;
+			if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
+			    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+				continue;
+
+			/* pick the last one as it will be smallest. */
+			pi = i;
+
+			/* Stop once the buffer is larger than the current page size. */
+			if (*size >= 1ULL << vmm->page[i].shift)
+				break;
+		}
+		if (WARN_ON(pi < 0)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+		nvbo->page = vmm->page[pi].shift;
+	}
+
+	nouveau_bo_fixup_align(nvbo, align, size);
+
+	return nvbo;
+}
+
+int
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
+		struct sg_table *sg, struct dma_resv *robj)
+{
+	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
+	int ret;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false,
+		.resv = robj,
+	};
+
+	nouveau_bo_placement_set(nvbo, domain, 0);
+	INIT_LIST_HEAD(&nvbo->io_reserve_lru);
+
+	ret = ttm_bo_init_reserved(nvbo->bo.bdev, &nvbo->bo, type,
+				   &nvbo->placement, align >> PAGE_SHIFT, &ctx,
+				   sg, robj, nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
 
+	if (!robj)
+		ttm_bo_unreserve(&nvbo->bo);
+
+	return 0;
+}
+
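The page-size selection loops in nouveau_bo_alloc() above walk the VMM's supported page sizes and keep the largest one the buffer can fill completely. A standalone sketch of that selection, assuming (as nvkm's vmm->page[] does) that the shift array is sorted largest-first; pick_page_shift() and page_shift[] are illustrative stand-ins, not kernel APIs:

    #include <stdint.h>

    /* Return the largest page shift whose page the buffer fills entirely,
     * falling back to the smallest supported size for tiny buffers.
     */
    static int pick_page_shift(const int *page_shift, int nr, uint64_t size)
    {
            int i, pi = -1;
            for (i = 0; i < nr; i++) {
                    pi = i;                              /* last = smallest so far */
                    if (size >= 1ULL << page_shift[i])   /* buffer fills this page? */
                            break;
            }
            return pi < 0 ? -1 : page_shift[pi];
    }
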
+int
+nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
+	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
+	       struct sg_table *sg, struct dma_resv *robj,
+	       struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
+				tile_flags, true);
+	if (IS_ERR(nvbo))
+		return PTR_ERR(nvbo);
+
+	nvbo->bo.base.size = size;
+	dma_resv_init(&nvbo->bo.base._resv);
+	drm_vma_node_reset(&nvbo->bo.base.vma_node);
+
+	/* This must be called before ttm_bo_init_reserved(). Subsequent
+	 * bo_move() callbacks might already iterate the GEMs GPUVA list.
+	 */
+	drm_gem_gpuva_init(&nvbo->bo.base);
+
+	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
+	if (ret)
+		return ret;
+
 	*pnvbo = nvbo;
 	return 0;
 }
 
-static void
-set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
+void
+nouveau_bo_unpin_del(struct nouveau_bo **pnvbo)
 {
-	*n = 0;
+	struct nouveau_bo *nvbo = *pnvbo;
+
+	if (!nvbo)
+		return;
+
+	nouveau_bo_unmap(nvbo);
+	nouveau_bo_unpin(nvbo);
+	nouveau_bo_fini(nvbo);
 
-	if (type & TTM_PL_FLAG_VRAM)
-		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
-	if (type & TTM_PL_FLAG_TT)
-		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
-	if (type & TTM_PL_FLAG_SYSTEM)
-		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
+	*pnvbo = NULL;
+}
+
+int
+nouveau_bo_new_pin(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
+	if (ret)
+		return ret;
+
+	ret = nouveau_bo_pin(nvbo, domain, false);
+	if (ret) {
+		nouveau_bo_fini(nvbo);
+		return ret;
+	}
+
+	*pnvbo = nvbo;
+	return 0;
+}
+
+int
+nouveau_bo_new_map(struct nouveau_cli *cli, u32 domain, u32 size, struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	ret = nouveau_bo_new_pin(cli, domain, size, &nvbo);
+	if (ret)
+		return ret;
+
+	ret = nouveau_bo_map(nvbo);
+	if (ret) {
+		nouveau_bo_unpin_del(&nvbo);
+		return ret;
+	}
+
+	*pnvbo = nvbo;
+	return 0;
+}
+
+int
+nouveau_bo_new_map_gpu(struct nouveau_cli *cli, u32 domain, u32 size,
+		       struct nouveau_bo **pnvbo, struct nouveau_vma **pvma)
+{
+	struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	ret = nouveau_bo_new_map(cli, domain, size, &nvbo);
+	if (ret)
+		return ret;
+
+	ret = nouveau_vma_new(nvbo, vmm, pvma);
+	if (ret) {
+		nouveau_bo_unpin_del(&nvbo);
+		return ret;
+	}
+
+	*pnvbo = nvbo;
+	return 0;
 }
 
 static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct nouveau_fb *pfb = nouveau_fb(drm->device);
-	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
+	u64 vram_size = drm->client.device.info.ram_size;
+	unsigned i, fpfn, lpfn;
 
-	if (nv_device(drm->device)->card_type == NV_10 &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
-	    nvbo->bo.mem.num_pages < vram_pages / 4) {
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
+	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
+	    nvbo->bo.base.size < vram_size / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
 		 * by independent memory controller units. Up to a 9x
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
-			nvbo->placement.fpfn = vram_pages / 2;
-			nvbo->placement.lpfn = ~0;
+		if (nvbo->zeta) {
+			fpfn = (vram_size / 2) >> PAGE_SHIFT;
+			lpfn = ~0;
 		} else {
-			nvbo->placement.fpfn = 0;
-			nvbo->placement.lpfn = vram_pages / 2;
+			fpfn = 0;
+			lpfn = (vram_size / 2) >> PAGE_SHIFT;
+		}
+		for (i = 0; i < nvbo->placement.num_placement; ++i) {
+			nvbo->placements[i].fpfn = fpfn;
+			nvbo->placements[i].lpfn = lpfn;
 		}
 	}
 }
 
 void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+			 uint32_t busy)
 {
-	struct ttm_placement *pl = &nvbo->placement;
-	uint32_t flags = TTM_PL_MASK_CACHING |
-			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
+	unsigned int *n = &nvbo->placement.num_placement;
+	struct ttm_place *pl = nvbo->placements;
 
-	pl->placement = nvbo->placements;
-	set_placement_list(nvbo->placements, &pl->num_placement,
-			   type, flags);
+	domain |= busy;
 
-	pl->busy_placement = nvbo->busy_placements;
-	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
-			   type | busy, flags);
+	*n = 0;
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+		pl[*n].mem_type = TTM_PL_VRAM;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_VRAM ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+		pl[*n].mem_type = TTM_PL_TT;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_GART ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+		pl[*n].mem_type = TTM_PL_SYSTEM;
+		pl[*n].flags = busy & NOUVEAU_GEM_DOMAIN_CPU ?
+			TTM_PL_FLAG_FALLBACK : 0;
+		(*n)++;
+	}
 
-	set_placement_range(nvbo, type);
+	nvbo->placement.placement = nvbo->placements;
+	set_placement_range(nvbo, domain);
 }
 
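nouveau_bo_placement_set() above folds the old separate "busy" placement list into a single list: domains that were only acceptable under memory pressure are now tagged TTM_PL_FLAG_FALLBACK instead of living in a second busy_placement array. A self-contained sketch of that single-list construction; the place struct, PL_FALLBACK flag and domain bit values are simplified stand-ins for the TTM types:

    /* Build one placement list where busy-only domains become fallbacks. */
    struct place { unsigned mem_type; unsigned flags; };
    enum { DOM_VRAM = 1u, DOM_GART = 2u, DOM_CPU = 4u, PL_FALLBACK = 8u };

    static unsigned build_placements(struct place *pl,
                                     unsigned domain, unsigned busy)
    {
            unsigned bit, n = 0;
            domain |= busy;                 /* busy-only domains are still listed */
            for (bit = DOM_VRAM; bit <= DOM_CPU; bit <<= 1) {
                    if (!(domain & bit))
                            continue;
                    pl[n].mem_type = bit;   /* stand-in for TTM_PL_* */
                    pl[n].flags = (busy & bit) ? PL_FALLBACK : 0;
                    n++;
            }
            return n;
    }
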
-int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
+int nouveau_bo_pin_locked(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
-	int ret;
+	bool force = false, evict = false;
+	int ret = 0;
 
-	ret = ttm_bo_reserve(bo, false, false, false, 0);
-	if (ret)
-		goto out;
+	dma_resv_assert_held(bo->base.resv);
 
-	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
-		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
-			 1 << bo->mem.mem_type, memtype);
-		ret = -EINVAL;
-		goto out;
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
+		if (!nvbo->contig) {
+			nvbo->contig = true;
+			force = true;
+			evict = true;
+		}
 	}
 
-	if (nvbo->pin_refcnt++)
-		goto out;
-
-	nouveau_bo_placement_set(nvbo, memtype, 0);
+	if (nvbo->bo.pin_count) {
+		bool error = evict;
 
-	ret = nouveau_bo_validate(nvbo, false, false);
-	if (ret == 0) {
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
-			drm->gem.vram_available -= bo->mem.size;
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
 			break;
 		case TTM_PL_TT:
-			drm->gem.gart_available -= bo->mem.size;
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
 			break;
 		default:
 			break;
 		}
+
+		if (error) {
+			NV_ERROR(drm, "bo %p pinned elsewhere: "
+				      "0x%08x vs 0x%08x\n", bo,
+				 bo->resource->mem_type, domain);
+			ret = -EBUSY;
+		}
+		ttm_bo_pin(&nvbo->bo);
+		goto out;
+	}
+
+	if (evict) {
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+		ret = nouveau_bo_validate(nvbo, false, false);
+		if (ret)
+			goto out;
+	}
+
+	nouveau_bo_placement_set(nvbo, domain, 0);
+	ret = nouveau_bo_validate(nvbo, false, false);
+	if (ret)
+		goto out;
+
+	ttm_bo_pin(&nvbo->bo);
+
+	switch (bo->resource->mem_type) {
+	case TTM_PL_VRAM:
+		drm->gem.vram_available -= bo->base.size;
+		break;
+	case TTM_PL_TT:
+		drm->gem.gart_available -= bo->base.size;
+		break;
+	default:
+		break;
 	}
+
 out:
-	ttm_bo_unreserve(bo);
+	if (force && ret)
+		nvbo->contig = false;
 	return ret;
 }
 
-int
-nouveau_bo_unpin(struct nouveau_bo *nvbo)
+void nouveau_bo_unpin_locked(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
-	int ret;
-
-	ret = ttm_bo_reserve(bo, false, false, false, 0);
-	if (ret)
-		return ret;
 
-	if (--nvbo->pin_refcnt)
-		goto out;
-
-	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+	dma_resv_assert_held(bo->base.resv);
 
-	ret = nouveau_bo_validate(nvbo, false, false);
-	if (ret == 0) {
-		switch (bo->mem.mem_type) {
+	ttm_bo_unpin(&nvbo->bo);
+	if (!nvbo->bo.pin_count) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
-			drm->gem.vram_available += bo->mem.size;
+			drm->gem.vram_available += bo->base.size;
 			break;
 		case TTM_PL_TT:
-			drm->gem.gart_available += bo->mem.size;
+			drm->gem.gart_available += bo->base.size;
 			break;
 		default:
 			break;
 		}
 	}
+}
 
-out:
+int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
+{
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, false, NULL);
+	if (ret)
+		return ret;
+	ret = nouveau_bo_pin_locked(nvbo, domain, contig);
 	ttm_bo_unreserve(bo);
+
 	return ret;
 }
 
+int nouveau_bo_unpin(struct nouveau_bo *nvbo)
+{
+	struct ttm_buffer_object *bo = &nvbo->bo;
+	int ret;
+
+	ret = ttm_bo_reserve(bo, false, false, NULL);
+	if (ret)
+		return ret;
+	nouveau_bo_unpin_locked(nvbo);
+	ttm_bo_unreserve(bo);
+
+	return 0;
+}
+
 int
 nouveau_bo_map(struct nouveau_bo *nvbo)
 {
 	int ret;
 
-	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+	ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap);
+
 	ttm_bo_unreserve(&nvbo->bo);
 	return ret;
 }
@@ -387,34 +682,118 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 void
 nouveau_bo_unmap(struct nouveau_bo *nvbo)
 {
-	if (nvbo)
-		ttm_bo_kunmap(&nvbo->kmap);
+	if (!nvbo)
+		return;
+
+	ttm_bo_kunmap(&nvbo->kmap);
+}
+
+void
+nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
+	int i, j;
+
+	if (!ttm_dma || !ttm_dma->dma_address)
+		return;
+	if (!ttm_dma->pages) {
+		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+		return;
+	}
+
+	/* Don't waste time looping if the object is coherent */
+	if (nvbo->force_coherent)
+		return;
+
+	i = 0;
+	while (i < ttm_dma->num_pages) {
+		struct page *p = ttm_dma->pages[i];
+		size_t num_pages = 1;
+
+		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+			if (++p != ttm_dma->pages[j])
+				break;
+
+			++num_pages;
+		}
+		dma_sync_single_for_device(drm->dev->dev,
+					   ttm_dma->dma_address[i],
+					   num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+		i += num_pages;
+	}
+}
+
+void
+nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
+	int i, j;
+
+	if (!ttm_dma || !ttm_dma->dma_address)
+		return;
+	if (!ttm_dma->pages) {
+		NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+		return;
+	}
+
+	/* Don't waste time looping if the object is coherent */
+	if (nvbo->force_coherent)
+		return;
+
+	i = 0;
+	while (i < ttm_dma->num_pages) {
+		struct page *p = ttm_dma->pages[i];
+		size_t num_pages = 1;
+
+		for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+			if (++p != ttm_dma->pages[j])
+				break;
+
+			++num_pages;
+		}
+
+		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
+					num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+		i += num_pages;
+	}
+}
+
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void
+nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_del_init(&nvbo->io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
 }
 
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 		    bool no_wait_gpu)
 {
+	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
 	int ret;
 
-	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-			      interruptible, no_wait_gpu);
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
 	if (ret)
 		return ret;
 
-	return 0;
-}
+	nouveau_bo_sync_for_device(nvbo);
 
-u16
-nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
-{
-	bool is_iomem;
-	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-	mem = &mem[index];
-	if (is_iomem)
-		return ioread16_native((void __force __iomem *)mem);
-	else
-		return *mem;
+	return 0;
 }
 
 void
@@ -422,7 +801,9 @@ nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
 {
 	bool is_iomem;
 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-	mem = &mem[index];
+
+	mem += index;
+
 	if (is_iomem)
 		iowrite16_native(val, (void __force __iomem *)mem);
 	else
@@ -434,7 +815,9 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
 {
 	bool is_iomem;
 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-	mem = &mem[index];
+
+	mem += index;
+
 	if (is_iomem)
 		return ioread32_native((void __force __iomem *)mem);
 	else
@@ -446,7 +829,9 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 {
 	bool is_iomem;
 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
-	mem = &mem[index];
+
+	mem += index;
+
 	if (is_iomem)
 		iowrite32_native(val, (void __force __iomem *)mem);
 	else
@@ -454,81 +839,47 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
 }
 
 static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
-		      uint32_t page_flags, struct page *dummy_read)
+nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 {
-#if __OS_HAS_AGP
-	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct drm_device *dev = drm->dev;
+#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 
-	if (drm->agp.stat == ENABLED) {
-		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
-					 page_flags, dummy_read);
+	if (drm->agp.bridge) {
+		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
 	}
 #endif
 
-	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
+	return nouveau_sgdma_create_ttm(bo, page_flags);
 }
 
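The new nouveau_bo_sync_for_device()/_for_cpu() helpers above avoid a dma_sync call per page: they scan the page array for runs of physically adjacent pages and sync each run with one call. The kernel loop detects adjacency by incrementing the struct page pointer; the sketch below expresses the same run-merging on DMA addresses, which is the property being exploited (PAGE_SZ and the address array are stand-ins):

    #include <stdint.h>
    #include <stddef.h>
    #define PAGE_SZ 4096u

    /* Length of the contiguous run starting at index i, so the caller can
     * issue a single sync covering run_pages() * PAGE_SZ bytes.
     */
    static size_t run_pages(const uint64_t *dma_addr, size_t i, size_t n)
    {
            size_t len = 1;
            while (i + len < n &&
                   dma_addr[i + len] == dma_addr[i] + len * PAGE_SZ)
                    len++;
            return len;
    }
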
 static int
-nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
+		    struct ttm_resource *reg)
 {
-	/* We'll do this from user space. */
-	return 0;
+#if IS_ENABLED(CONFIG_AGP)
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+#endif
+	if (!reg)
+		return -EINVAL;
+#if IS_ENABLED(CONFIG_AGP)
+	if (drm->agp.bridge)
+		return ttm_agp_bind(ttm, reg);
+#endif
+	return nouveau_sgdma_bind(bdev, ttm, reg);
 }
 
-static int
-nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
-			 struct ttm_mem_type_manager *man)
+static void
+nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
 {
+#if IS_ENABLED(CONFIG_AGP)
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-
-	switch (type) {
-	case TTM_PL_SYSTEM:
-		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
-		man->default_caching = TTM_PL_FLAG_CACHED;
-		break;
-	case TTM_PL_VRAM:
-		if (nv_device(drm->device)->card_type >= NV_50) {
-			man->func = &nouveau_vram_manager;
-			man->io_reserve_fastpath = false;
-			man->use_io_reserve_lru = true;
-		} else {
-			man->func = &ttm_bo_manager_func;
-		}
-		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_FLAG_UNCACHED |
-					 TTM_PL_FLAG_WC;
-		man->default_caching = TTM_PL_FLAG_WC;
-		break;
-	case TTM_PL_TT:
-		if (nv_device(drm->device)->card_type >= NV_50)
-			man->func = &nouveau_gart_manager;
-		else
-		if (drm->agp.stat != ENABLED)
-			man->func = &nv04_gart_manager;
-		else
-			man->func = &ttm_bo_manager_func;
-
-		if (drm->agp.stat == ENABLED) {
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-			man->available_caching = TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_WC;
-			man->default_caching = TTM_PL_FLAG_WC;
-		} else {
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_CMA;
-			man->available_caching = TTM_PL_MASK_CACHING;
-			man->default_caching = TTM_PL_FLAG_CACHED;
-		}
-
-		break;
-	default:
-		return -EINVAL;
+	if (drm->agp.bridge) {
+		ttm_agp_unbind(ttm);
+		return;
 	}
-	return 0;
+#endif
+	nouveau_sgdma_unbind(bdev, ttm);
 }
 
 static void
@@ -536,485 +887,137 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_VRAM:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
-					 TTM_PL_FLAG_SYSTEM);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+					 NOUVEAU_GEM_DOMAIN_CPU);
 		break;
 	default:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
 		break;
 	}
 
 	*pl = nvbo->placement;
 }
 
-
-/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
- * TTM_PL_{VRAM,TT} directly.
- */
-
 static int
-nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-			      struct nouveau_bo *nvbo, bool evict,
-			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
+		     struct ttm_resource *reg)
 {
-	struct nouveau_fence *fence = NULL;
+	struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
+	struct nouveau_mem *new_mem = nouveau_mem(reg);
+	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 	int ret;
 
-	ret = nouveau_fence_new(chan, false, &fence);
+	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
+			   old_mem->mem.size, &old_mem->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
-					no_wait_gpu, new_mem);
-	nouveau_fence_unref(&fence);
-	return ret;
-}
-
-static int
-nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 2);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING  (chan, handle);
-		FIRE_RING (chan);
-	}
-	return ret;
-}
-
-static int
-nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
-{
-	struct nouveau_mem *node = old_mem->mm_node;
-	int ret = RING_SPACE(chan, 10);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, new_mem->num_pages);
-		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
-	}
-	return ret;
-}
-
-static int
-nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 2);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING  (chan, handle);
-	}
-	return ret;
-}
-
-static int
-nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
-{
-	struct nouveau_mem *node = old_mem->mm_node;
-	u64 src_offset = node->vma[0].offset;
-	u64 dst_offset = node->vma[1].offset;
-	u32 page_count = new_mem->num_pages;
-	int ret;
-
-	page_count = new_mem->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 8191) ? 8191 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING  (chan, upper_32_bits(src_offset));
-		OUT_RING  (chan, lower_32_bits(src_offset));
-		OUT_RING  (chan, upper_32_bits(dst_offset));
-		OUT_RING  (chan, lower_32_bits(dst_offset));
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, line_count);
-		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING  (chan, 0x00000110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
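The per-generation copy hooks removed above and below all share one pattern: the copy method takes a line count with a hardware ceiling (8191 lines for the nvc0/nva3-style paths, 2047 for the m2mf paths), so a transfer is chunked into PAGE_SIZE-wide lines. A minimal sketch of that chunking; submit_lines() is a hypothetical stand-in for the BEGIN_NVxx/OUT_RING submission sequence:

    #include <stdint.h>

    /* Chunk a page_count-page copy into submissions of at most max_lines. */
    static int copy_chunked(uint32_t page_count, uint32_t max_lines,
                            int (*submit_lines)(uint32_t lines))
    {
            while (page_count) {
                    uint32_t lines = page_count > max_lines ? max_lines
                                                            : page_count;
                    int ret = submit_lines(lines);   /* one ring submission */
                    if (ret)
                            return ret;
                    page_count -= lines;
            }
            return 0;
    }
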
-static int
-nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
-{
-	struct nouveau_mem *node = old_mem->mm_node;
-	u64 src_offset = node->vma[0].offset;
-	u64 dst_offset = node->vma[1].offset;
-	u32 page_count = new_mem->num_pages;
-	int ret;
-
-	page_count = new_mem->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 2047) ? 2047 : page_count;
-
-		ret = RING_SPACE(chan, 12);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
-		OUT_RING  (chan, upper_32_bits(dst_offset));
-		OUT_RING  (chan, lower_32_bits(dst_offset));
-		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
-		OUT_RING  (chan, upper_32_bits(src_offset));
-		OUT_RING  (chan, lower_32_bits(src_offset));
-		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
-		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
-		OUT_RING  (chan, PAGE_SIZE); /* line_length */
-		OUT_RING  (chan, line_count);
-		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING  (chan, 0x00100110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
-{
-	struct nouveau_mem *node = old_mem->mm_node;
-	u64 src_offset = node->vma[0].offset;
-	u64 dst_offset = node->vma[1].offset;
-	u32 page_count = new_mem->num_pages;
-	int ret;
-
-	page_count = new_mem->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 8191) ? 8191 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING  (chan, upper_32_bits(src_offset));
-		OUT_RING  (chan, lower_32_bits(src_offset));
-		OUT_RING  (chan, upper_32_bits(dst_offset));
-		OUT_RING  (chan, lower_32_bits(dst_offset));
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, PAGE_SIZE);
-		OUT_RING  (chan, line_count);
-		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING  (chan, 0x00000110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
-{
-	struct nouveau_mem *node = old_mem->mm_node;
-	int ret = RING_SPACE(chan, 7);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, 0x00000000 /* COPY */);
-		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
-	}
-	return ret;
-}
+	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
+			   new_mem->mem.size, &old_mem->vma[1]);
+	if (ret)
+		goto done;
 
-static int
-nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
-{
-	struct nouveau_mem *node = old_mem->mm_node;
-	int ret = RING_SPACE(chan, 7);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
-		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
-		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
-		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
-		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
-	}
-	return ret;
-}
+	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
+	if (ret)
+		goto done;
 
-static int
-nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 6);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING  (chan, handle);
-		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
-		OUT_RING  (chan, NvNotify0);
-		OUT_RING  (chan, NvDmaFB);
-		OUT_RING  (chan, NvDmaFB);
+	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
+done:
+	if (ret) {
+		nvif_vmm_put(vmm, &old_mem->vma[1]);
+		nvif_vmm_put(vmm, &old_mem->vma[0]);
 	}
 	return ret;
 }
 
 static int
-nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
+		     struct ttm_operation_ctx *ctx,
+		     struct ttm_resource *new_reg)
 {
-	struct nouveau_mem *node = old_mem->mm_node;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	u64 length = (new_mem->num_pages << PAGE_SHIFT);
-	u64 src_offset = node->vma[0].offset;
-	u64 dst_offset = node->vma[1].offset;
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_channel *chan = drm->ttm.chan;
+	struct nouveau_cli *cli = chan->cli;
+	struct nouveau_fence *fence;
 	int ret;
 
-	while (length) {
-		u32 amount, stride, height;
-
-		amount  = min(length, (u64)(4 * 1024 * 1024));
-		stride  = 16 * 4;
-		height  = amount / stride;
-
-		if (old_mem->mem_type == TTM_PL_VRAM &&
-		    nouveau_bo_tile_layout(nvbo)) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				return ret;
-
-			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, stride);
-			OUT_RING  (chan, height);
-			OUT_RING  (chan, 1);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, 0);
-		} else {
-			ret = RING_SPACE(chan, 2);
-			if (ret)
-				return ret;
-
-			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
-			OUT_RING  (chan, 1);
-		}
-		if (new_mem->mem_type == TTM_PL_VRAM &&
-		    nouveau_bo_tile_layout(nvbo)) {
-			ret = RING_SPACE(chan, 8);
-			if (ret)
-				return ret;
-
-			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, stride);
-			OUT_RING  (chan, height);
-			OUT_RING  (chan, 1);
-			OUT_RING  (chan, 0);
-			OUT_RING  (chan, 0);
-		} else {
-			ret = RING_SPACE(chan, 2);
-			if (ret)
-				return ret;
-
-			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
-			OUT_RING  (chan, 1);
-		}
-
-		ret = RING_SPACE(chan, 14);
+	/* create temporary vmas for the transfer and attach them to the
+	 * old nvkm_mem node, these will get cleaned up after ttm has
+	 * destroyed the ttm_resource
+	 */
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		ret = nouveau_bo_move_prep(drm, bo, new_reg);
 		if (ret)
 			return ret;
-
-		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
-		OUT_RING  (chan, upper_32_bits(src_offset));
-		OUT_RING  (chan, upper_32_bits(dst_offset));
-		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING  (chan, lower_32_bits(src_offset));
-		OUT_RING  (chan, lower_32_bits(dst_offset));
-		OUT_RING  (chan, stride);
-		OUT_RING  (chan, stride);
-		OUT_RING  (chan, stride);
-		OUT_RING  (chan, height);
-		OUT_RING  (chan, 0x00000101);
-		OUT_RING  (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-		OUT_RING  (chan, 0);
-
-		length -= amount;
-		src_offset += amount;
-		dst_offset += amount;
 	}
 
-	return 0;
-}
-
-static int
-nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 4);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING  (chan, handle);
-		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
-		OUT_RING  (chan, NvNotify0);
-	}
-	return ret;
-}
-
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
-		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
-{
-	if (mem->mem_type == TTM_PL_TT)
-		return NvDmaTT;
-	return NvDmaFB;
-}
-
-static int
-nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
-{
-	u32 src_offset = old_mem->start << PAGE_SHIFT;
-	u32 dst_offset = new_mem->start << PAGE_SHIFT;
-	u32 page_count = new_mem->num_pages;
-	int ret;
-
-	ret = RING_SPACE(chan, 3);
-	if (ret)
-		return ret;
-
-	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
-	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
-
-	page_count = new_mem->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 2047) ? 2047 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, NvSubCopy,
-			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
-		OUT_RING  (chan, src_offset);
-		OUT_RING  (chan, dst_offset);
-		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
-		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
-		OUT_RING  (chan, PAGE_SIZE); /* line_length */
-		OUT_RING  (chan, line_count);
-		OUT_RING  (chan, 0x00000101);
-		OUT_RING  (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-		OUT_RING  (chan, 0);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
-		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
-{
-	struct nouveau_mem *node = mem->mm_node;
-	int ret;
-
-	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
-			     PAGE_SHIFT, node->page_shift,
-			     NV_MEM_ACCESS_RW, vma);
-	if (ret)
-		return ret;
-
-	if (mem->mem_type == TTM_PL_VRAM)
-		nouveau_vm_map(vma, node);
-	else
-		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
-
-	return 0;
-}
-
-static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
-	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
-	struct nouveau_channel *chan = chan = drm->ttm.chan;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	int ret;
-
-	mutex_lock(&chan->cli->mutex);
-
-	/* create temporary vmas for the transfer and attach them to the
-	 * old nouveau_mem node, these will get cleaned up after ttm has
-	 * destroyed the ttm_mem_reg
-	 */
+	if (drm_drv_uses_atomic_modeset(drm->dev))
+		mutex_lock(&cli->mutex);
+	else
+		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
+
+	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
+	if (ret)
+		goto out_unlock;
+
+	ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
+	if (ret)
+		goto out_unlock;
+
+	ret = nouveau_fence_new(&fence, chan);
+	if (ret)
+		goto out_unlock;
+
+	/* TODO: figure out a better solution here
+	 *
+	 * wait on the fence here explicitly as going through
+	 * ttm_bo_move_accel_cleanup somehow doesn't seem to do it.
+	 *
+	 * Without this the operation can timeout and we'll fallback to a
+	 * software copy, which might take several minutes to finish.
+	 */
-	if (nv_device(drm->device)->card_type >= NV_50) {
-		struct nouveau_mem *node = old_mem->mm_node;
-
-		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
-		if (ret)
-			goto out;
-
-		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
-		if (ret)
-			goto out;
-	}
-
-	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
-	if (ret == 0) {
-		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
-						    no_wait_gpu, new_mem);
-	}
+	nouveau_fence_wait(fence, false, false);
+	ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
+					new_reg);
+	nouveau_fence_unref(&fence);
 
-out:
-	mutex_unlock(&chan->cli->mutex);
+out_unlock:
+	mutex_unlock(&cli->mutex);
 	return ret;
 }
 
 void
 nouveau_bo_move_init(struct nouveau_drm *drm)
 {
-	static const struct {
+	static const struct _method_table {
 		const char *name;
 		int engine;
-		u32 oclass;
+		s32 oclass;
 		int (*exec)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
-			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+			    struct ttm_resource *, struct ttm_resource *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
-		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "COPY", 4, 0xcab5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "COPY", 4, 0xc9b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "COPY", 4, 0xc8b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "COPY", 4, 0xc7b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc7b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 4, 0xc6b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc6b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
@@ -1024,30 +1027,29 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
 		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
 		{},
-		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
-	}, *mthd = _methods;
+	};
+	const struct _method_table *mthd = _methods;
 	const char *name = "CPU";
 	int ret;
 
 	do {
-		struct nouveau_object *object;
 		struct nouveau_channel *chan;
-		u32 handle = (mthd->engine << 16) | mthd->oclass;
 
-		if (mthd->init == nve0_bo_move_init)
+		if (mthd->engine)
 			chan = drm->cechan;
 		else
 			chan = drm->channel;
 		if (chan == NULL)
 			continue;
 
-		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
-					 mthd->oclass, NULL, 0, &object);
+		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
+				       mthd->oclass | (mthd->engine << 16),
+				       mthd->oclass, NULL, 0,
+				       &drm->ttm.copy);
 		if (ret == 0) {
-			ret = mthd->init(chan, handle);
+			ret = mthd->init(chan, drm->ttm.copy.handle);
 			if (ret) {
-				nouveau_object_del(nv_object(drm),
-						   chan->handle, handle);
+				nvif_object_dtor(&drm->ttm.copy);
 				continue;
 			}
@@ -1061,118 +1063,58 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
 }
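
The expanded _methods[] table above is probed newest-class-first: each row is tried with nvif_object_ctor(), and the first class the device accepts wins, falling back to "CPU" if none do. A sketch of that probe-table idiom, where try_ctor() is a hypothetical stand-in for the constructor call:

    #include <stdint.h>
    #include <stddef.h>

    struct copy_method { const char *name; uint32_t oclass; };

    /* Return the first table entry whose class the device will construct. */
    static const struct copy_method *
    pick_copy_method(const struct copy_method *tbl, size_t n,
                     int (*try_ctor)(uint32_t oclass))
    {
            size_t i;
            for (i = 0; i < n; i++) {
                    if (try_ctor(tbl[i].oclass) == 0)
                            return &tbl[i];
            }
            return NULL;    /* no engine copy class: fall back to CPU copies */
    }
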
 
-static int
-nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
-	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	struct ttm_placement placement;
-	struct ttm_mem_reg tmp_mem;
-	int ret;
-
-	placement.fpfn = placement.lpfn = 0;
-	placement.num_placement = placement.num_busy_placement = 1;
-	placement.placement = placement.busy_placement = &placement_memtype;
-
-	tmp_mem = *new_mem;
-	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
-	if (ret)
-		return ret;
-
-	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
-	if (ret)
-		goto out;
-
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
-	if (ret)
-		goto out;
-
-	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
-out:
-	ttm_bo_mem_put(bo, &tmp_mem);
-	return ret;
-}
-
-static int
-nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
-{
-	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	struct ttm_placement placement;
-	struct ttm_mem_reg tmp_mem;
-	int ret;
-
-	placement.fpfn = placement.lpfn = 0;
-	placement.num_placement = placement.num_busy_placement = 1;
-	placement.placement = placement.busy_placement = &placement_memtype;
-
-	tmp_mem = *new_mem;
-	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
-	if (ret)
-		return ret;
-
-	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
-	if (ret)
-		goto out;
-
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
-	if (ret)
-		goto out;
-
-out:
-	ttm_bo_mem_put(bo, &tmp_mem);
-	return ret;
-}
-
-static void
-nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
+				 struct ttm_resource *new_reg)
 {
+	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_vma *vma;
+	long ret;
 
 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
 	if (bo->destroy != nouveau_bo_del_ttm)
 		return;
 
-	list_for_each_entry(vma, &nvbo->vma_list, head) {
-		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
-			nouveau_vm_map(vma, new_mem->mm_node);
-		} else
-		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
-		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
-			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
-				nouveau_vm_map_sg_table(vma, 0, new_mem->
-						  num_pages << PAGE_SHIFT,
-						  new_mem->mm_node);
-			else
-				nouveau_vm_map_sg(vma, 0, new_mem->
-						  num_pages << PAGE_SHIFT,
-						  new_mem->mm_node);
-		} else {
-			nouveau_vm_unmap(vma);
+	nouveau_bo_del_io_reserve_lru(bo);
+
+	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
+	    mem->mem.page == nvbo->page) {
+		list_for_each_entry(vma, &nvbo->vma_list, head) {
+			nouveau_vma_map(vma, mem);
 		}
+		nouveau_uvmm_bo_map_all(nvbo, mem);
+	} else {
+		list_for_each_entry(vma, &nvbo->vma_list, head) {
+			ret = dma_resv_wait_timeout(bo->base.resv,
+						    DMA_RESV_USAGE_BOOKKEEP,
+						    false, 15 * HZ);
+			WARN_ON(ret <= 0);
+			nouveau_vma_unmap(vma);
+		}
+		nouveau_uvmm_bo_unmap_all(nvbo);
 	}
+
+	if (new_reg)
+		nvbo->offset = (new_reg->start << PAGE_SHIFT);
+
 }
 
 static int
-nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
 		   struct nouveau_drm_tile **new_tile)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	u64 offset = new_mem->start << PAGE_SHIFT;
+	u64 offset = new_reg->start << PAGE_SHIFT;
 
 	*new_tile = NULL;
-	if (new_mem->mem_type != TTM_PL_VRAM)
+	if (new_reg->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	if (nv_device(drm->device)->card_type >= NV_10) {
-		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
-					       nvbo->tile_mode,
-					       nvbo->tile_flags);
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+		*new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size,
+					       nvbo->mode, nvbo->zeta);
 	}
 
 	return 0;
@@ -1185,375 +1127,374 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
+	struct dma_fence *fence;
+	int ret;
 
-	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
+	ret = dma_resv_get_singleton(bo->base.resv, DMA_RESV_USAGE_WRITE,
+				     &fence);
+	if (ret)
+		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_WRITE,
+				      false, MAX_SCHEDULE_TIMEOUT);
+
+	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
 }
 
 static int
-nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
+		struct ttm_operation_ctx *ctx,
+		struct ttm_resource *new_reg,
+		struct ttm_place *hop)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct drm_gem_object *obj = &bo->base;
+	struct ttm_resource *old_reg = bo->resource;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
 
-	if (nv_device(drm->device)->card_type < NV_50) {
-		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+	if (new_reg->mem_type == TTM_PL_TT) {
+		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
 		if (ret)
 			return ret;
 	}
 
+	drm_gpuvm_bo_gem_evict(obj, evict);
+	nouveau_bo_move_ntfy(bo, new_reg);
+	ret = ttm_bo_wait_ctx(bo, ctx);
+	if (ret)
+		goto out_ntfy;
+
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
+		if (ret)
+			goto out_ntfy;
+	}
+
 	/* Fake bo copy. */
-	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
-		BUG_ON(bo->mem.mm_node != NULL);
-		bo->mem = *new_mem;
-		new_mem->mm_node = NULL;
+	if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM &&
+			 !bo->ttm)) {
+		ttm_bo_move_null(bo, new_reg);
 		goto out;
 	}
 
-	/* CPU copy if we have no accelerated method available */
-	if (!drm->ttm.move) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	if (old_reg->mem_type == TTM_PL_SYSTEM &&
+	    new_reg->mem_type == TTM_PL_TT) {
+		ttm_bo_move_null(bo, new_reg);
 		goto out;
 	}
 
-	/* Hardware assisted copy. */
-	if (new_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flipd(bo, evict, intr,
-					    no_wait_gpu, new_mem);
-	else if (old_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flips(bo, evict, intr,
-					    no_wait_gpu, new_mem);
-	else
-		ret = nouveau_bo_move_m2mf(bo, evict, intr,
-					   no_wait_gpu, new_mem);
-
-	if (!ret)
+	if (old_reg->mem_type == TTM_PL_TT &&
+	    new_reg->mem_type == TTM_PL_SYSTEM) {
+		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
+		ttm_resource_free(bo, &bo->resource);
+		ttm_bo_assign_mem(bo, new_reg);
 		goto out;
+	}
 
-	/* Fallback to software copy. */
-	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	/* Hardware assisted copy. */
+	if (drm->ttm.move) {
+		if ((old_reg->mem_type == TTM_PL_SYSTEM &&
+		     new_reg->mem_type == TTM_PL_VRAM) ||
+		    (old_reg->mem_type == TTM_PL_VRAM &&
+		     new_reg->mem_type == TTM_PL_SYSTEM)) {
+			hop->fpfn = 0;
+			hop->lpfn = 0;
+			hop->mem_type = TTM_PL_TT;
+			hop->flags = 0;
+			return -EMULTIHOP;
+		}
+		ret = nouveau_bo_move_m2mf(bo, evict, ctx,
+					   new_reg);
+	} else
+		ret = -ENODEV;
+
+	if (ret) {
+		/* Fallback to software copy. */
+		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
+	}
 
 out:
-	if (nv_device(drm->device)->card_type < NV_50) {
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 		if (ret)
 			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
 		else
 			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
 	}
-
+out_ntfy:
+	if (ret) {
+		nouveau_bo_move_ntfy(bo, bo->resource);
+		drm_gpuvm_bo_gem_evict(obj, !evict);
+	}
 	return ret;
 }
 
-static int
-nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+static void
+nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
+			       struct ttm_resource *reg)
 {
-	return 0;
+	struct nouveau_mem *mem = nouveau_mem(reg);
+
+	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+		switch (reg->mem_type) {
+		case TTM_PL_TT:
+			if (mem->kind)
+				nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		case TTM_PL_VRAM:
+			nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		default:
+			break;
+		}
+	}
 }
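
nouveau_bo_move() above no longer stages SYSTEM<->VRAM blits itself (the removed nouveau_bo_move_flipd()/_flips() did that through a temporary TT buffer): it now fills *hop with a TTM_PL_TT placement and returns -EMULTIHOP, letting TTM re-run the move in two legs through GART. A sketch of that driver-side contract, with simplified stand-ins for ttm_place and the mem-type constants (EMULTIHOP is the real Linux errno, value 72):

    enum { PL_SYSTEM, PL_TT, PL_VRAM };
    struct hop { int mem_type; };
    #define EMULTIHOP 72

    /* On a direct SYSTEM<->VRAM request, ask TTM to stage through GART. */
    static int move_needs_hop(int old_type, int new_type, struct hop *hop)
    {
            if ((old_type == PL_SYSTEM && new_type == PL_VRAM) ||
                (old_type == PL_VRAM && new_type == PL_SYSTEM)) {
                    hop->mem_type = PL_TT;  /* intermediate placement */
                    return -EMULTIHOP;      /* TTM retries the move twice */
            }
            return 0;
    }
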
 static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct drm_device *dev = drm->dev;
+	struct nvkm_device *device = nvxx_device(drm);
+	struct nouveau_mem *mem = nouveau_mem(reg);
+	struct nvif_mmu *mmu = &drm->client.mmu;
 	int ret;
 
-	mem->bus.addr = NULL;
-	mem->bus.offset = 0;
-	mem->bus.size = mem->num_pages << PAGE_SHIFT;
-	mem->bus.base = 0;
-	mem->bus.is_iomem = false;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
-	switch (mem->mem_type) {
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+retry:
+	switch (reg->mem_type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
-		return 0;
+		ret = 0;
+		goto out;
 	case TTM_PL_TT:
-#if __OS_HAS_AGP
-		if (drm->agp.stat == ENABLED) {
-			mem->bus.offset = mem->start << PAGE_SHIFT;
-			mem->bus.base = drm->agp.base;
-			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
+#if IS_ENABLED(CONFIG_AGP)
+		if (drm->agp.bridge) {
+			reg->bus.offset = (reg->start << PAGE_SHIFT) +
+					  drm->agp.base;
+			reg->bus.is_iomem = !drm->agp.cma;
+			reg->bus.caching = ttm_write_combined;
 		}
 #endif
-		break;
+		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
+		    !mem->kind) {
+			/* untiled */
+			ret = 0;
+			break;
+		}
+		fallthrough;	/* tiled memory */
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = pci_resource_start(dev->pdev, 1);
-		mem->bus.is_iomem = true;
-		if (nv_device(drm->device)->card_type >= NV_50) {
-			struct nouveau_bar *bar = nouveau_bar(drm->device);
-			struct nouveau_mem *node = mem->mm_node;
-
-			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
-					&node->bar_vma);
-			if (ret)
-				return ret;
-
-			mem->bus.offset = node->bar_vma.offset;
+		reg->bus.offset = (reg->start << PAGE_SHIFT) +
+			device->func->resource_addr(device, NVKM_BAR1_FB);
+		reg->bus.is_iomem = true;
+
+		/* Some BARs do not support being ioremapped WC */
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
+			reg->bus.caching = ttm_uncached;
+		else
+			reg->bus.caching = ttm_write_combined;
+
+		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+			union {
+				struct nv50_mem_map_v0 nv50;
+				struct gf100_mem_map_v0 gf100;
+			} args;
+			u64 handle, length;
+			u32 argc = 0;
+
+			switch (mem->mem.object.oclass) {
+			case NVIF_CLASS_MEM_NV50:
+				args.nv50.version = 0;
+				args.nv50.ro = 0;
+				args.nv50.kind = mem->kind;
+				args.nv50.comp = mem->comp;
+				argc = sizeof(args.nv50);
+				break;
+			case NVIF_CLASS_MEM_GF100:
+				args.gf100.version = 0;
+				args.gf100.ro = 0;
+				args.gf100.kind = mem->kind;
+				argc = sizeof(args.gf100);
+				break;
+			default:
+				WARN_ON(1);
+				break;
+			}
+
+			ret = nvif_object_map_handle(&mem->mem.object,
+						     &args, argc,
+						     &handle, &length);
+			if (ret != 1) {
+				if (WARN_ON(ret == 0))
+					ret = -EINVAL;
+				goto out;
+			}
+
+			reg->bus.offset = handle;
 		}
+		ret = 0;
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
 	}
-	return 0;
+
+out:
+	if (ret == -ENOSPC) {
+		struct nouveau_bo *nvbo;
+
+		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
+						typeof(*nvbo),
+						io_reserve_lru);
+		if (nvbo) {
+			list_del_init(&nvbo->io_reserve_lru);
+			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+					   bdev->dev_mapping);
+			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+			nvbo->bo.resource->bus.offset = 0;
+			nvbo->bo.resource->bus.addr = NULL;
+			goto retry;
+		}
+
+	}
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+	return ret;
 }
 
 static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_bar *bar = nouveau_bar(drm->device);
-	struct nouveau_mem *node = mem->mm_node;
 
-	if (!node->bar_vma.node)
-		return;
-
-	bar->unmap(bar, &node->bar_vma);
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	nouveau_ttm_io_mem_free_locked(drm, reg);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
 }
 
-static int
-nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nouveau_device *device = nv_device(drm->device);
-	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
+	struct nvkm_device *device = nvxx_device(drm);
+	u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT;
+	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
 	 */
-	if (bo->mem.mem_type != TTM_PL_VRAM) {
-		if (nv_device(drm->device)->card_type < NV_50 ||
-		    !nouveau_bo_tile_layout(nvbo))
+	if (bo->resource->mem_type != TTM_PL_VRAM) {
+		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
+		    !nvbo->kind)
 			return 0;
-	}
 
-	/* make sure bo is in mappable vram */
-	if (bo->mem.start + bo->mem.num_pages < mappable)
-		return 0;
+		if (bo->resource->mem_type != TTM_PL_SYSTEM)
+			return 0;
+
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+
+	} else {
+		/* make sure bo is in mappable vram */
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
+		    bo->resource->start + PFN_UP(bo->resource->size) < mappable)
+			return 0;
 
+		for (i = 0; i < nvbo->placement.num_placement; ++i) {
+			nvbo->placements[i].fpfn = 0;
+			nvbo->placements[i].lpfn = mappable;
+		}
+
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+	}
+
+	ret = nouveau_bo_validate(nvbo, false, false);
+	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
+		return VM_FAULT_NOPAGE;
+	else if (unlikely(ret))
+		return VM_FAULT_SIGBUS;
 
-	nvbo->placement.fpfn = 0;
-	nvbo->placement.lpfn = mappable;
-	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
-	return nouveau_bo_validate(nvbo, false, false);
+	ttm_bo_move_to_lru_tail_unlocked(bo);
+	return 0;
 }
 
 static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+nouveau_ttm_tt_populate(struct ttm_device *bdev,
+			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	struct ttm_dma_tt *ttm_dma = (void *)ttm;
+	struct ttm_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct drm_device *dev;
-	unsigned i;
-	int r;
-	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (slave && ttm->sg) {
-		/* make userspace faulting work */
-		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
-						 ttm_dma->dma_address, ttm->num_pages);
-		ttm->state = tt_unbound;
+		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
+					       ttm->num_pages);
 		return 0;
 	}
 
-	drm = nouveau_bdev(ttm->bdev);
-	dev = drm->dev;
-
-#if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
-		return ttm_agp_tt_populate(ttm);
-	}
-#endif
-
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate((void *)ttm, dev->dev);
-	}
-#endif
-
-	r = ttm_pool_populate(ttm);
-	if (r) {
-		return r;
-	}
+	drm = nouveau_bdev(bdev);
 
-	for (i = 0; i < ttm->num_pages; i++) {
-		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
-						       0, PAGE_SIZE,
-						       PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
-			while (--i) {
-				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-				ttm_dma->dma_address[i] = 0;
-			}
-			ttm_pool_unpopulate(ttm);
-			return -EFAULT;
-		}
-	}
-	return 0;
+	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
 }
 
 static void
-nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
+			  struct ttm_tt *ttm)
 {
-	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
-	struct drm_device *dev;
-	unsigned i;
-	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
 
 	if (slave)
 		return;
 
	drm = nouveau_bdev(ttm->bdev);
- dev = drm->dev; + nouveau_ttm_tt_unbind(bdev, ttm); -#if __OS_HAS_AGP - if (drm->agp.stat == ENABLED) { - ttm_agp_tt_unpopulate(ttm); - return; - } -#endif + drm = nouveau_bdev(bdev); -#ifdef CONFIG_SWIOTLB - if (swiotlb_nr_tbl()) { - ttm_dma_unpopulate((void *)ttm, dev->dev); + return ttm_pool_free(&drm->ttm.bdev.pool, ttm); +} + +static void +nouveau_ttm_tt_destroy(struct ttm_device *bdev, + struct ttm_tt *ttm) +{ +#if IS_ENABLED(CONFIG_AGP) + struct nouveau_drm *drm = nouveau_bdev(bdev); + if (drm->agp.bridge) { + ttm_agp_destroy(ttm); return; } #endif - - for (i = 0; i < ttm->num_pages; i++) { - if (ttm_dma->dma_address[i]) { - pci_unmap_page(dev->pdev, ttm_dma->dma_address[i], - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - } - } - - ttm_pool_unpopulate(ttm); + nouveau_sgdma_destroy(bdev, ttm); } void -nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) +nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) { - struct nouveau_fence *old_fence = NULL; - - if (likely(fence)) - nouveau_fence_ref(fence); + struct dma_resv *resv = nvbo->bo.base.resv; - spin_lock(&nvbo->bo.bdev->fence_lock); - old_fence = nvbo->bo.sync_obj; - nvbo->bo.sync_obj = fence; - spin_unlock(&nvbo->bo.bdev->fence_lock); + if (!fence) + return; - nouveau_fence_unref(&old_fence); + dma_resv_add_fence(resv, &fence->base, exclusive ? + DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); } static void -nouveau_bo_fence_unref(void **sync_obj) -{ - nouveau_fence_unref((struct nouveau_fence **)sync_obj); -} - -static void * -nouveau_bo_fence_ref(void *sync_obj) -{ - return nouveau_fence_ref(sync_obj); -} - -static bool -nouveau_bo_fence_signalled(void *sync_obj) +nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo) { - return nouveau_fence_done(sync_obj); + nouveau_bo_move_ntfy(bo, NULL); } -static int -nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr) -{ - return nouveau_fence_wait(sync_obj, lazy, intr); -} - -static int -nouveau_bo_fence_flush(void *sync_obj) -{ - return 0; -} - -struct ttm_bo_driver nouveau_bo_driver = { +struct ttm_device_funcs nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, .ttm_tt_populate = &nouveau_ttm_tt_populate, .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, - .invalidate_caches = nouveau_bo_invalidate_caches, - .init_mem_type = nouveau_bo_init_mem_type, + .ttm_tt_destroy = &nouveau_ttm_tt_destroy, + .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = nouveau_bo_evict_flags, - .move_notify = nouveau_bo_move_ntfy, + .delete_mem_notify = nouveau_bo_delete_mem_notify, .move = nouveau_bo_move, - .verify_access = nouveau_bo_verify_access, - .sync_obj_signaled = nouveau_bo_fence_signalled, - .sync_obj_wait = nouveau_bo_fence_wait, - .sync_obj_flush = nouveau_bo_fence_flush, - .sync_obj_unref = nouveau_bo_fence_unref, - .sync_obj_ref = nouveau_bo_fence_ref, - .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, .io_mem_free = &nouveau_ttm_io_mem_free, }; - -struct nouveau_vma * -nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm) -{ - struct nouveau_vma *vma; - list_for_each_entry(vma, &nvbo->vma_list, head) { - if (vma->vm == vm) - return vma; - } - - return NULL; -} - -int -nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, - struct nouveau_vma *vma) -{ - const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; - struct nouveau_mem *node = nvbo->bo.mem.mm_node; - int ret; - - ret = nouveau_vm_get(vm, size, nvbo->page_shift, - 
NV_MEM_ACCESS_RW, vma);
-	if (ret)
-		return ret;
-
-	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
-		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
-		if (node->sg)
-			nouveau_vm_map_sg_table(vma, 0, size, node);
-		else
-			nouveau_vm_map_sg(vma, 0, size, node);
-	}
-
-	list_add_tail(&vma->head, &nvbo->vma_list);
-	vma->refcount = 1;
-	return 0;
-}
-
-void
-nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
-{
-	if (vma->node) {
-		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
-			nouveau_vm_unmap(vma);
-		nouveau_vm_put(vma);
-		list_del(&vma->head);
-	}
-}
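The nouveau_bo_move() hunk above switches the driver to TTM's multihop protocol: a direct VRAM<->SYSTEM transfer is refused with -EMULTIHOP plus a TT placement hint, and the move is then re-issued in two legs through GART. Below is a minimal userspace sketch of that control flow under mock types; driver_move() and move_with_hops() are illustrative stand-ins, not the TTM API (in the kernel, TTM itself catches the -EMULTIHOP return and bounces through the intermediate placement).

#include <stdio.h>

enum mem_type { PL_SYSTEM, PL_TT, PL_VRAM };
#define EMULTIHOP 72	/* Linux errno value */

struct hop { enum mem_type mem_type; };

/* Mirrors the rule in the hunk: VRAM<->SYSTEM must bounce through TT. */
static int driver_move(enum mem_type from, enum mem_type to, struct hop *hop)
{
	if ((from == PL_SYSTEM && to == PL_VRAM) ||
	    (from == PL_VRAM && to == PL_SYSTEM)) {
		hop->mem_type = PL_TT;	/* ask the caller to hop through GART */
		return -EMULTIHOP;
	}
	printf("copy %d -> %d\n", from, to);
	return 0;
}

/* Caller-side loop, modelling how the move is re-issued via the hop. */
static int move_with_hops(enum mem_type from, enum mem_type to)
{
	struct hop hop;
	int ret = driver_move(from, to, &hop);

	if (ret == -EMULTIHOP) {
		ret = driver_move(from, hop.mem_type, &hop);	/* first leg */
		if (ret == 0)
			ret = driver_move(hop.mem_type, to, &hop);	/* second leg */
	}
	return ret;
}

int main(void)
{
	return move_with_hops(PL_VRAM, PL_SYSTEM);
}

The payoff of this shape is that the driver only ever has to implement the copies it can actually accelerate; every unsupported pairing is decomposed by the caller rather than special-cased in nouveau_bo_move() itself.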

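Similarly, the -ENOSPC path in the nouveau_ttm_io_mem_reserve() hunk above is a map-and-retry loop against a finite BAR aperture: when no window is left, the least-recently-used holder on io_reserve_lru is unmapped and the reservation retried. A self-contained sketch of that eviction loop follows, with a hypothetical fixed slot pool standing in for the BAR; struct bo, map_bar(), and the slot counters are illustrative only.

#include <stdio.h>
#include <string.h>

#define ENOSPC 28	/* Linux errno value */

/* Hypothetical stand-in for a BO holding a BAR window; slot < 0 = unmapped. */
struct bo { const char *name; int slot; };

static struct bo *lru[8];	/* front = least recently used */
static int lru_len;
static int slots_free = 2;	/* tiny "aperture": only two windows */

static int map_bar(struct bo *bo)
{
retry:
	if (slots_free == 0) {
		struct bo *victim;

		if (lru_len == 0)
			return -ENOSPC;	/* nothing mapped, genuinely full */
		victim = lru[0];	/* kick the least recently used */
		memmove(&lru[0], &lru[1], --lru_len * sizeof(lru[0]));
		victim->slot = -1;	/* its window is torn down */
		slots_free++;
		printf("evicted %s\n", victim->name);
		goto retry;
	}
	slots_free--;
	bo->slot = 0;
	lru[lru_len++] = bo;	/* newly mapped = most recently used */
	return 0;
}

int main(void)
{
	struct bo a = { "a", -1 }, b = { "b", -1 }, c = { "c", -1 };

	map_bar(&a);
	map_bar(&b);
	return map_bar(&c);	/* evicts "a", then the retry succeeds */
}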