Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 197
1 file changed, 152 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index ae793f400ba1..eb5fcadcb39a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -677,40 +677,17 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
 }
 
 static void
-nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-			u64 addr, u64 size, bool sparse, bool pfn)
-{
-	const struct nvkm_vmm_desc_func *func = page->desc->func;
-	nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
-		      false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
-		      sparse ? func->sparse : func->invalid ? func->invalid :
-							      func->unmap);
-}
-
-static int
-nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-		      u64 addr, u64 size, struct nvkm_vmm_map *map,
-		      nvkm_vmm_pte_func func)
-{
-	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
-				 false, nvkm_vmm_ref_ptes, func, map, NULL);
-	if (fail != ~0ULL) {
-		if ((size = fail - addr))
-			nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void
 nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 		    u64 addr, u64 size, bool sparse, bool pfn)
 {
 	const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+	mutex_lock(&vmm->mutex.map);
 	nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
 		      NULL, NULL, NULL,
 		      sparse ? func->sparse : func->invalid ? func->invalid :
 							      func->unmap);
+	mutex_unlock(&vmm->mutex.map);
 }
 
 static void
@@ -718,33 +695,108 @@ nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 		  u64 addr, u64 size, struct nvkm_vmm_map *map,
 		  nvkm_vmm_pte_func func)
 {
+	mutex_lock(&vmm->mutex.map);
 	nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
 		      NULL, func, map, NULL);
+	mutex_unlock(&vmm->mutex.map);
 }
 
 static void
-nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
-		  u64 addr, u64 size)
+nvkm_vmm_ptes_put_locked(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+			 u64 addr, u64 size)
 {
 	nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
 		      nvkm_vmm_unref_ptes, NULL, NULL, NULL);
 }
 
+static void
+nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+		  u64 addr, u64 size)
+{
+	mutex_lock(&vmm->mutex.ref);
+	nvkm_vmm_ptes_put_locked(vmm, page, addr, size);
+	mutex_unlock(&vmm->mutex.ref);
+}
+
 static int
 nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
 		  u64 addr, u64 size)
 {
-	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
-				 nvkm_vmm_ref_ptes, NULL, NULL, NULL);
+	u64 fail;
+
+	mutex_lock(&vmm->mutex.ref);
+	fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
+			     nvkm_vmm_ref_ptes, NULL, NULL, NULL);
 	if (fail != ~0ULL) {
 		if (fail != addr)
-			nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
+			nvkm_vmm_ptes_put_locked(vmm, page, addr, fail - addr);
+		mutex_unlock(&vmm->mutex.ref);
+		return -ENOMEM;
+	}
+	mutex_unlock(&vmm->mutex.ref);
+	return 0;
+}
+
+static void
+__nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+			  u64 addr, u64 size, bool sparse, bool pfn)
+{
+	const struct nvkm_vmm_desc_func *func = page->desc->func;
+
+	nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
+		      false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
+		      sparse ? func->sparse : func->invalid ? func->invalid :
+			      func->unmap);
+}
+
+static void
+nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+			u64 addr, u64 size, bool sparse, bool pfn)
+{
+	if (vmm->managed.raw) {
+		nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, pfn);
+		nvkm_vmm_ptes_put(vmm, page, addr, size);
+	} else {
+		__nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, sparse, pfn);
+	}
+}
+
+static int
+__nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+			u64 addr, u64 size, struct nvkm_vmm_map *map,
+			nvkm_vmm_pte_func func)
+{
+	u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
+				 false, nvkm_vmm_ref_ptes, func, map, NULL);
+	if (fail != ~0ULL) {
+		if ((size = fail - addr))
+			nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
 		return -ENOMEM;
 	}
 	return 0;
 }
 
-static inline struct nvkm_vma *
+static int
+nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
+		      u64 addr, u64 size, struct nvkm_vmm_map *map,
+		      nvkm_vmm_pte_func func)
+{
+	int ret;
+
+	if (vmm->managed.raw) {
+		ret = nvkm_vmm_ptes_get(vmm, page, addr, size);
+		if (ret)
+			return ret;
+
+		nvkm_vmm_ptes_map(vmm, page, addr, size, map, func);
+
+		return 0;
+	} else {
+		return __nvkm_vmm_ptes_get_map(vmm, page, addr, size, map, func);
+	}
+}
+
+struct nvkm_vma *
 nvkm_vma_new(u64 addr, u64 size)
 {
 	struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
@@ -1045,7 +1097,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 	vmm->debug = mmu->subdev.debug;
 	kref_init(&vmm->kref);
 
-	__mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
+	__mutex_init(&vmm->mutex.vmm, "&vmm->mutex.vmm", key ? key : &_key);
+	mutex_init(&vmm->mutex.ref);
+	mutex_init(&vmm->mutex.map);
 
 	/* Locate the smallest page size supported by the backend, it will
 	 * have the deepest nesting of page tables.
@@ -1101,6 +1155,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 		if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
 			return ret;
 
+		vmm->managed.p.addr = 0;
+		vmm->managed.p.size = addr;
+
 		/* NVKM-managed area. */
 		if (size) {
 			if (!(vma = nvkm_vma_new(addr, size)))
@@ -1114,6 +1171,9 @@ nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
 		size = vmm->limit - addr;
 		if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
 			return ret;
+
+		vmm->managed.n.addr = addr;
+		vmm->managed.n.size = size;
 	} else {
 		/* Address-space fully managed by NVKM, requiring calls to
 		 * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
@@ -1362,9 +1422,9 @@ void
 nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 {
 	if (vma->memory) {
-		mutex_lock(&vmm->mutex);
+		mutex_lock(&vmm->mutex.vmm);
 		nvkm_vmm_unmap_locked(vmm, vma, false);
-		mutex_unlock(&vmm->mutex);
+		mutex_unlock(&vmm->mutex.vmm);
 	}
 }
 
@@ -1423,6 +1483,8 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
 	nvkm_vmm_pte_func func;
 	int ret;
 
+	map->no_comp = vma->no_comp;
+
 	/* Make sure we won't overrun the end of the memory object. */
 	if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
 		VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
@@ -1507,10 +1569,15 @@ nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
 	     struct nvkm_vmm_map *map)
 {
 	int ret;
-	mutex_lock(&vmm->mutex);
+
+	if (nvkm_vmm_in_managed_range(vmm, vma->addr, vma->size) &&
+	    vmm->managed.raw)
+		return nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
+
+	mutex_lock(&vmm->mutex.vmm);
 	ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
 	vma->busy = false;
-	mutex_unlock(&vmm->mutex);
+	mutex_unlock(&vmm->mutex.vmm);
 	return ret;
 }
 
@@ -1620,9 +1687,9 @@ nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
 {
 	struct nvkm_vma *vma = *pvma;
 	if (vma) {
-		mutex_lock(&vmm->mutex);
+		mutex_lock(&vmm->mutex.vmm);
 		nvkm_vmm_put_locked(vmm, vma);
-		mutex_unlock(&vmm->mutex);
+		mutex_unlock(&vmm->mutex.vmm);
 		*pvma = NULL;
 	}
 }
@@ -1769,9 +1836,49 @@ int
 nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
 {
 	int ret;
-	mutex_lock(&vmm->mutex);
+	mutex_lock(&vmm->mutex.vmm);
 	ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
-	mutex_unlock(&vmm->mutex);
+	mutex_unlock(&vmm->mutex.vmm);
+	return ret;
+}
+
+void
+nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
+		   bool sparse, u8 refd)
+{
+	const struct nvkm_vmm_page *page = &vmm->func->page[refd];
+
+	nvkm_vmm_ptes_unmap(vmm, page, addr, size, sparse, false);
+}
+
+void
+nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+	const struct nvkm_vmm_page *page = vmm->func->page;
+
+	nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
+{
+	const struct nvkm_vmm_page *page = vmm->func->page;
+
+	if (unlikely(!size))
+		return -EINVAL;
+
+	return nvkm_vmm_ptes_get(vmm, &page[refd], addr, size);
+}
+
+int
+nvkm_vmm_raw_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
+{
+	int ret;
+
+	mutex_lock(&vmm->mutex.ref);
+	ret = nvkm_vmm_ptes_sparse(vmm, addr, size, ref);
+	mutex_unlock(&vmm->mutex.ref);
+
 	return ret;
 }
 
@@ -1779,9 +1886,9 @@ void
 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
 	if (inst && vmm && vmm->func->part) {
-		mutex_lock(&vmm->mutex);
+		mutex_lock(&vmm->mutex.vmm);
 		vmm->func->part(vmm, inst);
-		mutex_unlock(&vmm->mutex);
+		mutex_unlock(&vmm->mutex.vmm);
 	}
 }
 
@@ -1790,9 +1897,9 @@ nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
 	int ret = 0;
 	if (vmm->func->join) {
-		mutex_lock(&vmm->mutex);
+		mutex_lock(&vmm->mutex.vmm);
 		ret = vmm->func->join(vmm, inst);
-		mutex_unlock(&vmm->mutex);
+		mutex_unlock(&vmm->mutex.vmm);
 	}
 	return ret;
 }
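For orientation: every mutex_lock(&vmm->mutex) above becomes mutex_lock(&vmm->mutex.vmm), while the PTE reference and map paths take the new mutex.ref and mutex.map locks instead. The struct change itself lands in vmm.h rather than in this file; the sketch below is inferred from the call sites in this diff, and the role noted for each lock is an interpretation, not text from the patch.

/* Inferred shape of the lock split (the real definition lives in
 * nvkm/subdev/mmu/vmm.h, outside this diff); field names match the
 * usage above, the per-lock comments are deduced from the call sites.
 */
struct nvkm_vmm {
	/* ... existing members ... */
	struct {
		struct mutex vmm; /* address-space state; the old vmm->mutex */
		struct mutex ref; /* PTE refcounting: ptes_get()/ptes_put() */
		struct mutex map; /* PTE writes: ptes_map()/ptes_unmap() */
	} mutex;
	/* ... */
};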
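The exported nvkm_vmm_raw_*() helpers added at the end deliberately avoid mutex.vmm: nvkm_vmm_raw_get() and nvkm_vmm_raw_put() serialise on mutex.ref through nvkm_vmm_ptes_get()/nvkm_vmm_ptes_put(), and nvkm_vmm_raw_unmap() on mutex.map through nvkm_vmm_ptes_unmap(), so page tables can be referenced and torn down concurrently with VMA management. A hypothetical caller (not part of this patch) would pair them like so:

/* Hypothetical raw-mode caller, illustrating the expected get/unmap/put
 * pairing of the helpers added by this patch.  'refd' is an index into
 * vmm->func->page[], i.e. the page-size level to operate on.
 */
static int
example_raw_cycle(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd)
{
	int ret;

	/* Reference (allocate) the page tables covering [addr, addr + size). */
	ret = nvkm_vmm_raw_get(vmm, addr, size, refd);
	if (ret)
		return ret;

	/* ... PTEs would be written here via the raw map path ... */

	/* Invalidate the PTEs, then drop the page-table references again. */
	nvkm_vmm_raw_unmap(vmm, addr, size, false, refd);
	nvkm_vmm_raw_put(vmm, addr, size, refd);
	return 0;
}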