path: root/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
author	Ralph Campbell <rcampbell@nvidia.com>	2020-07-23 15:29:59 -0700
committer	Jason Gunthorpe <jgg@nvidia.com>	2020-07-28 15:48:42 -0300
commit	1a77decd0caed0bc5b6f5e474981d574076f0841
tree	2c2c9cecf6faea2b21bbe38ce10be0e7574182bb /drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
parent	b223555dc4ed6efc321d23e78e55197cf0080ef3
nouveau: fix storing invalid ptes
When migrating a range of system memory to device private memory, some of the pages in the address range may not be migrating. In this case, the non-migrating pages won't have a new GPU MMU entry to store, but the nvif_object_ioctl() NVIF_VMM_V0_PFNMAP method doesn't check its input and stores a bad page table entry that is nonetheless marked valid. Fix this by skipping the invalid input PTEs when updating the GPU page tables.

Link: https://lore.kernel.org/r/20200723223004.9586-2-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
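The shape of the fix is easy to see in isolation: walk the caller-supplied PFN array and only build and store a PTE when the entry carries the valid bit. Below is a minimal, stand-alone sketch of that loop. NVKM_VMM_PFN_V and NVKM_VMM_PFN_W are the real flag names from the patch, but the bit values, the address masking, and the pte_store() helper here are illustrative assumptions, not the driver code.

#include <stdint.h>
#include <stdio.h>

/* Flag names as in the patch; the bit positions here are assumptions. */
#define NVKM_VMM_PFN_V (1ULL << 0)	/* entry is valid    */
#define NVKM_VMM_PFN_W (1ULL << 1)	/* entry is writable */

/* Toy stand-in for the driver's VMM_WO064(): just show what would be written. */
static void pte_store(uint32_t ptei, uint64_t data)
{
	printf("PTE[%u] = %#llx\n", ptei, (unsigned long long)data);
}

int main(void)
{
	/* Mixed input: the middle page did not migrate, so it has no valid bit. */
	uint64_t pfns[] = {
		0x1000 | NVKM_VMM_PFN_V | NVKM_VMM_PFN_W,
		0x2000,					/* not migrating: must be skipped */
		0x3000 | NVKM_VMM_PFN_V,		/* valid but read-only */
	};
	uint64_t *pfn = pfns;
	uint32_t ptes = 3, ptei = 0;

	/* Both counters advance in the for header, so `continue`
	 * cannot leave the cursor behind -- the core of the fix. */
	for (; ptes; ptes--, pfn++) {
		uint64_t data = 0;

		if (!(*pfn & NVKM_VMM_PFN_V))
			continue;	/* nothing to store for this page */

		if (!(*pfn & NVKM_VMM_PFN_W))
			data |= 1ULL << 6;	/* RO, as in the driver's comment */
		data |= *pfn & ~0xfffULL;	/* keep the address bits (illustrative) */

		pte_store(ptei++, data);
	}
	return 0;
}

Running this stores two PTEs and silently skips the middle entry; before the fix, the equivalent driver loop would have written a garbage entry flagged as valid for that page.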
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index ed37fddd063f..7eabe9fe0d2b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -79,8 +79,12 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 	dma_addr_t addr;
 
 	nvkm_kmap(pt->memory);
-	while (ptes--) {
+	for (; ptes; ptes--, map->pfn++) {
 		u64 data = 0;
+
+		if (!(*map->pfn & NVKM_VMM_PFN_V))
+			continue;
+
 		if (!(*map->pfn & NVKM_VMM_PFN_W))
 			data |= BIT_ULL(6); /* RO. */
 
@@ -100,7 +104,6 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 		}
 
 		VMM_WO064(pt, vmm, ptei++ * 8, data);
-		map->pfn++;
 	}
 	nvkm_done(pt->memory);
 }
@@ -310,9 +313,12 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 	dma_addr_t addr;
 
 	nvkm_kmap(pt->memory);
-	while (ptes--) {
+	for (; ptes; ptes--, map->pfn++) {
 		u64 data = 0;
 
+		if (!(*map->pfn & NVKM_VMM_PFN_V))
+			continue;
+
 		if (!(*map->pfn & NVKM_VMM_PFN_W))
 			data |= BIT_ULL(6); /* RO. */
 
@@ -332,7 +338,6 @@ gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 		}
 
 		VMM_WO064(pt, vmm, ptei++ * 16, data);
-		map->pfn++;
 	}
 	nvkm_done(pt->memory);
 }
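One detail worth noting: the patch also rewrites the loop from `while (ptes--)` to a for statement, and that is not cosmetic. In C, `continue` inside a for loop still executes the third expression before the next condition test, whereas in the original while form `map->pfn++` sat at the bottom of the body, so a `continue` would have skipped it and left the PFN cursor pointing at the entry just examined. A tiny stand-alone demonstration of that semantics (the array contents are made up):

#include <stdio.h>

int main(void)
{
	int vals[] = { 1, 0, 1, 0, 1 };	/* 0 plays the role of an invalid entry */
	int n = 5, stored = 0;
	int *p = vals;

	for (; n; n--, p++) {	/* n-- and p++ run even when `continue` fires */
		if (!*p)
			continue;
		stored++;
	}
	printf("%d of 5 entries stored\n", stored);	/* prints: 3 of 5 entries stored */
	return 0;
}

Folding both increments into the for header is what makes the early `continue` safe: had `map->pfn++` stayed at the bottom of a while body, every skipped entry would have desynchronized the pfn pointer from the PTE index for the rest of the loop.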