Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_dmem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dmem.c  39
1 file changed, 16 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 12feecf71e75..ca4932a150e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -88,7 +88,8 @@ struct nouveau_dmem {
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
- return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
+ return container_of(page_pgmap(page), struct nouveau_dmem_chunk,
+ pagemap);
}
static struct nouveau_drm *page_to_drm(struct page *page)
@@ -193,7 +194,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!spage || !(src & MIGRATE_PFN_MIGRATE))
goto done;
- dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+ dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
if (!dpage)
goto done;
@@ -255,20 +256,15 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
chunk->pagemap.owner = drm->dev;
- ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
- NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
- &chunk->bo);
+ ret = nouveau_bo_new_pin(&drm->client, NOUVEAU_GEM_DOMAIN_VRAM, DMEM_CHUNK_SIZE,
+ &chunk->bo);
if (ret)
goto out_release;
- ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
- if (ret)
- goto out_bo_free;
-
ptr = memremap_pages(&chunk->pagemap, numa_node_id());
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
- goto out_bo_unpin;
+ goto out_bo_free;
}
mutex_lock(&drm->dmem->mutex);
@@ -291,10 +287,8 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
return 0;
-out_bo_unpin:
- nouveau_bo_unpin(chunk->bo);
out_bo_free:
- nouveau_bo_ref(NULL, &chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
out_release:
release_mem_region(chunk->pagemap.range.start, range_len(&chunk->pagemap.range));
out_free:
@@ -378,9 +372,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
dma_addr_t *dma_addrs;
struct nouveau_fence *fence;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
- dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
npages);
@@ -406,11 +400,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
- kfree(src_pfns);
- kfree(dst_pfns);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
for (i = 0; i < npages; i++)
dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
- kfree(dma_addrs);
+ kvfree(dma_addrs);
}
void
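The eviction hunk above moves the three per-chunk scratch arrays from kcalloc() to kvcalloc() with __GFP_NOFAIL: for a large chunk the arrays can be too big for the page allocator to satisfy reliably, so kvcalloc() is allowed to fall back to vmalloc(), and __GFP_NOFAIL spares the teardown path from having to handle allocation failure. A minimal standalone sketch of the pattern follows; the function name is illustrative and not part of the driver:

#include <linux/slab.h>	/* kvcalloc(), kvfree() */

/* Illustrative only: mirrors how nouveau_dmem_evict_chunk() now manages its
 * scratch PFN arrays.  Memory from kvcalloc() may come from vmalloc, so it
 * must be released with kvfree(), never kfree().
 */
static void pfn_array_pattern(unsigned long npages)
{
	unsigned long *src_pfns, *dst_pfns;

	/* __GFP_NOFAIL: eviction cannot back out, so block until the
	 * allocations succeed instead of returning NULL. */
	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);

	/* ... migrate_device_range()/migrate_device_pages() would run here ... */

	kvfree(dst_pfns);
	kvfree(src_pfns);
}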
@@ -425,8 +419,7 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
nouveau_dmem_evict_chunk(chunk);
- nouveau_bo_unpin(chunk->bo);
- nouveau_bo_ref(NULL, &chunk->bo);
+ nouveau_bo_unpin_del(&chunk->bo);
WARN_ON(chunk->callocated);
list_del(&chunk->list);
memunmap_pages(&chunk->pagemap);
@@ -443,7 +436,7 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper dst_aper, u64 dst_addr,
enum nouveau_aper src_aper, u64 src_addr)
{
- struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ struct nvif_push *push = &drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
@@ -516,7 +509,7 @@ static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
enum nouveau_aper dst_aper, u64 dst_addr)
{
- struct nvif_push *push = drm->dmem->migrate.chan->chan.push;
+ struct nvif_push *push = &drm->dmem->migrate.chan->chan.push;
u32 launch_dma = 0;
int ret;
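The buffer-object hunks earlier in this diff replace the open-coded nouveau_bo_new() + nouveau_bo_pin() pair, and the matching nouveau_bo_unpin() + nouveau_bo_ref(NULL, ...) teardown, with the combined helpers nouveau_bo_new_pin() and nouveau_bo_unpin_del(). (The two hunks immediately above are a separate adjustment: chan.push is apparently now embedded in the channel rather than reached through a pointer, hence the added &.) The sketch below is inferred purely from the call sites this diff removes and adds; the real helpers live elsewhere in nouveau and may differ in detail:

/* Rough sketch only, assuming the nouveau_bo.h declarations for
 * nouveau_bo_new()/nouveau_bo_pin()/nouveau_bo_unpin()/nouveau_bo_ref().
 */
static int nouveau_bo_new_pin_sketch(struct nouveau_cli *cli, u32 domain,
				     u64 size, struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	/* Same arguments the removed nouveau_bo_new() call used above. */
	ret = nouveau_bo_new(cli, size, 0, domain, 0, 0, NULL, NULL, &nvbo);
	if (ret)
		return ret;

	ret = nouveau_bo_pin(nvbo, domain, false);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void nouveau_bo_unpin_del_sketch(struct nouveau_bo **pnvbo)
{
	if (!*pnvbo)
		return;
	nouveau_bo_unpin(*pnvbo);
	nouveau_bo_ref(NULL, pnvbo);	/* drops the reference and NULLs *pnvbo */
}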