author     Ard Biesheuvel <ard.biesheuvel@linaro.org>   2016-10-06 16:49:29 +0100
committer  Ben Skeggs <bskeggs@redhat.com>              2016-10-12 17:29:33 +1000
commit     ebf7655aebe6a4e339a269130b399f5f7b0bf4b9
tree       63ec8f421f3dea3b0a29291dc8f6a9863f5e5cf5
parent     38f5359354d487f2492a3cdec862682c8b15e216
drm/nouveau/fb/gf100: defer DMA mapping of scratch page to oneinit() hook
The 100c10 scratch page is mapped using dma_map_page() before the TTM layer has had a chance to set the DMA mask. This means we are still running with the default of 32 bits when this code executes, which causes problems for platforms with no memory below 4 GB (such as AMD Seattle).

So move the dma_map_page() to the .oneinit() hook, which executes after the DMA mask has been set.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
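For readers unfamiliar with the ordering the message describes, here is a minimal sketch, not nouveau code: the function name example_map_scratch() is hypothetical, and only the dma_* calls are real kernel API. It illustrates the constraint that .oneinit() satisfies, namely that the DMA mask must be widened before the scratch page is mapped.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Hypothetical illustration of the constraint: a struct device starts out
 * with a 32-bit DMA mask, so a dma_map_page() issued before the mask is
 * widened can only produce addresses below 4 GB, which fails on platforms
 * whose RAM all sits above 4 GB.
 */
static int example_map_scratch(struct device *dev, struct page *page,
			       dma_addr_t *addr)
{
	int ret;

	/* Raise the mask first (TTM does this later in nouveau's init). */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* Only now is it safe to map a page that may live above 4 GB. */
	*addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *addr))
		return -EFAULT;

	return 0;
}

Deferring the mapping to .oneinit(), as the diff below does, achieves the same ordering without the fb code touching the mask itself: by the time .oneinit() runs, TTM has already set it.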
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/fb')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 76433cc66fff..3841ad6be99e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -50,24 +50,33 @@ gf100_fb_intr(struct nvkm_fb *base)
 }
 
 int
-gf100_fb_oneinit(struct nvkm_fb *fb)
+gf100_fb_oneinit(struct nvkm_fb *base)
 {
-	struct nvkm_device *device = fb->subdev.device;
+	struct gf100_fb *fb = gf100_fb(base);
+	struct nvkm_device *device = fb->base.subdev.device;
 	int ret, size = 0x1000;
 
 	size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
 	size = min(size, 0x1000);
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
-			      false, &fb->mmu_rd);
+			      false, &fb->base.mmu_rd);
 	if (ret)
 		return ret;
 
 	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
-			      false, &fb->mmu_wr);
+			      false, &fb->base.mmu_wr);
 	if (ret)
 		return ret;
 
+	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (fb->r100c10_page) {
+		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->r100c10))
+			return -EFAULT;
+	}
+
 	return 0;
 }
 
@@ -123,14 +132,6 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	nvkm_fb_ctor(func, device, index, &fb->base);
 	*pfb = &fb->base;
 
-	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c10_page) {
-		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c10))
-			return -EFAULT;
-	}
-
 	return 0;
 }