author     Linus Torvalds <torvalds@linux-foundation.org>   2015-09-04 15:49:32 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-09-04 15:49:32 -0700
commit     f377ea88b862bf7151be96d276f4cb740f8e1c41 (patch)
tree       6205913431c012e285316281b6221a20d4a92128 /drivers/gpu/drm/nouveau/nouveau_bo.c
parent     51e771c0d25b43d0f12b2c7c01939942becbbe28 (diff)
parent     73bf1b7be7aab60d7c651402441dd0b0b4991098 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"This is the main pull request for the drm for 4.3. Nouveau is
probably the biggest amount of changes in here, since it missed 4.2.
Highlights below, along with the usual bunch of fixes.
All stuff outside drm should have applicable acks.
Highlights:
- new drivers:
freescale dcu kms driver
- core:
more atomic fixes
disable some dri1 interfaces on kms drivers
drop fb panic handling; it was only getting more broken as more locking was required
new core fbdev Kconfig option, instead of each driver enabling/disabling it
struct_mutex cleanups
- panel:
more new panels
cleanup Kconfig
- i915:
Skylake support enabled by default
legacy modesetting using atomic infrastructure
Skylake fixes
GEN9 workarounds
- amdgpu:
Fiji support
CGS support for amdgpu
Initial GPU scheduler - off by default
Lots of bug fixes and optimisations.
- radeon:
DP fixes
misc fixes
- amdkfd:
Add Carrizo support for amdkfd using amdgpu.
- nouveau:
long-pending cleanup of the complete driver,
fully bisectable, which makes it larger
perfmon work
more reclocking improvements
maxwell displayport fixes
- vmwgfx:
new DX device support, supports OpenGL 3.3
screen targets support
- mgag200:
G200eW support
G200e new revision support
- msm:
dragonboard 410c support, msm8x94 support, msm8x74v1 support
yuv format support
dma plane support
mdp5 rotation
initial hdcp
- sti:
atomic support
- exynos:
lots of cleanups
atomic modesetting/pageflipping support
render node support
- tegra:
tegra210 support (dc, dsi, dp/hdmi)
dpms with atomic modesetting support
- atmel:
support for 3 more atmel SoCs
new input formats, PRIME support.
- dwhdmi:
preparing to add audio support
- rockchip:
yuv plane support"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (1369 commits)
drm/amdgpu: rename gmc_v8_0_init_compute_vmid
drm/amdgpu: fix vce3 instance handling
drm/amdgpu: remove ib test for the second VCE Ring
drm/amdgpu: properly enable VM fault interrupts
drm/amdgpu: fix warning in scheduler
drm/amdgpu: fix buffer placement under memory pressure
drm/amdgpu/cz: fix cz_dpm_update_low_memory_pstate logic
drm/amdgpu: fix typo in dce11 watermark setup
drm/amdgpu: fix typo in dce10 watermark setup
drm/amdgpu: use top down allocation for non-CPU accessible vram
drm/amdgpu: be explicit about cpu vram access for driver BOs (v2)
drm/amdgpu: set MEC doorbell range for Fiji
drm/amdgpu: implement burst NOP for SDMA
drm/amdgpu: add insert_nop ring func and default implementation
drm/amdgpu: add amdgpu_get_sdma_instance helper function
drm/amdgpu: add AMDGPU_MAX_SDMA_INSTANCES
drm/amdgpu: add burst_nop flag for sdma
drm/amdgpu: add count field for the SDMA NOP packet v2
drm/amdgpu: use PT for VM sync on unmap
drm/amdgpu: make wait_event uninterruptible in push_job
...
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_bo.c | 84
1 file changed, 41 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6edcce1658b7..15057b39491c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -48,24 +48,19 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	int i = reg - drm->tile.reg;
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
-	struct nvkm_fb_tile *tile = &pfb->tile.region[i];
-	struct nvkm_engine *engine;
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_fb *fb = device->fb;
+	struct nvkm_fb_tile *tile = &fb->tile.region[i];
 
 	nouveau_fence_unref(&reg->fence);
 
 	if (tile->pitch)
-		pfb->tile.fini(pfb, i, tile);
+		nvkm_fb_tile_fini(fb, i, tile);
 
 	if (pitch)
-		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
-
-	pfb->tile.prog(pfb, i, tile);
+		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
 
-	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_GR)))
-		engine->tile_prog(engine, i);
-	if ((engine = nvkm_engine(pfb, NVDEV_ENGINE_MPEG)))
-		engine->tile_prog(engine, i);
+	nvkm_fb_tile_prog(fb, i, tile);
 }
 
 static struct nouveau_drm_tile *
@@ -105,18 +100,18 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
 		   u32 size, u32 pitch, u32 flags)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->device);
 	struct nouveau_drm_tile *tile, *found = NULL;
 	int i;
 
-	for (i = 0; i < pfb->tile.regions; i++) {
+	for (i = 0; i < fb->tile.regions; i++) {
 		tile = nv10_bo_get_tile_region(dev, i);
 
 		if (pitch && !found) {
 			found = tile;
 			continue;
 
-		} else if (tile && pfb->tile.region[i].pitch) {
+		} else if (tile && fb->tile.region[i].pitch) {
 			/* Kill an unused tile region. */
 			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
 		}
@@ -214,7 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
-	if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
+	if (!nvxx_device(&drm->device)->func->cpu_coherent)
 		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
 	nvbo->page_shift = 12;
@@ -471,8 +466,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_device(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
+		dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
+					   PAGE_SIZE, DMA_TO_DEVICE);
 }
 
 void
@@ -491,8 +486,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 		return;
 
 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
-		dma_sync_single_for_cpu(nv_device_base(device),
-			ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
+					PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
 int
@@ -581,10 +576,9 @@ nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
 {
 #if __OS_HAS_AGP
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct drm_device *dev = drm->dev;
 
-	if (drm->agp.stat == ENABLED) {
-		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+	if (drm->agp.bridge) {
+		return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
 					 page_flags, dummy_read);
 	}
 #endif
@@ -636,12 +630,12 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 			man->func = &nouveau_gart_manager;
 		else
-		if (drm->agp.stat != ENABLED)
+		if (!drm->agp.bridge)
 			man->func = &nv04_gart_manager;
 		else
 			man->func = &ttm_bo_manager_func;
 
-		if (drm->agp.stat == ENABLED) {
+		if (drm->agp.bridge) {
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED |
 				TTM_PL_FLAG_WC;
@@ -1064,7 +1058,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
-	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
+	struct nouveau_cli *cli = (void *)chan->user.client;
 	struct nouveau_fence *fence;
 	int ret;
 
@@ -1104,7 +1098,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 	static const struct {
 		const char *name;
 		int engine;
-		u32 oclass;
+		s32 oclass;
 		int (*exec)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
@@ -1137,7 +1131,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
 		if (chan == NULL)
 			continue;
 
-		ret = nvif_object_init(chan->object, NULL,
+		ret = nvif_object_init(&chan->user,
 				       mthd->oclass | (mthd->engine << 16),
 				       mthd->oclass, NULL, 0,
 				       &drm->ttm.copy);
@@ -1356,6 +1350,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	struct nvkm_device *device = nvxx_device(&drm->device);
 	struct nvkm_mem *node = mem->mm_node;
 	int ret;
 
@@ -1372,10 +1367,10 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		return 0;
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
-		if (drm->agp.stat == ENABLED) {
+		if (drm->agp.bridge) {
 			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = drm->agp.base;
-			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
+			mem->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
 		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
@@ -1384,16 +1379,20 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 		/* fallthrough, tiled memory */
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+		mem->bus.base = device->func->resource_addr(device, 1);
 		mem->bus.is_iomem = true;
 		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 			struct nvkm_bar *bar = nvxx_bar(&drm->device);
+			int page_shift = 12;
+			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+				page_shift = node->page_shift;
 
-			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
-					&node->bar_vma);
+			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
+					    &node->bar_vma);
 			if (ret)
 				return ret;
 
+			nvkm_vm_map(&node->bar_vma, node);
 			mem->bus.offset = node->bar_vma.offset;
 		}
 		break;
@@ -1406,14 +1405,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
-	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nvkm_bar *bar = nvxx_bar(&drm->device);
 	struct nvkm_mem *node = mem->mm_node;
 
 	if (!node->bar_vma.node)
 		return;
 
-	bar->unmap(bar, &node->bar_vma);
+	nvkm_vm_unmap(&node->bar_vma);
+	nvkm_vm_put(&node->bar_vma);
 }
 
 static int
@@ -1421,8 +1419,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct nvif_device *device = &drm->device;
-	u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
+	struct nvkm_device *device = nvxx_device(&drm->device);
+	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
 	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1488,18 +1486,18 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
 	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached)
 		return ttm_dma_populate(ttm_dma, dev->dev);
 
 #if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
+	if (drm->agp.bridge) {
 		return ttm_agp_tt_populate(ttm);
 	}
 #endif
@@ -1553,20 +1551,20 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	drm = nouveau_bdev(ttm->bdev);
 	device = nvxx_device(&drm->device);
 	dev = drm->dev;
-	pdev = nv_device_base(device);
+	pdev = device->dev;
 
 	/*
	 * Objects matching this condition have been marked as force_coherent,
 	 * so use the DMA API for them.
 	 */
-	if (!nv_device_is_cpu_coherent(device) &&
+	if (!nvxx_device(&drm->device)->func->cpu_coherent &&
 	    ttm->caching_state == tt_uncached) {
 		ttm_dma_unpopulate(ttm_dma, dev->dev);
 		return;
 	}
 
 #if __OS_HAS_AGP
-	if (drm->agp.stat == ENABLED) {
+	if (drm->agp.bridge) {
 		ttm_agp_tt_unpopulate(ttm);
 		return;
 	}
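
The change across these hunks is largely mechanical: indirect accessors and state flags (nv_device_base(), pfb->tile.init/fini/prog, drm->agp.stat == ENABLED) are replaced by direct nvkm fields and helpers (device->dev, nvkm_fb_tile_*(), drm->agp.bridge). As a reading aid, here is the first hunk's function in its post-merge form, condensed from the diff above with added comments; the surrounding types and helpers are assumed to come from the usual nouveau/nvkm headers:

/* Post-merge form of nv10_bo_update_tile_region(), condensed from the first
 * hunk above. The old pfb->tile.{fini,init,prog} callbacks become the
 * nvkm_fb_tile_*() helpers, and the explicit GR/MPEG engine->tile_prog()
 * calls disappear from this path (presumably handled behind
 * nvkm_fb_tile_prog()).
 */
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_fb *fb = device->fb;
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);	/* tear down the old region */

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);		/* program the hardware */
}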