author		Linus Torvalds <torvalds@linux-foundation.org>	2018-04-02 07:59:23 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-02 07:59:23 -0700
commit		320b164abb32db876866a4ff8c2cb710524ac6ea (patch)
tree		1f79119cde6e24c9f1d01fb1e51252bca7c4cdd5 /drivers/gpu/drm/i915/i915_gem_gtt.c
parent		0adb32858b0bddf4ada5f364a84ed60b196dbcda (diff)
parent		694f54f680f7fd8e9561928fbfc537d9afbc3d79 (diff)
Merge tag 'drm-for-v4.17' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Cannonlake and Vega12 support are probably the two major things. This
pull lacks nouveau, Ben had some unforseen leave and a few other
blockers so we'll see how things look or maybe leave it for this merge
window.
core:
- Device links to handle sound/gpu pm dependency
- Color encoding/range properties
- Plane clipping into plane check helper
- Backlight helpers
- DP TP4 + HBR3 helper support
amdgpu:
- Vega12 support
- Enable DC by default on all supported GPUs
- Powerplay restructuring and cleanup
- DC bandwidth calc updates
- DC backlight on pre-DCE11
- TTM backing store dropping support
- SR-IOV fixes
- Adding "wattman" like functionality
- DC crc support
- Improved DC dual-link handling
amdkfd:
- GPUVM support for dGPU
- KFD events for dGPU
- Enable PCIe atomics for dGPUs
- HSA process eviction support
- Live-lock fixes for process eviction
- VM page table allocation fix for large-bar systems
panel:
- Raydium RM68200
- AUO G104SN02 V2
- KOE TX31D200VM0BAA
- ARM Versatile panels
i915:
- Cannonlake support enabled
- AUX-F port support added
- Icelake base enabling until internal milestone of forcewake support
- Query uAPI interface (used for GPU topology information currently)
- Compressed framebuffer support for sprites
- kmem cache shrinking when GPU is idle
- Avoid boosting GPU when waited item is being processed already
- Avoid retraining LSPCON link unnecessarily
- Decrease request signaling latency
- Deprecation of I915_SET_COLORKEY_NONE
- Kerneldoc and compiler warning cleanup for upcoming CI enforcements
- Full range ycbcr toggling
- HDCP support
i915/gvt:
- Big refactor for shadow ppgtt
- KBL context save/restore via LRI cmd (Weinan)
- Properly unmap dma for guest page (Changbin)
vmwgfx:
- Lots of various improvements
etnaviv:
- Use the drm gpu scheduler
- prep work for GC7000L support
vc4:
- fix alpha blending
- Expose perf counters to userspace
pl111:
- Bandwidth checking/limiting
- Versatile panel support
sun4i:
- A83T HDMI support
- A80 support
- YUV plane support
- H3/H5 HDMI support
omapdrm:
- HPD support for DVI connector
- remove lots of static variables
msm:
- DSI updates from 10nm / SDM845
- fix for race condition with a3xx/a4xx fence completion irq
- some refactoring/prep work for eventual a6xx support (ie. when we
have a userspace)
- a5xx debugfs enhancements
- some mdp5 fixes/cleanups to prepare for eventually merging writeback
  support (ie. when we have a userspace)
tegra:
- mmap() fixes for fbdev devices
- Overlay plane for hw cursor fix
- dma-buf cache maintenance support
mali-dp:
- YUV->RGB conversion support
rockchip:
- rk3399/chromebook fixes and improvements
rcar-du:
- LVDS support move to drm bridge
- DT bindings for R8A77995
- Driver/DT support for R8A77970
tilcdc:
- DRM panel support"
* tag 'drm-for-v4.17' of git://people.freedesktop.org/~airlied/linux: (1646 commits)
drm/i915: Fix hibernation with ACPI S0 target state
drm/i915/execlists: Use a locked clear_bit() for synchronisation with interrupt
drm/i915: Specify which engines to reset following semaphore/event lockups
drm/i915/dp: Write to SET_POWER dpcd to enable MST hub.
drm/amdkfd: Use ordered workqueue to restore processes
drm/amdgpu: Fix acquiring VM on large-BAR systems
drm/amd/pp: clean header file hwmgr.h
drm/amd/pp: use mlck_table.count for array loop index limit
drm: Fix uabi regression by allowing garbage mode->type from userspace
drm/amdgpu: Add an ATPX quirk for hybrid laptop
drm/amdgpu: fix spelling mistake: "asssert" -> "assert"
drm/amd/pp: Add new asic support in pp_psm.c
drm/amd/pp: Clean up powerplay code on Vega12
drm/amd/pp: Add smu irq handlers for legacy asics
drm/amd/pp: Fix set wrong temperature range on smu7
drm/amdgpu: Don't change preferred domian when fallback GTT v5
drm/vmwgfx: Bump version patchlevel and date
drm/vmwgfx: use monotonic event timestamps
drm/vmwgfx: Unpin the screen object backup buffer when not used
drm/vmwgfx: Stricter count of legacy surface device resources
...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	| 152
1 file changed, 70 insertions(+), 82 deletions(-)
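The bulk of this file's change reworks setup_scratch_page() into a single
allocate/map/check loop that tries a 64K scratch page first and falls back to
4K before giving up (see the diff below). As a minimal sketch of that retry
pattern, assuming nothing of the driver itself: SZ_4K/SZ_64K and the
aligned_alloc() stand-in for alloc_pages()/dma_map_page() are invented for
the example, not the i915 API.

/*
 * Userspace sketch of the fallback pattern in setup_scratch_page():
 * try the large size first, and on any failure retry the whole
 * sequence once at the baseline size before reporting -ENOMEM.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define SZ_4K  4096u
#define SZ_64K 65536u

static int setup_scratch(void **out, size_t *out_size)
{
        size_t size = SZ_64K;   /* preferred size, as with 64K GTT pages */

        do {
                /* stands in for alloc_pages() + dma_map_page() */
                void *page = aligned_alloc(size, size);

                if (page) {
                        *out = page;
                        *out_size = size;
                        return 0;
                }

                if (size == SZ_4K)      /* already at the baseline: give up */
                        return -ENOMEM;

                size = SZ_4K;           /* fall back to the small size and retry */
        } while (1);
}

int main(void)
{
        void *scratch;
        size_t size;

        if (setup_scratch(&scratch, &size) == 0) {
                printf("scratch buffer: %zu bytes\n", size);
                free(scratch);
        }
        return 0;
}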
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7e403eaa9e0f..21d72f695adb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -543,9 +543,7 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	struct page *page = NULL;
-	dma_addr_t addr;
-	int order;
+	unsigned long size;
 
 	/*
 	 * In order to utilize 64K pages for an object with a size < 2M, we will
@@ -559,48 +557,47 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 	 * TODO: we should really consider write-protecting the scratch-page and
 	 * sharing between ppgtt
 	 */
+	size = I915_GTT_PAGE_SIZE_4K;
 	if (i915_vm_is_48bit(vm) &&
 	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
-		order = get_order(I915_GTT_PAGE_SIZE_64K);
-		page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
-		if (page) {
-			addr = dma_map_page(vm->dma, page, 0,
-					    I915_GTT_PAGE_SIZE_64K,
-					    PCI_DMA_BIDIRECTIONAL);
-			if (unlikely(dma_mapping_error(vm->dma, addr))) {
-				__free_pages(page, order);
-				page = NULL;
-			}
-
-			if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
-				dma_unmap_page(vm->dma, addr,
-					       I915_GTT_PAGE_SIZE_64K,
-					       PCI_DMA_BIDIRECTIONAL);
-				__free_pages(page, order);
-				page = NULL;
-			}
-		}
+		size = I915_GTT_PAGE_SIZE_64K;
+		gfp |= __GFP_NOWARN;
 	}
+	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+
+	do {
+		int order = get_order(size);
+		struct page *page;
+		dma_addr_t addr;
 
-	if (!page) {
-		order = 0;
-		page = alloc_page(gfp | __GFP_ZERO);
+		page = alloc_pages(gfp, order);
 		if (unlikely(!page))
-			return -ENOMEM;
+			goto skip;
 
-		addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+		addr = dma_map_page(vm->dma, page, 0, size,
 				    PCI_DMA_BIDIRECTIONAL);
-		if (unlikely(dma_mapping_error(vm->dma, addr))) {
-			__free_page(page);
-			return -ENOMEM;
-		}
-	}
+		if (unlikely(dma_mapping_error(vm->dma, addr)))
+			goto free_page;
 
-	vm->scratch_page.page = page;
-	vm->scratch_page.daddr = addr;
-	vm->scratch_page.order = order;
+		if (unlikely(!IS_ALIGNED(addr, size)))
+			goto unmap_page;
 
-	return 0;
+		vm->scratch_page.page = page;
+		vm->scratch_page.daddr = addr;
+		vm->scratch_page.order = order;
+		return 0;
+
+unmap_page:
+		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+		__free_pages(page, order);
+skip:
+		if (size == I915_GTT_PAGE_SIZE_4K)
+			return -ENOMEM;
+
+		size = I915_GTT_PAGE_SIZE_4K;
+		gfp &= ~__GFP_NOWARN;
+	} while (1);
 }
 
 static void cleanup_scratch_page(struct i915_address_space *vm)
@@ -676,27 +673,22 @@ static void free_pd(struct i915_address_space *vm,
 static void gen8_initialize_pd(struct i915_address_space *vm,
 			       struct i915_page_directory *pd)
 {
-	unsigned int i;
-
 	fill_px(vm, pd,
 		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
-	for (i = 0; i < I915_PDES; i++)
-		pd->page_table[i] = vm->scratch_pt;
+	memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
 }
 
 static int __pdp_init(struct i915_address_space *vm,
 		      struct i915_page_directory_pointer *pdp)
 {
 	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
-	unsigned int i;
 
 	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
 					    GFP_KERNEL | __GFP_NOWARN);
 	if (unlikely(!pdp->page_directory))
 		return -ENOMEM;
 
-	for (i = 0; i < pdpes; i++)
-		pdp->page_directory[i] = vm->scratch_pd;
+	memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
 
 	return 0;
 }
@@ -718,7 +710,7 @@ alloc_pdp(struct i915_address_space *vm)
 	struct i915_page_directory_pointer *pdp;
 	int ret = -ENOMEM;
 
-	WARN_ON(!use_4lvl(vm));
+	GEM_BUG_ON(!use_4lvl(vm));
 
 	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
 	if (!pdp)
@@ -767,25 +759,22 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
 static void gen8_initialize_pml4(struct i915_address_space *vm,
 				 struct i915_pml4 *pml4)
 {
-	unsigned int i;
-
 	fill_px(vm, pml4,
 		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
-	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
-		pml4->pdps[i] = vm->scratch_pdp;
+	memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
 }
 
 /* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct drm_i915_gem_request *req,
+static int gen8_write_pdp(struct i915_request *rq,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_engine_cs *engine = rq->engine;
 	u32 *cs;
 
 	BUG_ON(entry >= 4);
 
-	cs = intel_ring_begin(req, 6);
+	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -795,20 +784,20 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 	*cs++ = MI_LOAD_REGISTER_IMM(1);
 	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
 	*cs++ = lower_32_bits(addr);
-	intel_ring_advance(req, cs);
+	intel_ring_advance(rq, cs);
 
 	return 0;
 }
 
 static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
-			       struct drm_i915_gem_request *req)
+			       struct i915_request *rq)
 {
 	int i, ret;
 
 	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		ret = gen8_write_pdp(req, i, pd_daddr);
+		ret = gen8_write_pdp(rq, i, pd_daddr);
 		if (ret)
 			return ret;
 	}
@@ -817,9 +806,9 @@ static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
-			       struct drm_i915_gem_request *req)
+			       struct i915_request *rq)
 {
-	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
+	return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
 }
 
 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
@@ -1743,13 +1732,13 @@ static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			 struct drm_i915_gem_request *req)
+			 struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_engine_cs *engine = rq->engine;
 	u32 *cs;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	cs = intel_ring_begin(req, 6);
+	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -1759,19 +1748,19 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
 	*cs++ = get_pd_offset(ppgtt);
 	*cs++ = MI_NOOP;
-	intel_ring_advance(req, cs);
+	intel_ring_advance(rq, cs);
 
 	return 0;
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct drm_i915_gem_request *req)
+			  struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_engine_cs *engine = rq->engine;
 	u32 *cs;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	cs = intel_ring_begin(req, 6);
+	cs = intel_ring_begin(rq, 6);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -1781,16 +1770,16 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
 	*cs++ = get_pd_offset(ppgtt);
 	*cs++ = MI_NOOP;
-	intel_ring_advance(req, cs);
+	intel_ring_advance(rq, cs);
 
 	return 0;
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-			  struct drm_i915_gem_request *req)
+			  struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = req->i915;
+	struct intel_engine_cs *engine = rq->engine;
+	struct drm_i915_private *dev_priv = rq->i915;
 
 	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
 	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
@@ -2112,7 +2101,7 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
 	ppgtt->base.i915 = dev_priv;
 	ppgtt->base.dma = &dev_priv->drm.pdev->dev;
 
-	if (INTEL_INFO(dev_priv)->gen < 8)
+	if (INTEL_GEN(dev_priv) < 8)
 		return gen6_ppgtt_init(ppgtt);
 	else
 		return gen8_ppgtt_init(ppgtt);
@@ -2260,9 +2249,9 @@ void i915_ppgtt_release(struct kref *kref)
 	trace_i915_ppgtt_release(&ppgtt->base);
 
 	/* vmas should already be unbound and destroyed */
-	WARN_ON(!list_empty(&ppgtt->base.active_list));
-	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
-	WARN_ON(!list_empty(&ppgtt->base.unbound_list));
+	GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
+	GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
+	GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
 
 	ppgtt->base.cleanup(&ppgtt->base);
 	i915_address_space_fini(&ppgtt->base);
@@ -2370,9 +2359,10 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
 	do {
-		if (dma_map_sg(&obj->base.dev->pdev->dev,
-			       pages->sgl, pages->nents,
-			       PCI_DMA_BIDIRECTIONAL))
+		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
+				     pages->sgl, pages->nents,
+				     PCI_DMA_BIDIRECTIONAL,
+				     DMA_ATTR_NO_WARN))
 			return 0;
 
 		/* If the DMA remap fails, one cause can be that we have
@@ -2824,10 +2814,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
 
 	i915->mm.aliasing_ppgtt = ppgtt;
 
-	WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+	GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
 	ggtt->base.bind_vma = aliasing_gtt_bind_vma;
 
-	WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
+	GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
 	ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
 
 	return 0;
@@ -2918,7 +2908,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	ggtt->base.closed = true;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	WARN_ON(!list_empty(&ggtt->base.active_list));
+	GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
 	list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
 		WARN_ON(i915_vma_unbind(vma));
 	mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3811,6 +3801,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
 
 	switch (vma->ggtt_view.type) {
+	default:
+		GEM_BUG_ON(vma->ggtt_view.type);
+		/* fall through */
 	case I915_GGTT_VIEW_NORMAL:
 		vma->pages = vma->obj->mm.pages;
 		return 0;
@@ -3823,11 +3816,6 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 	case I915_GGTT_VIEW_PARTIAL:
 		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
 		break;
-
-	default:
-		WARN_ONCE(1, "GGTT view %u not implemented!\n",
-			  vma->ggtt_view.type);
-		return -EINVAL;
 	}
 
 	ret = 0;
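The last two hunks replace a WARN_ONCE-and-return-EINVAL default at the bottom
of the switch with a default label placed before the first case: an unexpected
view type now trips a debug-only GEM_BUG_ON and then falls through to the
normal-view path. A minimal sketch of that shape, with assert() standing in
for GEM_BUG_ON and an enum invented for the example:

/*
 * "default first, then fall through" switch shape from
 * i915_get_ggtt_vma_pages(): debug builds trap an unhandled enum
 * value, release builds (NDEBUG) quietly treat it as the normal case.
 */
#include <assert.h>
#include <stdio.h>

enum view_type { VIEW_NORMAL, VIEW_ROTATED, VIEW_PARTIAL };

static const char *view_pages(enum view_type type)
{
        switch (type) {
        default:
                assert(!"unhandled view type");  /* GEM_BUG_ON analogue */
                /* fall through */
        case VIEW_NORMAL:
                return "object pages";
        case VIEW_ROTATED:
                return "rotated pages";
        case VIEW_PARTIAL:
                return "partial pages";
        }
}

int main(void)
{
        printf("%s\n", view_pages(VIEW_PARTIAL));
        return 0;
}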