Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_gtt.c | 152
1 file changed, 70 insertions(+), 82 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7e403eaa9e0f..21d72f695adb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -543,9 +543,7 @@ static void fill_page_dma_32(struct i915_address_space *vm,
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct page *page = NULL;
- dma_addr_t addr;
- int order;
+ unsigned long size;
/*
* In order to utilize 64K pages for an object with a size < 2M, we will
@@ -559,48 +557,47 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* TODO: we should really consider write-protecting the scratch-page and
* sharing between ppgtt
*/
+ size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_48bit(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
- order = get_order(I915_GTT_PAGE_SIZE_64K);
- page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
- if (page) {
- addr = dma_map_page(vm->dma, page, 0,
- I915_GTT_PAGE_SIZE_64K,
- PCI_DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(vm->dma, addr))) {
- __free_pages(page, order);
- page = NULL;
- }
-
- if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
- dma_unmap_page(vm->dma, addr,
- I915_GTT_PAGE_SIZE_64K,
- PCI_DMA_BIDIRECTIONAL);
- __free_pages(page, order);
- page = NULL;
- }
- }
+ size = I915_GTT_PAGE_SIZE_64K;
+ gfp |= __GFP_NOWARN;
}
+ gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
+
+ do {
+ int order = get_order(size);
+ struct page *page;
+ dma_addr_t addr;
- if (!page) {
- order = 0;
- page = alloc_page(gfp | __GFP_ZERO);
+ page = alloc_pages(gfp, order);
if (unlikely(!page))
- return -ENOMEM;
+ goto skip;
- addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+ addr = dma_map_page(vm->dma, page, 0, size,
PCI_DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(vm->dma, addr))) {
- __free_page(page);
- return -ENOMEM;
- }
- }
+ if (unlikely(dma_mapping_error(vm->dma, addr)))
+ goto free_page;
- vm->scratch_page.page = page;
- vm->scratch_page.daddr = addr;
- vm->scratch_page.order = order;
+ if (unlikely(!IS_ALIGNED(addr, size)))
+ goto unmap_page;
- return 0;
+ vm->scratch_page.page = page;
+ vm->scratch_page.daddr = addr;
+ vm->scratch_page.order = order;
+ return 0;
+
+unmap_page:
+ dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
+free_page:
+ __free_pages(page, order);
+skip:
+ if (size == I915_GTT_PAGE_SIZE_4K)
+ return -ENOMEM;
+
+ size = I915_GTT_PAGE_SIZE_4K;
+ gfp &= ~__GFP_NOWARN;
+ } while (1);
}
static void cleanup_scratch_page(struct i915_address_space *vm)
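Reconstructed from the added lines above, a minimal standalone sketch of the new strategy in setup_scratch_page(): try a 64K scratch page quietly first, and on allocation, mapping, or alignment failure retry once with a plain 4K page. The names (alloc_scratch, struct scratch) and the unconditional __GFP_NOWARN are illustrative assumptions, not the driver's exact code.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/sizes.h>

struct scratch {
	struct page *page;
	dma_addr_t daddr;
	int order;
};

static int alloc_scratch(struct device *dev, struct scratch *s, gfp_t gfp)
{
	unsigned long size = SZ_64K;	/* preferred size (assumption) */

	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	do {
		int order = get_order(size);
		struct page *page;
		dma_addr_t addr;

		page = alloc_pages(gfp, order);
		if (unlikely(!page))
			goto skip;

		addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto free_page;

		/* 64K GTT entries require a 64K-aligned DMA address */
		if (unlikely(!IS_ALIGNED(addr, size)))
			goto unmap_page;

		s->page = page;
		s->daddr = addr;
		s->order = order;
		return 0;

unmap_page:
		dma_unmap_page(dev, addr, size, DMA_BIDIRECTIONAL);
free_page:
		__free_pages(page, order);
skip:
		if (size == SZ_4K)
			return -ENOMEM;	/* already at the smallest size */

		size = SZ_4K;		/* fall back, and let the allocator warn */
		gfp &= ~__GFP_NOWARN;
	} while (1);
}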
@@ -676,27 +673,22 @@ static void free_pd(struct i915_address_space *vm,
static void gen8_initialize_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
- unsigned int i;
-
fill_px(vm, pd,
gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
- for (i = 0; i < I915_PDES; i++)
- pd->page_table[i] = vm->scratch_pt;
+ memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
}
static int __pdp_init(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp)
{
const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- unsigned int i;
pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pdp->page_directory))
return -ENOMEM;
- for (i = 0; i < pdpes; i++)
- pdp->page_directory[i] = vm->scratch_pd;
+ memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
return 0;
}
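gen8_initialize_pd() and __pdp_init() above (and gen8_initialize_pml4() below) drop their open-coded loops in favour of memset_p(), the lib/string.c helper that fills an array of pointers with a single value. A small hedged illustration of the equivalence; fill_table() and its arguments are made up:

#include <linux/string.h>

static void fill_table(void **table, void *scratch, unsigned int count)
{
	unsigned int i;

	/* what the old code did */
	for (i = 0; i < count; i++)
		table[i] = scratch;

	/* what the patched code does: one call, same result */
	memset_p(table, scratch, count);
}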
@@ -718,7 +710,7 @@ alloc_pdp(struct i915_address_space *vm)
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!use_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
@@ -767,25 +759,22 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
static void gen8_initialize_pml4(struct i915_address_space *vm,
struct i915_pml4 *pml4)
{
- unsigned int i;
-
fill_px(vm, pml4,
gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
- pml4->pdps[i] = vm->scratch_pdp;
+ memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
/* Broadwell Page Directory Pointer Descriptors */
-static int gen8_write_pdp(struct drm_i915_gem_request *req,
+static int gen8_write_pdp(struct i915_request *rq,
unsigned entry,
dma_addr_t addr)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
BUG_ON(entry >= 4);
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -795,20 +784,20 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
*cs++ = lower_32_bits(addr);
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
int i, ret;
for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- ret = gen8_write_pdp(req, i, pd_daddr);
+ ret = gen8_write_pdp(rq, i, pd_daddr);
if (ret)
return ret;
}
@@ -817,9 +806,9 @@ static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
}
static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
+ return gen8_write_pdp(rq, 0, px_dma(&ppgtt->pml4));
}
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
@@ -1743,13 +1732,13 @@ static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1759,19 +1748,19 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
+ struct intel_engine_cs *engine = rq->engine;
u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- cs = intel_ring_begin(req, 6);
+ cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1781,16 +1770,16 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
*cs++ = get_pd_offset(ppgtt);
*cs++ = MI_NOOP;
- intel_ring_advance(req, cs);
+ intel_ring_advance(rq, cs);
return 0;
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+ struct i915_request *rq)
{
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_private *dev_priv = req->i915;
+ struct intel_engine_cs *engine = rq->engine;
+ struct drm_i915_private *dev_priv = rq->i915;
I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
@@ -2112,7 +2101,7 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
ppgtt->base.i915 = dev_priv;
ppgtt->base.dma = &dev_priv->drm.pdev->dev;
- if (INTEL_INFO(dev_priv)->gen < 8)
+ if (INTEL_GEN(dev_priv) < 8)
return gen6_ppgtt_init(ppgtt);
else
return gen8_ppgtt_init(ppgtt);
@@ -2260,9 +2249,9 @@ void i915_ppgtt_release(struct kref *kref)
trace_i915_ppgtt_release(&ppgtt->base);
/* vmas should already be unbound and destroyed */
- WARN_ON(!list_empty(&ppgtt->base.active_list));
- WARN_ON(!list_empty(&ppgtt->base.inactive_list));
- WARN_ON(!list_empty(&ppgtt->base.unbound_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.active_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list));
+ GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list));
ppgtt->base.cleanup(&ppgtt->base);
i915_address_space_fini(&ppgtt->base);
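alloc_pdp(), i915_ppgtt_release() above, and the aliasing-ppgtt/GGTT hunks below all replace WARN_ON() with GEM_BUG_ON(). The practical difference, shown with a simplified stand-in rather than the exact macro from i915_gem.h: WARN_ON() evaluates and logs in every build, whereas GEM_BUG_ON() halts only in CONFIG_DRM_I915_DEBUG_GEM builds and compiles to nothing beyond a type check otherwise.

#include <linux/bug.h>
#include <linux/build_bug.h>

/* Simplified stand-in; the real i915 macro also prints context before BUG(). */
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)	/* type-checked, no code emitted */
#endif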
@@ -2370,9 +2359,10 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
do {
- if (dma_map_sg(&obj->base.dev->pdev->dev,
- pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL))
+ if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL,
+ DMA_ATTR_NO_WARN))
return 0;
/* If the DMA remap fails, one cause can be that we have
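The hunk above switches i915_gem_gtt_prepare_pages() from dma_map_sg() to dma_map_sg_attrs() with DMA_ATTR_NO_WARN, since the surrounding loop retries the mapping after reclaiming memory and a noisy warning on the first failure is unhelpful. A minimal sketch of that call pattern; the helper name and error code are assumptions:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_pages_quietly(struct device *dev, struct sg_table *pages)
{
	/* dma_map_sg_attrs() returns the number of mapped entries; 0 means failure */
	if (!dma_map_sg_attrs(dev, pages->sgl, pages->nents,
			      DMA_BIDIRECTIONAL, DMA_ATTR_NO_WARN))
		return -ENOSPC;		/* caller can shrink caches and retry */

	return 0;
}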
@@ -2824,10 +2814,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
i915->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+ GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma);
ggtt->base.bind_vma = aliasing_gtt_bind_vma;
- WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
+ GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
return 0;
@@ -2918,7 +2908,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
ggtt->base.closed = true;
mutex_lock(&dev_priv->drm.struct_mutex);
- WARN_ON(!list_empty(&ggtt->base.active_list));
+ GEM_BUG_ON(!list_empty(&ggtt->base.active_list));
list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3811,6 +3801,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
switch (vma->ggtt_view.type) {
+ default:
+ GEM_BUG_ON(vma->ggtt_view.type);
+ /* fall through */
case I915_GGTT_VIEW_NORMAL:
vma->pages = vma->obj->mm.pages;
return 0;
@@ -3823,11 +3816,6 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
case I915_GGTT_VIEW_PARTIAL:
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
break;
-
- default:
- WARN_ONCE(1, "GGTT view %u not implemented!\n",
- vma->ggtt_view.type);
- return -EINVAL;
}
ret = 0;
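The last two hunks reorder the ggtt_view switch in i915_get_ggtt_vma_pages(): an unknown view type becomes a debug-build assertion that falls through to the NORMAL case, instead of a runtime WARN_ONCE plus -EINVAL. A compact, hedged sketch of that shape, with an invented enum and helper for illustration:

enum view_type { VIEW_NORMAL, VIEW_ROTATED, VIEW_PARTIAL };

static int get_view_pages(enum view_type type)
{
	switch (type) {
	default:
		GEM_BUG_ON(type);	/* programmer error; only checked in debug builds */
		/* fall through */
	case VIEW_NORMAL:
		return 0;		/* use the object's own pages */
	case VIEW_ROTATED:
	case VIEW_PARTIAL:
		return 1;		/* build a remapped sg list */
	}
}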