author     Matthew Auld <matthew.auld@intel.com>    2021-04-27 09:54:13 +0100
committer  Matthew Auld <matthew.auld@intel.com>    2021-04-27 16:21:47 +0100
commit     529b9ec809a0a073192e5dc757fc383e11926661 (patch)
tree       0a0ddef7b05ca6910d61a4caecc3d8742219810f /drivers/gpu/drm/i915/gt/gen8_ppgtt.c
parent     fa85bfd19c26739c4cc02afbbab2588332648e9d (diff)
drm/i915/gtt: map the PD up front
We need to generalise our accessor for the page directories and tables from using the simple kmap_atomic to supporting local memory, and this setup must be done on acquisition of the backing storage, prior to entering fence execution contexts. Here we replace the kmap with the object mapping code which, for a simple single-page shmemfs object, will return a plain kmap that is then kept for the lifetime of the page directory.

Note that keeping the mapping around is a potential concern, since while the vma is pinned the mapping remains in place for the PDs underneath, or at least until the used_count reaches zero, at which point we can safely destroy it. For 32b this will be even worse, since the address space is more limited, but since this change mostly impacts full ppGTT platforms, the justification is that on modern platforms we shouldn't care too much about 32b.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210427085417.120246-3-matthew.auld@intel.com
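For readers skimming the diff below: the old kmap_atomic_px()/kunmap_atomic() pairs bracketed every access with a transient mapping, while px_vaddr() now returns a pointer that was established once by map_pt_dma() and kept alive afterwards. The userspace sketch below mirrors that lifetime model with plain mmap; the page_dir struct and its helpers are hypothetical illustrations of the pattern, not i915 code.

/*
 * Userspace analogue of the pattern in this patch (not i915 code):
 * instead of mapping and unmapping the page around every access
 * (the kmap_atomic model), map it once when the object's backing
 * storage is acquired and keep the mapping until the object dies.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

struct page_dir {
	uint64_t *vaddr;	/* kept alive, like the PD mapping here */
	size_t size;
};

/* "Acquire backing storage": map up front, before any fenced work. */
static int page_dir_init(struct page_dir *pd, size_t size)
{
	pd->vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (pd->vaddr == MAP_FAILED)
		return -1;
	pd->size = size;
	return 0;
}

/* Accessors no longer map/unmap; they just use the cached pointer. */
static void page_dir_fill(struct page_dir *pd, uint64_t encode)
{
	for (size_t i = 0; i < pd->size / sizeof(uint64_t); i++)
		pd->vaddr[i] = encode;
}

/* The mapping is destroyed only when the object itself is released. */
static void page_dir_fini(struct page_dir *pd)
{
	munmap(pd->vaddr, pd->size);
}

int main(void)
{
	struct page_dir pd;

	if (page_dir_init(&pd, 4096))
		return 1;
	page_dir_fill(&pd, 0xdeadbeefULL);
	page_dir_fini(&pd);
	return 0;
}

The trade-off, as the commit message notes, is address-space consumption: the mapping is now held for as long as the page directory is in use rather than for the duration of a single access, which is why the message calls out 32b platforms.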
Diffstat (limited to 'drivers/gpu/drm/i915/gt/gen8_ppgtt.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 26
1 file changed, 10 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 74bf6fc8461f..90adfee52b41 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -242,11 +242,10 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
atomic_read(&pt->used));
GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset64(vaddr + gen8_pd_index(start, 0),
vm->scratch[0]->encode,
count);
- kunmap_atomic(vaddr);
atomic_sub(count, &pt->used);
start += count;
@@ -375,7 +374,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
gen8_pte_t *vaddr;
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
@@ -402,12 +401,10 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
}
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
return idx;
}
@@ -442,7 +439,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
encode |= GEN8_PDE_PS_2M;
page_size = I915_GTT_PAGE_SIZE_2M;
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
} else {
struct i915_page_table *pt =
i915_pt_entry(pd, __gen8_pte_index(start, 1));
@@ -457,7 +454,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
maybe_64K = __gen8_pte_index(start, 1);
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
}
do {
@@ -491,7 +488,6 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
} while (rem >= page_size && index < I915_PDES);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
/*
* Is it safe to mark the 2M block as 64K? -- Either we have
@@ -505,9 +501,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
!iter->sg && IS_ALIGNED(vma->node.start +
vma->node.size,
I915_GTT_PAGE_SIZE_2M)))) {
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- kunmap_atomic(vaddr);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
@@ -523,12 +518,11 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
u16 i;
encode = vma->vm->scratch[0]->encode;
- vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+ vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
- kunmap_atomic(vaddr);
}
}
@@ -602,7 +596,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (IS_ERR(obj))
goto free_scratch;
- ret = pin_pt_dma(vm, obj);
+ ret = map_pt_dma(vm, obj);
if (ret) {
i915_gem_object_put(obj);
goto free_scratch;
@@ -639,7 +633,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
if (IS_ERR(pde))
return PTR_ERR(pde);
- err = pin_pt_dma(vm, pde->pt.base);
+ err = map_pt_dma(vm, pde->pt.base);
if (err) {
free_pd(vm, pde);
return err;
@@ -674,7 +668,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm)
goto err_pd;
}
- err = pin_pt_dma(vm, pd->pt.base);
+ err = map_pt_dma(vm, pd->pt.base);
if (err)
goto err_pd;