author		Chris Wilson <chris@chris-wilson.co.uk>	2020-07-29 17:42:18 +0100
committer	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>	2020-09-07 14:24:08 +0300
commit		89351925a477441ae5fdd0136aec80b49ea1c53c (patch)
tree		e9f74956244bfc4e75c3cc670d0c7512045ccc2b /drivers/gpu/drm/i915/gt/intel_ppgtt.c
parent		cd0452aa2a0d146ff2c695877a4f5d3e4d09b435 (diff)
drm/i915/gt: Switch to object allocations for page directories
The GEM object is grossly overweight for the practicality of tracking large numbers of individual pages, yet it is currently our only abstraction for tracking DMA allocations. Since those allocations need to be reserved upfront before an operation, and because we need to break away from simple system memory, we need to ditch using plain struct page wrappers.

In the process, we drop the WC mapping, as we ended up clflushing everything anyway due to various issues across a wider range of platforms. In a future step, though, we need to drop the kmap_atomic approach, which suggests pre-mapping all the pages and keeping them mapped.

v2: Verify our large scratch page is suitably DMA aligned; and manually clear the scratch since we are allocating plain struct pages full of prior content.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200729164219.5737-2-chris@chris-wilson.co.uk
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_ppgtt.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_ppgtt.c	42
1 file changed, 31 insertions(+), 11 deletions(-)
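As a hedged illustration of the write path described in the commit message (it mirrors the write_dma_entry() change in the diff below): a page-directory entry is now written by mapping the page of the backing GEM object with kmap_atomic() and flushing the written cacheline explicitly, since the WC mapping has been dropped. The example_write_pde() name is illustrative only and not part of the patch.

	/* Sketch only: write one PDE into a GEM-object-backed page directory. */
	static void example_write_pde(struct drm_i915_gem_object *pd_obj,
				      unsigned short idx, u64 pde)
	{
		/* Map the struct page backing the directory's GEM object. */
		u64 *vaddr = kmap_atomic(__px_page(pd_obj));

		vaddr[idx] = pde;
		/* No WC mapping any more, so flush the written entry explicitly. */
		clflush_cache_range(&vaddr[idx], sizeof(u64));
		kunmap_atomic(vaddr);
	}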
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 09056544bc29..ede6369a9092 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -18,7 +18,8 @@ struct i915_page_table *alloc_pt(struct i915_address_space *vm)
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_page_dma(vm, &pt->base))) {
+ pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(pt->base)) {
kfree(pt);
return ERR_PTR(-ENOMEM);
}
@@ -47,7 +48,8 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+ if (IS_ERR(pd->pt.base)) {
kfree(pd);
return ERR_PTR(-ENOMEM);
}
@@ -55,27 +57,28 @@ struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
return pd;
}
-void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
+void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
- cleanup_page_dma(vm, pd);
- kfree(pd);
+ i915_gem_object_put(pt->base);
+ kfree(pt);
}
static inline void
-write_dma_entry(struct i915_page_dma * const pdma,
+write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
{
- u64 * const vaddr = kmap_atomic(pdma->page);
+ u64 * const vaddr = kmap_atomic(__px_page(pdma));
vaddr[idx] = encoded_entry;
+ clflush_cache_range(&vaddr[idx], sizeof(u64));
kunmap_atomic(vaddr);
}
void
__set_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
- struct i915_page_dma * const to,
+ struct i915_page_table * const to,
u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
/* Each thread pre-pins the pd, and we may have a thread per pde. */
@@ -83,13 +86,13 @@ __set_pd_entry(struct i915_page_directory * const pd,
atomic_inc(px_used(pd));
pd->entry[idx] = to;
- write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
+ write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}
void
clear_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
- const struct i915_page_scratch * const scratch)
+ const struct drm_i915_gem_object * const scratch)
{
GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
@@ -102,7 +105,7 @@ bool
release_pd_entry(struct i915_page_directory * const pd,
const unsigned short idx,
struct i915_page_table * const pt,
- const struct i915_page_scratch * const scratch)
+ const struct drm_i915_gem_object * const scratch)
{
bool free = false;
@@ -234,6 +237,23 @@ int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
return 0;
}
+int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash)
+{
+ struct i915_page_table *pt;
+ int n, err;
+
+ for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
+ for (pt = stash->pt[n]; pt; pt = pt->stash) {
+ err = pin_pt_dma(vm, pt->base);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash)
{
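For context, a hedged sketch of the caller flow that the new pin step enables: reserve the page-table objects up front with i915_vm_alloc_pt_stash(), pin their DMA backing with i915_vm_pin_pt_stash() before entering the critical section, and release anything left over with i915_vm_free_pt_stash(). The three-argument alloc signature and the example_reserve_ptes() helper are assumptions based on callers outside this file, not part of this diff.

	/* Sketch only: reserve and pin page tables before a VA-range operation. */
	static int example_reserve_ptes(struct i915_address_space *vm, u64 size)
	{
		struct i915_vm_pt_stash stash = {};
		int err;

		/* Allocate enough page-table objects to cover 'size' bytes of VA. */
		err = i915_vm_alloc_pt_stash(vm, &stash, size);
		if (err)
			return err;

		/* Pin the DMA backing of every stashed page table up front. */
		err = i915_vm_pin_pt_stash(vm, &stash);
		if (err)
			goto out_free;

		/* ... vm->allocate_va_range() would then consume the stash ... */

	out_free:
		/* Any page tables not consumed are returned here. */
		i915_vm_free_pt_stash(vm, &stash);
		return err;
	}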