author    Chris Wilson <chris@chris-wilson.co.uk>  2020-06-20 00:45:43 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2020-06-20 09:57:10 +0100
commit    cf46143fe2c90d08554031b64529c8c1c0f9cb08 (patch)
tree      207366ac246623bfc690b1a6109b9abaa4b07729 /drivers/gpu/drm/i915/gt/intel_renderstate.c
parent    033ef711bbfb671ae58a1acb0f8440889c6000c6 (diff)
drm/i915/gt: Replace manual kmap_atomic() with pin_map for renderstate
We only emit the renderstate once now, during module load; delaying context creation is no longer a concern, so we do not need to optimise so eagerly. Since we last looked at the renderstate, we have gained a pin_map / flush_map facility that supports simple single mappings, replacing the open-coded kmap_atomic() and prepare_write. As the renderstate should be a single page, of which we only write a small portion, we stick to a simple WB mapping [kmap] and use clflush on !llc platforms, rather than creating a temporary WC vmapping for the single page.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200619234543.17499-2-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_renderstate.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c  29
1 file changed, 9 insertions(+), 20 deletions(-)
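For orientation before the diff, this is the pin_map / flush_map pattern the commit message describes, reduced to a minimal sketch. It is illustrative only: obj and len stand in for the GEM object and the number of bytes actually written, and everything except the map/flush/unpin flow is trimmed away.

	/* Pin a kernel mapping of the (single-page) object, write-back cached. */
	u32 *map = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* ... write a small portion of the page through map ... */

	/* Flush the written range back for platforms that need it (!llc);
	 * this takes the place of the manual drm_clflush_virt_range() call.
	 */
	__i915_gem_object_flush_map(obj, 0, len);
	i915_gem_object_unpin_map(obj);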
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index f59e7875cc5e..6db23389e427 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -61,7 +61,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
 #define OUT_BATCH(batch, i, val) \
 	do { \
 		if ((i) >= PAGE_SIZE / sizeof(u32)) \
-			goto err; \
+			goto out; \
 		(batch)[(i)++] = (val); \
 	} while(0)
 
@@ -70,15 +70,12 @@ static int render_state_setup(struct intel_renderstate *so,
 {
 	const struct intel_renderstate_rodata *rodata = so->rodata;
 	unsigned int i = 0, reloc_index = 0;
-	unsigned int needs_clflush;
+	int ret = -EINVAL;
 	u32 *d;
-	int ret;
 
-	ret = i915_gem_object_prepare_write(so->vma->obj, &needs_clflush);
-	if (ret)
-		return ret;
-
-	d = kmap_atomic(i915_gem_object_get_dirty_page(so->vma->obj, 0));
+	d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
 
 	while (i < rodata->batch_items) {
 		u32 s = rodata->batch[i];
@@ -89,7 +86,7 @@ static int render_state_setup(struct intel_renderstate *so,
 			if (HAS_64BIT_RELOC(i915)) {
 				if (i + 1 >= rodata->batch_items ||
 				    rodata->batch[i + 1] != 0)
-					goto err;
+					goto out;
 
 				d[i++] = s;
 				s = upper_32_bits(r);
@@ -103,7 +100,7 @@ static int render_state_setup(struct intel_renderstate *so,
 
 	if (rodata->reloc[reloc_index] != -1) {
 		drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index);
-		goto err;
+		goto out;
 	}
 
 	so->batch_offset = i915_ggtt_offset(so->vma);
@@ -150,19 +147,11 @@ static int render_state_setup(struct intel_renderstate *so,
 	 */
 	so->aux_size = ALIGN(so->aux_size, 8);
 
-	if (needs_clflush)
-		drm_clflush_virt_range(d, i * sizeof(u32));
-	kunmap_atomic(d);
-
 	ret = 0;
 out:
-	i915_gem_object_finish_access(so->vma->obj);
+	__i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
+	i915_gem_object_unpin_map(so->vma->obj);
 	return ret;
-
-err:
-	kunmap_atomic(d);
-	ret = -EINVAL;
-	goto out;
 }
 
 #undef OUT_BATCH
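The net effect on error handling: the separate err: label, which had to kunmap_atomic() before setting the error code, is gone; ret now defaults to -EINVAL, and every bail-out is a plain goto out, where the single exit path always flushes and unpins the mapping. Condensed from the hunks above (a sketch of the resulting shape, not the complete function):

	static int render_state_setup(struct intel_renderstate *so,
				      struct drm_i915_private *i915)
	{
		unsigned int i = 0;
		int ret = -EINVAL;	/* any early 'goto out' reports -EINVAL */
		u32 *d;

		d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB);
		if (IS_ERR(d))
			return PTR_ERR(d);

		/* ... emit the batch and resolve relocations into d[];
		 * every failure path is now a plain 'goto out' ...
		 */

		ret = 0;
	out:
		__i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32));
		i915_gem_object_unpin_map(so->vma->obj);
		return ret;
	}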