Diffstat (limited to 'drivers/gpu/drm/i915/i915_perf.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c  90
1 file changed, 55 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 39a4804091d7..c4995d5a16d2 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -195,6 +195,8 @@
 #include <linux/sizes.h>
 #include <linux/uuid.h>
 
+#include "gt/intel_lrc_reg.h"
+
 #include "i915_drv.h"
 #include "i915_oa_hsw.h"
 #include "i915_oa_bdw.h"
@@ -210,7 +212,6 @@
 #include "i915_oa_cflgt3.h"
 #include "i915_oa_cnl.h"
 #include "i915_oa_icl.h"
-#include "intel_lrc_reg.h"
 
 /* HW requires this to be a power of two, between 128k and 16M, though driver
  * is currently generally designed assuming the largest 16M size is used such
@@ -1202,28 +1203,35 @@ static int i915_oa_read(struct i915_perf_stream *stream,
 static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
 					    struct i915_gem_context *ctx)
 {
-	struct intel_engine_cs *engine = i915->engine[RCS0];
+	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
-	int ret;
+	int err;
 
-	ret = i915_mutex_lock_interruptible(&i915->drm);
-	if (ret)
-		return ERR_PTR(ret);
+	err = i915_mutex_lock_interruptible(&i915->drm);
+	if (err)
+		return ERR_PTR(err);
 
-	/*
-	 * As the ID is the gtt offset of the context's vma we
-	 * pin the vma to ensure the ID remains fixed.
-	 *
-	 * NB: implied RCS engine...
-	 */
-	ce = intel_context_pin(ctx, engine);
-	mutex_unlock(&i915->drm.struct_mutex);
-	if (IS_ERR(ce))
-		return ce;
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		if (ce->engine->class != RENDER_CLASS)
+			continue;
+
+		/*
+		 * As the ID is the gtt offset of the context's vma we
+		 * pin the vma to ensure the ID remains fixed.
+		 */
+		err = intel_context_pin(ce);
+		if (err == 0) {
+			i915->perf.oa.pinned_ctx = ce;
+			break;
+		}
+	}
+	i915_gem_context_unlock_engines(ctx);
 
-	i915->perf.oa.pinned_ctx = ce;
+	mutex_unlock(&i915->drm.struct_mutex);
+	if (err)
+		return ERR_PTR(err);
 
-	return ce;
+	return i915->perf.oa.pinned_ctx;
 }
 
 /**
@@ -1679,7 +1687,7 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
 
 	CTX_REG(reg_state,
 		CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
-		gen8_make_rpcs(i915, &ce->sseu));
+		intel_sseu_make_rpcs(i915, &ce->sseu));
 }
 
 /*
@@ -1709,7 +1717,6 @@
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 				       const struct i915_oa_config *oa_config)
 {
-	struct intel_engine_cs *engine = dev_priv->engine[RCS0];
 	unsigned int map_type = i915_coherent_map_type(dev_priv);
 	struct i915_gem_context *ctx;
 	struct i915_request *rq;
@@ -1738,30 +1745,43 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 
 	/* Update all contexts now that we've stalled the submission. */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		struct intel_context *ce = intel_context_lookup(ctx, engine);
-		u32 *regs;
-
-		/* OA settings will be set upon first use */
-		if (!ce || !ce->state)
-			continue;
-
-		regs = i915_gem_object_pin_map(ce->state->obj, map_type);
-		if (IS_ERR(regs))
-			return PTR_ERR(regs);
+		struct i915_gem_engines_iter it;
+		struct intel_context *ce;
+
+		for_each_gem_engine(ce,
+				    i915_gem_context_lock_engines(ctx),
+				    it) {
+			u32 *regs;
+
+			if (ce->engine->class != RENDER_CLASS)
+				continue;
+
+			/* OA settings will be set upon first use */
+			if (!ce->state)
+				continue;
+
+			regs = i915_gem_object_pin_map(ce->state->obj,
+						       map_type);
+			if (IS_ERR(regs)) {
+				i915_gem_context_unlock_engines(ctx);
+				return PTR_ERR(regs);
+			}
 
-		ce->state->obj->mm.dirty = true;
-		regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
+			ce->state->obj->mm.dirty = true;
+			regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
 
-		gen8_update_reg_state_unlocked(ce, regs, oa_config);
+			gen8_update_reg_state_unlocked(ce, regs, oa_config);
 
-		i915_gem_object_unpin_map(ce->state->obj);
+			i915_gem_object_unpin_map(ce->state->obj);
+		}
+		i915_gem_context_unlock_engines(ctx);
 	}
 
 	/*
	 * Apply the configuration by doing one context restore of the edited
	 * context image.
	 */
-	rq = i915_request_alloc(engine, dev_priv->kernel_context);
+	rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 