author	Chris Wilson <chris@chris-wilson.co.uk>	2019-04-26 17:33:34 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-04-26 18:32:11 +0100
commit	5e2a0419ef7cb25d0f9a5fd6a62372bb47ce948d (patch)
tree	bc4b3c446c635415138cd956ad042abbf5fb404c /drivers/gpu/drm/i915/gt/intel_context.c
parent	11334c6aad9500a9d3b9b48dd0a4bb6406eb88fb (diff)
drm/i915: Switch back to an array of logical per-engine HW contexts
We switched to a tree of per-engine HW contexts to accommodate the
introduction of virtual engines. However, we plan to also support
multiple instances of the same engine within the GEM context, defeating
our use of the engine as a key for looking up the HW context. Just
allocate a logical per-engine instance and always use an index into
ctx->engines[]. Later on, this ctx->engines[] may be replaced by a
user-specified map.

v2: Add for_each_gem_engine() helper to iterate within the engines lock
v3: intel_context_create_request() helper
v4: s/unsigned long/unsigned int/ 4 billion engines is quite enough.
v5: Push iterator locking to caller

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190426163336.15906-7-chris@chris-wilson.co.uk
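For illustration only, here is a minimal standalone C sketch (simplified, hypothetical types; not the i915 implementation) of the change in lookup strategy described above: instead of walking an rb-tree keyed on the engine pointer, the logical HW context is found by a plain index into a fixed per-context array, which also allows several slots to be backed by the same physical engine.

	/* Standalone sketch with made-up types; not the i915 code. */
	#include <stddef.h>

	#define NUM_ENGINES 8

	struct hw_context { int id; };

	struct gem_context {
		/*
		 * One logical HW context slot per user-visible engine index;
		 * duplicates of the same physical engine are now representable,
		 * which an engine-keyed tree could not express.
		 */
		struct hw_context *engines[NUM_ENGINES];
	};

	/* Lookup is a direct array index; no tree walk is needed. */
	static struct hw_context *lookup(struct gem_context *ctx, unsigned int idx)
	{
		if (idx >= NUM_ENGINES)
			return NULL;

		return ctx->engines[idx];
	}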
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_context.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_context.c	112
1 file changed, 21 insertions(+), 91 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 15ac99c5dd4a..5e506e648454 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -17,7 +17,7 @@ static struct i915_global_context {
struct kmem_cache *slab_ce;
} global;
-struct intel_context *intel_context_alloc(void)
+static struct intel_context *intel_context_alloc(void)
{
return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
@@ -28,104 +28,17 @@ void intel_context_free(struct intel_context *ce)
}
struct intel_context *
-intel_context_lookup(struct i915_gem_context *ctx,
+intel_context_create(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
- struct intel_context *ce = NULL;
- struct rb_node *p;
-
- spin_lock(&ctx->hw_contexts_lock);
- p = ctx->hw_contexts.rb_node;
- while (p) {
- struct intel_context *this =
- rb_entry(p, struct intel_context, node);
-
- if (this->engine == engine) {
- GEM_BUG_ON(this->gem_context != ctx);
- ce = this;
- break;
- }
-
- if (this->engine < engine)
- p = p->rb_right;
- else
- p = p->rb_left;
- }
- spin_unlock(&ctx->hw_contexts_lock);
-
- return ce;
-}
-
-struct intel_context *
-__intel_context_insert(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
-{
- struct rb_node **p, *parent;
- int err = 0;
-
- spin_lock(&ctx->hw_contexts_lock);
-
- parent = NULL;
- p = &ctx->hw_contexts.rb_node;
- while (*p) {
- struct intel_context *this;
-
- parent = *p;
- this = rb_entry(parent, struct intel_context, node);
-
- if (this->engine == engine) {
- err = -EEXIST;
- ce = this;
- break;
- }
-
- if (this->engine < engine)
- p = &parent->rb_right;
- else
- p = &parent->rb_left;
- }
- if (!err) {
- rb_link_node(&ce->node, parent, p);
- rb_insert_color(&ce->node, &ctx->hw_contexts);
- }
-
- spin_unlock(&ctx->hw_contexts_lock);
-
- return ce;
-}
-
-void __intel_context_remove(struct intel_context *ce)
-{
- struct i915_gem_context *ctx = ce->gem_context;
-
- spin_lock(&ctx->hw_contexts_lock);
- rb_erase(&ce->node, &ctx->hw_contexts);
- spin_unlock(&ctx->hw_contexts_lock);
-}
-
-struct intel_context *
-intel_context_instance(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- struct intel_context *ce, *pos;
-
- ce = intel_context_lookup(ctx, engine);
- if (likely(ce))
- return intel_context_get(ce);
+ struct intel_context *ce;
ce = intel_context_alloc();
if (!ce)
return ERR_PTR(-ENOMEM);
intel_context_init(ce, ctx, engine);
-
- pos = __intel_context_insert(ctx, engine, ce);
- if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
- intel_context_free(ce);
-
- GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
- return intel_context_get(pos);
+ return ce;
}
int __intel_context_do_pin(struct intel_context *ce)
@@ -204,6 +117,8 @@ intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
+ GEM_BUG_ON(!engine->cops);
+
kref_init(&ce->ref);
ce->gem_context = ctx;
@@ -254,3 +169,18 @@ void intel_context_exit_engine(struct intel_context *ce)
{
intel_engine_pm_put(ce->engine);
}
+
+struct i915_request *intel_context_create_request(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (unlikely(err))
+ return ERR_PTR(err);
+
+ rq = i915_request_create(ce);
+ intel_context_unpin(ce);
+
+ return rq;
+}
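A hypothetical caller sketch, not part of this patch, showing how the reworked intel_context_create() and the new intel_context_create_request() helper might compose; the error handling and the use of i915_request_add()/intel_context_put() here are assumptions for illustration, not code from this series.

	/*
	 * Sketch only: create a logical context for one engine, build a
	 * request on it (the helper pins, allocates, then unpins), and
	 * submit it.
	 */
	static int submit_one(struct i915_gem_context *ctx,
			      struct intel_engine_cs *engine)
	{
		struct intel_context *ce;
		struct i915_request *rq;

		ce = intel_context_create(ctx, engine);
		if (IS_ERR(ce))
			return PTR_ERR(ce);

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			intel_context_put(ce);
			return PTR_ERR(rq);
		}

		i915_request_add(rq);
		intel_context_put(ce);
		return 0;
	}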