Diffstat (limited to 'drivers/gpu/drm/i915/gem/i915_gem_context.c')
-rw-r--r--	drivers/gpu/drm/i915/gem/i915_gem_context.c	2761
1 files changed, 1437 insertions, 1324 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 7720b8c22c81..3215ef49c975 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1,6 +1,5 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * * Copyright © 2011-2012 Intel Corporation */ @@ -64,38 +63,42 @@ * */ +#include <linux/highmem.h> #include <linux/log2.h> #include <linux/nospec.h> +#include <drm/drm_cache.h> +#include <drm/drm_print.h> +#include <drm/drm_syncobj.h> + #include "gt/gen6_ppgtt.h" #include "gt/intel_context.h" #include "gt/intel_context_param.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_user.h" -#include "gt/intel_execlists_submission.h" /* virtual_engine */ #include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" +#include "gt/shmem_utils.h" + +#include "pxp/intel_pxp.h" +#include "i915_file_private.h" #include "i915_gem_context.h" -#include "i915_globals.h" #include "i915_trace.h" #include "i915_user_extensions.h" #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 -static struct i915_global_gem_context { - struct i915_global base; - struct kmem_cache *slab_luts; -} global; +static struct kmem_cache *slab_luts; struct i915_lut_handle *i915_lut_handle_alloc(void) { - return kmem_cache_alloc(global.slab_luts, GFP_KERNEL); + return kmem_cache_alloc(slab_luts, GFP_KERNEL); } void i915_lut_handle_free(struct i915_lut_handle *lut) { - return kmem_cache_free(global.slab_luts, lut); + return kmem_cache_free(slab_luts, lut); } static void lut_close(struct i915_gem_context *ctx) @@ -167,73 +170,861 @@ lookup_user_engine(struct i915_gem_context *ctx, return i915_gem_context_get_engine(ctx, idx); } -static struct i915_address_space * -context_get_vm_rcu(struct i915_gem_context *ctx) +static int validate_priority(struct drm_i915_private *i915, + const struct drm_i915_gem_context_param *args) { - GEM_BUG_ON(!rcu_access_pointer(ctx->vm)); + s64 priority = args->value; - do { - struct i915_address_space *vm; + if (args->size) + return -EINVAL; + + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + return -ENODEV; + + if (priority > I915_CONTEXT_MAX_USER_PRIORITY || + priority < I915_CONTEXT_MIN_USER_PRIORITY) + return -EINVAL; + + if (priority > I915_CONTEXT_DEFAULT_PRIORITY && + !capable(CAP_SYS_NICE)) + return -EPERM; + + return 0; +} + +static void proto_context_close(struct drm_i915_private *i915, + struct i915_gem_proto_context *pc) +{ + int i; + + if (pc->pxp_wakeref) + intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref); + if (pc->vm) + i915_vm_put(pc->vm); + if (pc->user_engines) { + for (i = 0; i < pc->num_user_engines; i++) + kfree(pc->user_engines[i].siblings); + kfree(pc->user_engines); + } + kfree(pc); +} +static int proto_context_set_persistence(struct drm_i915_private *i915, + struct i915_gem_proto_context *pc, + bool persist) +{ + if (persist) { /* - * We do not allow downgrading from full-ppgtt [to a shared - * global gtt], so ctx->vm cannot become NULL. + * Only contexts that are short-lived [that will expire or be + * reset] are allowed to survive past termination. We require + * hangcheck to ensure that the persistent requests are healthy. 
*/ - vm = rcu_dereference(ctx->vm); - if (!kref_get_unless_zero(&vm->ref)) - continue; + if (!i915->params.enable_hangcheck) + return -EINVAL; + + pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); + } else { + /* To cancel a context we use "preempt-to-idle" */ + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + return -ENODEV; /* - * This ppgtt may have be reallocated between - * the read and the kref, and reassigned to a third - * context. In order to avoid inadvertent sharing - * of this ppgtt with that third context (and not - * src), we have to confirm that we have the same - * ppgtt after passing through the strong memory - * barrier implied by a successful - * kref_get_unless_zero(). + * If the cancel fails, we then need to reset, cleanly! * - * Once we have acquired the current ppgtt of ctx, - * we no longer care if it is released from ctx, as - * it cannot be reallocated elsewhere. + * If the per-engine reset fails, all hope is lost! We resort + * to a full GPU reset in that unlikely case, but realistically + * if the engine could not reset, the full reset does not fare + * much better. The damage has been done. + * + * However, if we cannot reset an engine by itself, we cannot + * cleanup a hanging persistent context without causing + * collateral damage, and we should not pretend we can by + * exposing the interface. */ + if (!intel_has_reset_engine(to_gt(i915))) + return -ENODEV; - if (vm == rcu_access_pointer(ctx->vm)) - return rcu_pointer_handoff(vm); + pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE); + } - i915_vm_put(vm); - } while (1); + return 0; } -static void intel_context_set_gem(struct intel_context *ce, - struct i915_gem_context *ctx) +static int proto_context_set_protected(struct drm_i915_private *i915, + struct i915_gem_proto_context *pc, + bool protected) { - GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); - RCU_INIT_POINTER(ce->gem_context, ctx); + int ret = 0; + + if (!protected) { + pc->uses_protected_content = false; + } else if (!intel_pxp_is_enabled(i915->pxp)) { + ret = -ENODEV; + } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) || + !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) { + ret = -EPERM; + } else { + pc->uses_protected_content = true; + + /* + * protected context usage requires the PXP session to be up, + * which in turn requires the device to be active. 
+ */ + pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + if (!intel_pxp_is_active(i915->pxp)) + ret = intel_pxp_start(i915->pxp); + } + + return ret; +} + +static struct i915_gem_proto_context * +proto_context_create(struct drm_i915_file_private *fpriv, + struct drm_i915_private *i915, unsigned int flags) +{ + struct i915_gem_proto_context *pc, *err; + + pc = kzalloc(sizeof(*pc), GFP_KERNEL); + if (!pc) + return ERR_PTR(-ENOMEM); + + pc->fpriv = fpriv; + pc->num_user_engines = -1; + pc->user_engines = NULL; + pc->user_flags = BIT(UCONTEXT_BANNABLE) | + BIT(UCONTEXT_RECOVERABLE); + if (i915->params.enable_hangcheck) + pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); + pc->sched.priority = I915_PRIORITY_NORMAL; + + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { + if (!HAS_EXECLISTS(i915)) { + err = ERR_PTR(-EINVAL); + goto proto_close; + } + pc->single_timeline = true; + } + + return pc; + +proto_close: + proto_context_close(i915, pc); + return err; +} + +static int proto_context_register_locked(struct drm_i915_file_private *fpriv, + struct i915_gem_proto_context *pc, + u32 *id) +{ + int ret; + void *old; + + lockdep_assert_held(&fpriv->proto_context_lock); + + ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL); + if (ret) + return ret; + + old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL); + if (xa_is_err(old)) { + xa_erase(&fpriv->context_xa, *id); + return xa_err(old); + } + WARN_ON(old); + + return 0; +} + +static int proto_context_register(struct drm_i915_file_private *fpriv, + struct i915_gem_proto_context *pc, + u32 *id) +{ + int ret; + + mutex_lock(&fpriv->proto_context_lock); + ret = proto_context_register_locked(fpriv, pc, id); + mutex_unlock(&fpriv->proto_context_lock); + + return ret; +} + +static struct i915_address_space * +i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) +{ + struct i915_address_space *vm; + + xa_lock(&file_priv->vm_xa); + vm = xa_load(&file_priv->vm_xa, id); + if (vm) + kref_get(&vm->ref); + xa_unlock(&file_priv->vm_xa); + + return vm; +} + +static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv, + struct i915_gem_proto_context *pc, + const struct drm_i915_gem_context_param *args) +{ + struct drm_i915_private *i915 = fpriv->i915; + struct i915_address_space *vm; + + if (args->size) + return -EINVAL; + + if (!HAS_FULL_PPGTT(i915)) + return -ENODEV; + + if (upper_32_bits(args->value)) + return -ENOENT; + + vm = i915_gem_vm_lookup(fpriv, args->value); + if (!vm) + return -ENOENT; + + if (pc->vm) + i915_vm_put(pc->vm); + pc->vm = vm; + + return 0; +} + +struct set_proto_ctx_engines { + struct drm_i915_private *i915; + unsigned num_engines; + struct i915_gem_proto_engine *engines; +}; + +static int +set_proto_ctx_engines_balance(struct i915_user_extension __user *base, + void *data) +{ + struct i915_context_engines_load_balance __user *ext = + container_of_user(base, typeof(*ext), base); + const struct set_proto_ctx_engines *set = data; + struct drm_i915_private *i915 = set->i915; + struct intel_engine_cs **siblings; + u16 num_siblings, idx; + unsigned int n; + int err; + + if (!HAS_EXECLISTS(i915)) + return -ENODEV; + + if (get_user(idx, &ext->engine_index)) + return -EFAULT; + + if (idx >= set->num_engines) { + drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", + idx, set->num_engines); + return -EINVAL; + } + + idx = array_index_nospec(idx, set->num_engines); + if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) { + drm_dbg(&i915->drm, + "Invalid placement[%d], 
already occupied\n", idx); + return -EEXIST; + } + + if (get_user(num_siblings, &ext->num_siblings)) + return -EFAULT; + + err = check_user_mbz(&ext->flags); + if (err) + return err; + + err = check_user_mbz(&ext->mbz64); + if (err) + return err; + + if (num_siblings == 0) + return 0; + + siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL); + if (!siblings) + return -ENOMEM; + + for (n = 0; n < num_siblings; n++) { + struct i915_engine_class_instance ci; + + if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { + err = -EFAULT; + goto err_siblings; + } + + siblings[n] = intel_engine_lookup_user(i915, + ci.engine_class, + ci.engine_instance); + if (!siblings[n]) { + drm_dbg(&i915->drm, + "Invalid sibling[%d]: { class:%d, inst:%d }\n", + n, ci.engine_class, ci.engine_instance); + err = -EINVAL; + goto err_siblings; + } + } + + if (num_siblings == 1) { + set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL; + set->engines[idx].engine = siblings[0]; + kfree(siblings); + } else { + set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED; + set->engines[idx].num_siblings = num_siblings; + set->engines[idx].siblings = siblings; + } + + return 0; + +err_siblings: + kfree(siblings); + + return err; +} + +static int +set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) +{ + struct i915_context_engines_bond __user *ext = + container_of_user(base, typeof(*ext), base); + const struct set_proto_ctx_engines *set = data; + struct drm_i915_private *i915 = set->i915; + struct i915_engine_class_instance ci; + struct intel_engine_cs *master; + u16 idx, num_bonds; + int err, n; + + if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) && + !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) { + drm_dbg(&i915->drm, + "Bonding not supported on this platform\n"); + return -ENODEV; + } + + if (get_user(idx, &ext->virtual_index)) + return -EFAULT; + + if (idx >= set->num_engines) { + drm_dbg(&i915->drm, + "Invalid index for virtual engine: %d >= %d\n", + idx, set->num_engines); + return -EINVAL; + } + + idx = array_index_nospec(idx, set->num_engines); + if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) { + drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); + return -EINVAL; + } + + if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) { + drm_dbg(&i915->drm, + "Bonding with virtual engines not allowed\n"); + return -EINVAL; + } + + err = check_user_mbz(&ext->flags); + if (err) + return err; + + for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { + err = check_user_mbz(&ext->mbz64[n]); + if (err) + return err; + } + + if (copy_from_user(&ci, &ext->master, sizeof(ci))) + return -EFAULT; + + master = intel_engine_lookup_user(i915, + ci.engine_class, + ci.engine_instance); + if (!master) { + drm_dbg(&i915->drm, + "Unrecognised master engine: { class:%u, instance:%u }\n", + ci.engine_class, ci.engine_instance); + return -EINVAL; + } - if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) - ce->ring = __intel_context_ring_size(SZ_16K); + if (intel_engine_uses_guc(master)) { + drm_dbg(&i915->drm, "bonding extension not supported with GuC submission"); + return -ENODEV; + } - if (rcu_access_pointer(ctx->vm)) { - struct i915_address_space *vm; + if (get_user(num_bonds, &ext->num_bonds)) + return -EFAULT; - rcu_read_lock(); - vm = context_get_vm_rcu(ctx); /* hmm */ - rcu_read_unlock(); + for (n = 0; n < num_bonds; n++) { + struct intel_engine_cs *bond; - i915_vm_put(ce->vm); - ce->vm = vm; + if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) + return -EFAULT; + + bond = 
intel_engine_lookup_user(i915, + ci.engine_class, + ci.engine_instance); + if (!bond) { + drm_dbg(&i915->drm, + "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", + n, ci.engine_class, ci.engine_instance); + return -EINVAL; + } } - GEM_BUG_ON(ce->timeline); - if (ctx->timeline) - ce->timeline = intel_timeline_get(ctx->timeline); + return 0; +} + +static int +set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, + void *data) +{ + struct i915_context_engines_parallel_submit __user *ext = + container_of_user(base, typeof(*ext), base); + const struct set_proto_ctx_engines *set = data; + struct drm_i915_private *i915 = set->i915; + struct i915_engine_class_instance prev_engine; + u64 flags; + int err = 0, n, i, j; + u16 slot, width, num_siblings; + struct intel_engine_cs **siblings = NULL; + intel_engine_mask_t prev_mask; + + if (get_user(slot, &ext->engine_index)) + return -EFAULT; + + if (get_user(width, &ext->width)) + return -EFAULT; + + if (get_user(num_siblings, &ext->num_siblings)) + return -EFAULT; + + if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) && + num_siblings != 1) { + drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n", + num_siblings); + return -EINVAL; + } + + if (slot >= set->num_engines) { + drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", + slot, set->num_engines); + return -EINVAL; + } + + if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) { + drm_dbg(&i915->drm, + "Invalid placement[%d], already occupied\n", slot); + return -EINVAL; + } + + if (get_user(flags, &ext->flags)) + return -EFAULT; + + if (flags) { + drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags); + return -EINVAL; + } + + for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { + err = check_user_mbz(&ext->mbz64[n]); + if (err) + return err; + } + + if (width < 2) { + drm_dbg(&i915->drm, "Width (%d) < 2\n", width); + return -EINVAL; + } + + if (num_siblings < 1) { + drm_dbg(&i915->drm, "Number siblings (%d) < 1\n", + num_siblings); + return -EINVAL; + } + + siblings = kmalloc_array(num_siblings * width, + sizeof(*siblings), + GFP_KERNEL); + if (!siblings) + return -ENOMEM; + + /* Create contexts / engines */ + for (i = 0; i < width; ++i) { + intel_engine_mask_t current_mask = 0; + + for (j = 0; j < num_siblings; ++j) { + struct i915_engine_class_instance ci; + + n = i * num_siblings + j; + if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { + err = -EFAULT; + goto out_err; + } + + siblings[n] = + intel_engine_lookup_user(i915, ci.engine_class, + ci.engine_instance); + if (!siblings[n]) { + drm_dbg(&i915->drm, + "Invalid sibling[%d]: { class:%d, inst:%d }\n", + n, ci.engine_class, ci.engine_instance); + err = -EINVAL; + goto out_err; + } + + /* + * We don't support breadcrumb handshake on these + * classes + */ + if (siblings[n]->class == RENDER_CLASS || + siblings[n]->class == COMPUTE_CLASS) { + err = -EINVAL; + goto out_err; + } + + if (n) { + if (prev_engine.engine_class != + ci.engine_class) { + drm_dbg(&i915->drm, + "Mismatched class %d, %d\n", + prev_engine.engine_class, + ci.engine_class); + err = -EINVAL; + goto out_err; + } + } + + prev_engine = ci; + current_mask |= siblings[n]->logical_mask; + } + + if (i > 0) { + if (current_mask != prev_mask << 1) { + drm_dbg(&i915->drm, + "Non contiguous logical mask 0x%x, 0x%x\n", + prev_mask, current_mask); + err = -EINVAL; + goto out_err; + } + } + prev_mask = current_mask; + } + + set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL; + set->engines[slot].num_siblings = 
num_siblings; + set->engines[slot].width = width; + set->engines[slot].siblings = siblings; + + return 0; + +out_err: + kfree(siblings); + + return err; +} + +static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = { + [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance, + [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond, + [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] = + set_proto_ctx_engines_parallel_submit, +}; + +static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, + struct i915_gem_proto_context *pc, + const struct drm_i915_gem_context_param *args) +{ + struct drm_i915_private *i915 = fpriv->i915; + struct set_proto_ctx_engines set = { .i915 = i915 }; + struct i915_context_param_engines __user *user = + u64_to_user_ptr(args->value); + unsigned int n; + u64 extensions; + int err; + + if (pc->num_user_engines >= 0) { + drm_dbg(&i915->drm, "Cannot set engines twice"); + return -EINVAL; + } + + if (args->size < sizeof(*user) || + !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) { + drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", + args->size); + return -EINVAL; + } + + set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); + /* RING_MASK has no shift so we can use it directly here */ + if (set.num_engines > I915_EXEC_RING_MASK + 1) + return -EINVAL; + + set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL); + if (!set.engines) + return -ENOMEM; + + for (n = 0; n < set.num_engines; n++) { + struct i915_engine_class_instance ci; + struct intel_engine_cs *engine; + + if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { + kfree(set.engines); + return -EFAULT; + } + + memset(&set.engines[n], 0, sizeof(set.engines[n])); + + if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && + ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) + continue; + + engine = intel_engine_lookup_user(i915, + ci.engine_class, + ci.engine_instance); + if (!engine) { + drm_dbg(&i915->drm, + "Invalid engine[%d]: { class:%d, instance:%d }\n", + n, ci.engine_class, ci.engine_instance); + kfree(set.engines); + return -ENOENT; + } + + set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL; + set.engines[n].engine = engine; + } + + err = -EFAULT; + if (!get_user(extensions, &user->extensions)) + err = i915_user_extensions(u64_to_user_ptr(extensions), + set_proto_ctx_engines_extensions, + ARRAY_SIZE(set_proto_ctx_engines_extensions), + &set); + if (err) { + kfree(set.engines); + return err; + } + + pc->num_user_engines = set.num_engines; + pc->user_engines = set.engines; + + return 0; +} + +static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv, + struct i915_gem_proto_context *pc, + struct drm_i915_gem_context_param *args) +{ + struct drm_i915_private *i915 = fpriv->i915; + struct drm_i915_gem_context_param_sseu user_sseu; + struct intel_sseu *sseu; + int ret; + + if (args->size < sizeof(user_sseu)) + return -EINVAL; + + if (GRAPHICS_VER(i915) != 11) + return -ENODEV; + + if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), + sizeof(user_sseu))) + return -EFAULT; + + if (user_sseu.rsvd) + return -EINVAL; + + if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) + return -EINVAL; + + if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0)) + return -EINVAL; + + if (pc->num_user_engines >= 0) { + int idx = user_sseu.engine.engine_instance; + struct i915_gem_proto_engine *pe; + + if (idx >= pc->num_user_engines) + return 
-EINVAL; + + idx = array_index_nospec(idx, pc->num_user_engines); + pe = &pc->user_engines[idx]; + + /* Only render engine supports RPCS configuration. */ + if (pe->engine->class != RENDER_CLASS) + return -EINVAL; + + sseu = &pe->sseu; + } else { + /* Only render engine supports RPCS configuration. */ + if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER) + return -EINVAL; + + /* There is only one render engine */ + if (user_sseu.engine.engine_instance != 0) + return -EINVAL; + + sseu = &pc->legacy_rcs_sseu; + } + + ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu); + if (ret) + return ret; + + args->size = sizeof(user_sseu); + + return 0; +} + +static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, + struct i915_gem_proto_context *pc, + struct drm_i915_gem_context_param *args) +{ + struct drm_i915_private *i915 = fpriv->i915; + int ret = 0; + + switch (args->param) { + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + if (args->size) + ret = -EINVAL; + else if (args->value) + pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE); + else + pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE); + break; + + case I915_CONTEXT_PARAM_BANNABLE: + if (args->size) + ret = -EINVAL; + else if (!capable(CAP_SYS_ADMIN) && !args->value) + ret = -EPERM; + else if (args->value) + pc->user_flags |= BIT(UCONTEXT_BANNABLE); + else if (pc->uses_protected_content) + ret = -EPERM; + else + pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); + break; + + case I915_CONTEXT_PARAM_LOW_LATENCY: + if (intel_uc_uses_guc_submission(&to_gt(i915)->uc)) + pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY); + else + ret = -EINVAL; + break; + + case I915_CONTEXT_PARAM_RECOVERABLE: + if (args->size) + ret = -EINVAL; + else if (!args->value) + pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); + else if (pc->uses_protected_content) + ret = -EPERM; + else + pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); + break; + + case I915_CONTEXT_PARAM_PRIORITY: + ret = validate_priority(fpriv->i915, args); + if (!ret) + pc->sched.priority = args->value; + break; + + case I915_CONTEXT_PARAM_SSEU: + ret = set_proto_ctx_sseu(fpriv, pc, args); + break; + + case I915_CONTEXT_PARAM_VM: + ret = set_proto_ctx_vm(fpriv, pc, args); + break; + + case I915_CONTEXT_PARAM_ENGINES: + ret = set_proto_ctx_engines(fpriv, pc, args); + break; + + case I915_CONTEXT_PARAM_PERSISTENCE: + if (args->size) + ret = -EINVAL; + else + ret = proto_context_set_persistence(fpriv->i915, pc, + args->value); + break; + + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: + ret = proto_context_set_protected(fpriv->i915, pc, + args->value); + break; + + case I915_CONTEXT_PARAM_NO_ZEROMAP: + case I915_CONTEXT_PARAM_BAN_PERIOD: + case I915_CONTEXT_PARAM_RINGSIZE: + case I915_CONTEXT_PARAM_CONTEXT_IMAGE: + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int intel_context_set_gem(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_sseu sseu) +{ + int ret = 0; + + GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); + RCU_INIT_POINTER(ce->gem_context, ctx); + + GEM_BUG_ON(intel_context_is_pinned(ce)); + + if (ce->engine->class == COMPUTE_CLASS) + ce->ring_size = SZ_512K; + else + ce->ring_size = SZ_16K; + + i915_vm_put(ce->vm); + ce->vm = i915_gem_context_get_eb_vm(ctx); if (ctx->sched.priority >= I915_PRIORITY_NORMAL && - intel_engine_has_timeslices(ce->engine)) + intel_engine_has_timeslices(ce->engine) && + intel_engine_has_semaphores(ce->engine)) __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); - intel_context_set_watchdog_us(ce, 
ctx->watchdog.timeout_us); + if (CONFIG_DRM_I915_REQUEST_TIMEOUT && + ctx->i915->params.request_timeout_ms) { + unsigned int timeout_ms = ctx->i915->params.request_timeout_ms; + + intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000); + } + + /* A valid SSEU has no zero fields */ + if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS)) + ret = intel_context_reconfigure_sseu(ce, sseu); + + if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags)) + __set_bit(CONTEXT_LOW_LATENCY, &ce->flags); + + return ret; +} + +static void __unpin_engines(struct i915_gem_engines *e, unsigned int count) +{ + while (count--) { + struct intel_context *ce = e->engines[count], *child; + + if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags)) + continue; + + for_each_child(ce, child) + intel_context_unpin(child); + intel_context_unpin(ce); + } +} + +static void unpin_engines(struct i915_gem_engines *e) +{ + __unpin_engines(e, e->num_engines); } static void __free_engines(struct i915_gem_engines *e, unsigned int count) @@ -261,23 +1052,44 @@ static void free_engines_rcu(struct rcu_head *rcu) free_engines(engines); } -static int __i915_sw_fence_call +static void accumulate_runtime(struct i915_drm_client *client, + struct i915_gem_engines *engines) +{ + struct i915_gem_engines_iter it; + struct intel_context *ce; + + if (!client) + return; + + /* Transfer accumulated runtime to the parent GEM context. */ + for_each_gem_engine(ce, engines, it) { + unsigned int class = ce->engine->uabi_class; + + GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime)); + atomic64_add(intel_context_get_total_runtime_ns(ce), + &client->past_runtime[class]); + } +} + +static int engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_gem_engines *engines = container_of(fence, typeof(*engines), fence); + struct i915_gem_context *ctx = engines->ctx; switch (state) { case FENCE_COMPLETE: if (!list_empty(&engines->link)) { - struct i915_gem_context *ctx = engines->ctx; unsigned long flags; spin_lock_irqsave(&ctx->stale.lock, flags); list_del(&engines->link); spin_unlock_irqrestore(&ctx->stale.lock, flags); } - i915_gem_context_put(engines->ctx); + accumulate_runtime(ctx->client, engines); + i915_gem_context_put(ctx); + break; case FENCE_FREE: @@ -301,61 +1113,214 @@ static struct i915_gem_engines *alloc_engines(unsigned int count) return e; } -static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) +static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx, + struct intel_sseu rcs_sseu) { - const struct intel_gt *gt = &ctx->i915->gt; + const unsigned int max = I915_NUM_ENGINES; struct intel_engine_cs *engine; - struct i915_gem_engines *e; - enum intel_engine_id id; + struct i915_gem_engines *e, *err; - e = alloc_engines(I915_NUM_ENGINES); + e = alloc_engines(max); if (!e) return ERR_PTR(-ENOMEM); - for_each_engine(engine, gt, id) { + for_each_uabi_engine(engine, ctx->i915) { struct intel_context *ce; + struct intel_sseu sseu = {}; + int ret; if (engine->legacy_idx == INVALID_ENGINE) continue; - GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); + GEM_BUG_ON(engine->legacy_idx >= max); GEM_BUG_ON(e->engines[engine->legacy_idx]); ce = intel_context_create(engine); if (IS_ERR(ce)) { - __free_engines(e, e->num_engines + 1); - return ERR_CAST(ce); + err = ERR_CAST(ce); + goto free_engines; } - intel_context_set_gem(ce, ctx); - e->engines[engine->legacy_idx] = ce; - e->num_engines = max(e->num_engines, engine->legacy_idx); + e->num_engines = 
max(e->num_engines, engine->legacy_idx + 1); + + if (engine->class == RENDER_CLASS) + sseu = rcs_sseu; + + ret = intel_context_set_gem(ce, ctx, sseu); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } - e->num_engines++; return e; + +free_engines: + free_engines(e); + return err; } -void i915_gem_context_release(struct kref *ref) +static int perma_pin_contexts(struct intel_context *ce) { - struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); + struct intel_context *child; + int i = 0, j = 0, ret; + + GEM_BUG_ON(!intel_context_is_parent(ce)); + + ret = intel_context_pin(ce); + if (unlikely(ret)) + return ret; + + for_each_child(ce, child) { + ret = intel_context_pin(child); + if (unlikely(ret)) + goto unwind; + ++i; + } + + set_bit(CONTEXT_PERMA_PIN, &ce->flags); + + return 0; + +unwind: + intel_context_unpin(ce); + for_each_child(ce, child) { + if (j++ < i) + intel_context_unpin(child); + else + break; + } + + return ret; +} + +static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, + unsigned int num_engines, + struct i915_gem_proto_engine *pe) +{ + struct i915_gem_engines *e, *err; + unsigned int n; + + e = alloc_engines(num_engines); + if (!e) + return ERR_PTR(-ENOMEM); + e->num_engines = num_engines; + + for (n = 0; n < num_engines; n++) { + struct intel_context *ce, *child; + int ret; + + switch (pe[n].type) { + case I915_GEM_ENGINE_TYPE_PHYSICAL: + ce = intel_context_create(pe[n].engine); + break; + + case I915_GEM_ENGINE_TYPE_BALANCED: + ce = intel_engine_create_virtual(pe[n].siblings, + pe[n].num_siblings, 0); + break; + + case I915_GEM_ENGINE_TYPE_PARALLEL: + ce = intel_engine_create_parallel(pe[n].siblings, + pe[n].num_siblings, + pe[n].width); + break; + + case I915_GEM_ENGINE_TYPE_INVALID: + default: + GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID); + continue; + } + + if (IS_ERR(ce)) { + err = ERR_CAST(ce); + goto free_engines; + } + + e->engines[n] = ce; + + ret = intel_context_set_gem(ce, ctx, pe->sseu); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + for_each_child(ce, child) { + ret = intel_context_set_gem(child, ctx, pe->sseu); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } + + /* + * XXX: Must be done after calling intel_context_set_gem as that + * function changes the ring size. The ring is allocated when + * the context is pinned. If the ring size is changed after + * allocation we have a mismatch of the ring size and will cause + * the context to hang. Presumably with a bit of reordering we + * could move the perma-pin step to the backend function + * intel_engine_create_parallel. 
+ */ + if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) { + ret = perma_pin_contexts(ce); + if (ret) { + err = ERR_PTR(ret); + goto free_engines; + } + } + } + + return e; + +free_engines: + free_engines(e); + return err; +} + +static void i915_gem_context_release_work(struct work_struct *work) +{ + struct i915_gem_context *ctx = container_of(work, typeof(*ctx), + release_work); + struct i915_address_space *vm; trace_i915_context_free(ctx); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + spin_lock(&ctx->i915->gem.contexts.lock); + list_del(&ctx->link); + spin_unlock(&ctx->i915->gem.contexts.lock); + + if (ctx->syncobj) + drm_syncobj_put(ctx->syncobj); + + vm = ctx->vm; + if (vm) + i915_vm_put(vm); + + if (ctx->pxp_wakeref) + intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref); + + if (ctx->client) + i915_drm_client_put(ctx->client); + mutex_destroy(&ctx->engines_mutex); mutex_destroy(&ctx->lut_mutex); - if (ctx->timeline) - intel_timeline_put(ctx->timeline); - put_pid(ctx->pid); mutex_destroy(&ctx->mutex); kfree_rcu(ctx, rcu); } +void i915_gem_context_release(struct kref *ref) +{ + struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); + + queue_work(ctx->i915->wq, &ctx->release_work); +} + static inline struct i915_gem_engines * __context_engines_static(const struct i915_gem_context *ctx) { @@ -426,7 +1391,8 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce) return engine; } -static void kill_engines(struct i915_gem_engines *engines, bool ban) +static void +kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent) { struct i915_gem_engines_iter it; struct intel_context *ce; @@ -441,8 +1407,8 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban) for_each_gem_engine(ce, engines, it) { struct intel_engine_cs *engine; - if (ban && intel_context_set_banned(ce)) - continue; + if ((exit || !persistent) && intel_context_revoke(ce)) + continue; /* Already marked. 
*/ /* * Check the current active state of this context; if we @@ -454,7 +1420,7 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban) engine = active_engine(ce); /* First attempt to gracefully cancel the context */ - if (engine && !__cancel_engine(engine) && ban) + if (engine && !__cancel_engine(engine) && (exit || !persistent)) /* * If we are unable to send a preemptive pulse to bump * the context from the GPU, we have to resort to a full @@ -466,8 +1432,6 @@ static void kill_engines(struct i915_gem_engines *engines, bool ban) static void kill_context(struct i915_gem_context *ctx) { - bool ban = (!i915_gem_context_is_persistent(ctx) || - !ctx->i915->params.enable_hangcheck); struct i915_gem_engines *pos, *next; spin_lock_irq(&ctx->stale.lock); @@ -480,7 +1444,8 @@ static void kill_context(struct i915_gem_context *ctx) spin_unlock_irq(&ctx->stale.lock); - kill_engines(pos, ban); + kill_engines(pos, !ctx->i915->params.enable_hangcheck, + i915_gem_context_is_persistent(ctx)); spin_lock_irq(&ctx->stale.lock); GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); @@ -506,7 +1471,7 @@ static void engines_idle_release(struct i915_gem_context *ctx, int err; /* serialises with execbuf */ - set_bit(CONTEXT_CLOSED_BIT, &ce->flags); + intel_context_close(ce); if (!intel_context_pin_if_active(ce)) continue; @@ -526,7 +1491,8 @@ static void engines_idle_release(struct i915_gem_context *ctx, kill: if (list_empty(&engines->link)) /* raced, already closed */ - kill_engines(engines, true); + kill_engines(engines, true, + i915_gem_context_is_persistent(ctx)); i915_sw_fence_commit(&engines->fence); } @@ -550,10 +1516,11 @@ static void set_closed_name(struct i915_gem_context *ctx) static void context_close(struct i915_gem_context *ctx) { - struct i915_address_space *vm; + struct i915_drm_client *client; /* Flush any concurrent set_engines() */ mutex_lock(&ctx->engines_mutex); + unpin_engines(__context_engines_static(ctx)); engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); i915_gem_context_set_closed(ctx); mutex_unlock(&ctx->engines_mutex); @@ -562,12 +1529,6 @@ static void context_close(struct i915_gem_context *ctx) set_closed_name(ctx); - vm = i915_gem_context_vm(ctx); - if (vm) - i915_vm_close(vm); - - ctx->file_priv = ERR_PTR(-EBADF); - /* * The LUT uses the VMA as a backpointer to unref the object, * so we need to clear the LUT before we close all the VMA (inside @@ -575,9 +1536,14 @@ static void context_close(struct i915_gem_context *ctx) */ lut_close(ctx); - spin_lock(&ctx->i915->gem.contexts.lock); - list_del(&ctx->link); - spin_unlock(&ctx->i915->gem.contexts.lock); + ctx->file_priv = ERR_PTR(-EBADF); + + client = ctx->client; + if (client) { + spin_lock(&client->ctx_lock); + list_del_rcu(&ctx->client_link); + spin_unlock(&client->ctx_lock); + } mutex_unlock(&ctx->mutex); @@ -623,10 +1589,10 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state) * * However, if we cannot reset an engine by itself, we cannot * cleanup a hanging persistent context without causing - * colateral damage, and we should not pretend we can by + * collateral damage, and we should not pretend we can by * exposing the interface. 
*/ - if (!intel_has_reset_engine(&ctx->i915->gt)) + if (!intel_has_reset_engine(to_gt(ctx->i915))) return -ENODEV; i915_gem_context_clear_persistence(ctx); @@ -636,9 +1602,11 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state) } static struct i915_gem_context * -__create_context(struct drm_i915_private *i915) +i915_gem_create_context(struct drm_i915_private *i915, + const struct i915_gem_proto_context *pc) { struct i915_gem_context *ctx; + struct i915_address_space *vm = NULL; struct i915_gem_engines *e; int err; int i; @@ -649,18 +1617,46 @@ __create_context(struct drm_i915_private *i915) kref_init(&ctx->ref); ctx->i915 = i915; - ctx->sched.priority = I915_PRIORITY_NORMAL; + ctx->sched = pc->sched; mutex_init(&ctx->mutex); INIT_LIST_HEAD(&ctx->link); + INIT_WORK(&ctx->release_work, i915_gem_context_release_work); spin_lock_init(&ctx->stale.lock); INIT_LIST_HEAD(&ctx->stale.engines); + if (pc->vm) { + vm = i915_vm_get(pc->vm); + } else if (HAS_FULL_PPGTT(i915)) { + struct i915_ppgtt *ppgtt; + + ppgtt = i915_ppgtt_create(to_gt(i915), 0); + if (IS_ERR(ppgtt)) { + drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", + PTR_ERR(ppgtt)); + err = PTR_ERR(ppgtt); + goto err_ctx; + } + ppgtt->vm.fpriv = pc->fpriv; + vm = &ppgtt->vm; + } + if (vm) + ctx->vm = vm; + + /* Assign early so intel_context_set_gem can access these flags */ + ctx->user_flags = pc->user_flags; + mutex_init(&ctx->engines_mutex); - e = default_engines(ctx); + if (pc->num_user_engines >= 0) { + i915_gem_context_set_user_engines(ctx); + e = user_engines(ctx, pc->num_user_engines, pc->user_engines); + } else { + i915_gem_context_clear_user_engines(ctx); + e = default_engines(ctx, pc->legacy_rcs_sseu); + } if (IS_ERR(e)) { err = PTR_ERR(e); - goto err_free; + goto err_vm; } RCU_INIT_POINTER(ctx->engines, e); @@ -672,210 +1668,34 @@ __create_context(struct drm_i915_private *i915) * is no remap info, it will be a NOP. */ ctx->remap_slice = ALL_L3_SLICES(i915); - i915_gem_context_set_bannable(ctx); - i915_gem_context_set_recoverable(ctx); - __context_set_persistence(ctx, true /* cgroup hook? 
*/); - for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; - return ctx; - -err_free: - kfree(ctx); - return ERR_PTR(err); -} - -static inline struct i915_gem_engines * -__context_engines_await(const struct i915_gem_context *ctx, - bool *user_engines) -{ - struct i915_gem_engines *engines; - - rcu_read_lock(); - do { - engines = rcu_dereference(ctx->engines); - GEM_BUG_ON(!engines); - - if (user_engines) - *user_engines = i915_gem_context_user_engines(ctx); - - /* successful await => strong mb */ - if (unlikely(!i915_sw_fence_await(&engines->fence))) - continue; - - if (likely(engines == rcu_access_pointer(ctx->engines))) - break; - - i915_sw_fence_complete(&engines->fence); - } while (1); - rcu_read_unlock(); - - return engines; -} - -static int -context_apply_all(struct i915_gem_context *ctx, - int (*fn)(struct intel_context *ce, void *data), - void *data) -{ - struct i915_gem_engines_iter it; - struct i915_gem_engines *e; - struct intel_context *ce; - int err = 0; - - e = __context_engines_await(ctx, NULL); - for_each_gem_engine(ce, e, it) { - err = fn(ce, data); + if (pc->single_timeline) { + err = drm_syncobj_create(&ctx->syncobj, + DRM_SYNCOBJ_CREATE_SIGNALED, + NULL); if (err) - break; - } - i915_sw_fence_complete(&e->fence); - - return err; -} - -static int __apply_ppgtt(struct intel_context *ce, void *vm) -{ - i915_vm_put(ce->vm); - ce->vm = i915_vm_get(vm); - return 0; -} - -static struct i915_address_space * -__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) -{ - struct i915_address_space *old; - - old = rcu_replace_pointer(ctx->vm, - i915_vm_open(vm), - lockdep_is_held(&ctx->mutex)); - GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); - - context_apply_all(ctx, __apply_ppgtt, vm); - - return old; -} - -static void __assign_ppgtt(struct i915_gem_context *ctx, - struct i915_address_space *vm) -{ - if (vm == rcu_access_pointer(ctx->vm)) - return; - - vm = __set_ppgtt(ctx, vm); - if (vm) - i915_vm_close(vm); -} - -static void __set_timeline(struct intel_timeline **dst, - struct intel_timeline *src) -{ - struct intel_timeline *old = *dst; - - *dst = src ? intel_timeline_get(src) : NULL; - - if (old) - intel_timeline_put(old); -} - -static int __apply_timeline(struct intel_context *ce, void *timeline) -{ - __set_timeline(&ce->timeline, timeline); - return 0; -} - -static void __assign_timeline(struct i915_gem_context *ctx, - struct intel_timeline *timeline) -{ - __set_timeline(&ctx->timeline, timeline); - context_apply_all(ctx, __apply_timeline, timeline); -} - -static int __apply_watchdog(struct intel_context *ce, void *timeout_us) -{ - return intel_context_set_watchdog_us(ce, (uintptr_t)timeout_us); -} - -static int -__set_watchdog(struct i915_gem_context *ctx, unsigned long timeout_us) -{ - int ret; - - ret = context_apply_all(ctx, __apply_watchdog, - (void *)(uintptr_t)timeout_us); - if (!ret) - ctx->watchdog.timeout_us = timeout_us; - - return ret; -} - -static void __set_default_fence_expiry(struct i915_gem_context *ctx) -{ - struct drm_i915_private *i915 = ctx->i915; - int ret; - - if (!IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) || - !i915->params.request_timeout_ms) - return; - - /* Default expiry for user fences. */ - ret = __set_watchdog(ctx, i915->params.request_timeout_ms * 1000); - if (ret) - drm_notice(&i915->drm, - "Failed to configure default fence expiry! 
(%d)", - ret); -} - -static struct i915_gem_context * -i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) -{ - struct i915_gem_context *ctx; - - if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && - !HAS_EXECLISTS(i915)) - return ERR_PTR(-EINVAL); - - ctx = __create_context(i915); - if (IS_ERR(ctx)) - return ctx; - - if (HAS_FULL_PPGTT(i915)) { - struct i915_ppgtt *ppgtt; - - ppgtt = i915_ppgtt_create(&i915->gt); - if (IS_ERR(ppgtt)) { - drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", - PTR_ERR(ppgtt)); - context_close(ctx); - return ERR_CAST(ppgtt); - } - - mutex_lock(&ctx->mutex); - __assign_ppgtt(ctx, &ppgtt->vm); - mutex_unlock(&ctx->mutex); - - i915_vm_put(&ppgtt->vm); + goto err_engines; } - if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { - struct intel_timeline *timeline; - - timeline = intel_timeline_create(&i915->gt); - if (IS_ERR(timeline)) { - context_close(ctx); - return ERR_CAST(timeline); - } - - __assign_timeline(ctx, timeline); - intel_timeline_put(timeline); + if (pc->uses_protected_content) { + ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); + ctx->uses_protected_content = true; } - __set_default_fence_expiry(ctx); - trace_i915_context_create(ctx); return ctx; + +err_engines: + free_engines(e); +err_vm: + if (ctx->vm) + i915_vm_put(ctx->vm); +err_ctx: + kfree(ctx); + return ERR_PTR(err); } static void init_contexts(struct i915_gem_contexts *gc) @@ -889,83 +1709,93 @@ void i915_gem_init__contexts(struct drm_i915_private *i915) init_contexts(&i915->gem.contexts); } -static int gem_context_register(struct i915_gem_context *ctx, - struct drm_i915_file_private *fpriv, - u32 *id) +/* + * Note that this implicitly consumes the ctx reference, by placing + * the ctx in the context_xa. + */ +static void gem_context_register(struct i915_gem_context *ctx, + struct drm_i915_file_private *fpriv, + u32 id) { struct drm_i915_private *i915 = ctx->i915; - struct i915_address_space *vm; - int ret; + void *old; ctx->file_priv = fpriv; - mutex_lock(&ctx->mutex); - vm = i915_gem_context_vm(ctx); - if (vm) - WRITE_ONCE(vm->file, fpriv); /* XXX */ - mutex_unlock(&ctx->mutex); - ctx->pid = get_task_pid(current, PIDTYPE_PID); + ctx->client = i915_drm_client_get(fpriv->client); + snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", current->comm, pid_nr(ctx->pid)); - /* And finally expose ourselves to userspace via the idr */ - ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); - if (ret) - goto err_pid; + spin_lock(&ctx->client->ctx_lock); + list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list); + spin_unlock(&ctx->client->ctx_lock); spin_lock(&i915->gem.contexts.lock); list_add_tail(&ctx->link, &i915->gem.contexts.list); spin_unlock(&i915->gem.contexts.lock); - return 0; - -err_pid: - put_pid(fetch_and_zero(&ctx->pid)); - return ret; + /* And finally expose ourselves to userspace via the idr */ + old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL); + WARN_ON(old); } int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; int err; - u32 id; - xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); + mutex_init(&file_priv->proto_context_lock); + xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC); + + /* 0 reserved for the default context */ + xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1); /* 0 reserved for invalid/unassigned ppgtt */ 
xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); - ctx = i915_gem_create_context(i915, 0); + pc = proto_context_create(file_priv, i915, 0); + if (IS_ERR(pc)) { + err = PTR_ERR(pc); + goto err; + } + + ctx = i915_gem_create_context(i915, pc); + proto_context_close(i915, pc); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err; } - err = gem_context_register(ctx, file_priv, &id); - if (err < 0) - goto err_ctx; + gem_context_register(ctx, file_priv, 0); - GEM_BUG_ON(id); return 0; -err_ctx: - context_close(ctx); err: xa_destroy(&file_priv->vm_xa); xa_destroy(&file_priv->context_xa); + xa_destroy(&file_priv->proto_context_xa); + mutex_destroy(&file_priv->proto_context_lock); return err; } void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_gem_proto_context *pc; struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long idx; + xa_for_each(&file_priv->proto_context_xa, idx, pc) + proto_context_close(file_priv->i915, pc); + xa_destroy(&file_priv->proto_context_xa); + mutex_destroy(&file_priv->proto_context_lock); + xa_for_each(&file_priv->context_xa, idx, ctx) context_close(ctx); xa_destroy(&file_priv->context_xa); @@ -991,12 +1821,10 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, if (args->flags) return -EINVAL; - ppgtt = i915_ppgtt_create(&i915->gt); + ppgtt = i915_ppgtt_create(to_gt(i915), 0); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); - ppgtt->vm.file = file_priv; - if (args->extensions) { err = i915_user_extensions(u64_to_user_ptr(args->extensions), NULL, 0, @@ -1012,6 +1840,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ args->vm_id = id; + ppgtt->vm.fpriv = file_priv; return 0; err_put: @@ -1040,120 +1869,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, return 0; } -struct context_barrier_task { - struct i915_active base; - void (*task)(void *data); - void *data; -}; - -static void cb_retire(struct i915_active *base) -{ - struct context_barrier_task *cb = container_of(base, typeof(*cb), base); - - if (cb->task) - cb->task(cb->data); - - i915_active_fini(&cb->base); - kfree(cb); -} - -I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); -static int context_barrier_task(struct i915_gem_context *ctx, - intel_engine_mask_t engines, - bool (*skip)(struct intel_context *ce, void *data), - int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data), - int (*emit)(struct i915_request *rq, void *data), - void (*task)(void *data), - void *data) -{ - struct context_barrier_task *cb; - struct i915_gem_engines_iter it; - struct i915_gem_engines *e; - struct i915_gem_ww_ctx ww; - struct intel_context *ce; - int err = 0; - - GEM_BUG_ON(!task); - - cb = kmalloc(sizeof(*cb), GFP_KERNEL); - if (!cb) - return -ENOMEM; - - i915_active_init(&cb->base, NULL, cb_retire, 0); - err = i915_active_acquire(&cb->base); - if (err) { - kfree(cb); - return err; - } - - e = __context_engines_await(ctx, NULL); - if (!e) { - i915_active_release(&cb->base); - return -ENOENT; - } - - for_each_gem_engine(ce, e, it) { - struct i915_request *rq; - - if (I915_SELFTEST_ONLY(context_barrier_inject_fault & - ce->engine->mask)) { - err = -ENXIO; - break; - } - - if (!(ce->engine->mask & engines)) - continue; - - if (skip && skip(ce, data)) - continue; - - i915_gem_ww_ctx_init(&ww, true); -retry: - err = intel_context_pin_ww(ce, &ww); - if (err) - goto err; - - if (pin) - err 
= pin(ce, &ww, data); - if (err) - goto err_unpin; - - rq = i915_request_create(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_unpin; - } - - err = 0; - if (emit) - err = emit(rq, data); - if (err == 0) - err = i915_active_add_request(&cb->base, rq); - - i915_request_add(rq); -err_unpin: - intel_context_unpin(ce); -err: - if (err == -EDEADLK) { - err = i915_gem_ww_ctx_backoff(&ww); - if (!err) - goto retry; - } - i915_gem_ww_ctx_fini(&ww); - - if (err) - break; - } - i915_sw_fence_complete(&e->fence); - - cb->task = err ? NULL : task; /* caller needs to unwind instead */ - cb->data = data; - - i915_active_release(&cb->base); - - return err; -} - static int get_ppgtt(struct drm_i915_file_private *file_priv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) @@ -1162,235 +1877,33 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, int err; u32 id; - if (!rcu_access_pointer(ctx->vm)) + if (!i915_gem_context_has_full_ppgtt(ctx)) return -ENODEV; - rcu_read_lock(); - vm = context_get_vm_rcu(ctx); - rcu_read_unlock(); - if (!vm) - return -ENODEV; + vm = ctx->vm; + GEM_BUG_ON(!vm); - err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); - if (err) - goto err_put; + /* + * Get a reference for the allocated handle. Once the handle is + * visible in the vm_xa table, userspace could try to close it + * from under our feet, so we need to hold the extra reference + * first. + */ + i915_vm_get(vm); - i915_vm_open(vm); + err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); + if (err) { + i915_vm_put(vm); + return err; + } GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ args->value = id; args->size = 0; -err_put: - i915_vm_put(vm); - return err; -} - -static void set_ppgtt_barrier(void *data) -{ - struct i915_address_space *old = data; - - if (GRAPHICS_VER(old->i915) < 8) - gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); - - i915_vm_close(old); -} - -static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data) -{ - struct i915_address_space *vm = ce->vm; - - if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915)) - /* ppGTT is not part of the legacy context image */ - return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww); - - return 0; -} - -static int emit_ppgtt_update(struct i915_request *rq, void *data) -{ - struct i915_address_space *vm = rq->context->vm; - struct intel_engine_cs *engine = rq->engine; - u32 base = engine->mmio_base; - u32 *cs; - int i; - - if (i915_vm_is_4lvl(vm)) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - const dma_addr_t pd_daddr = px_dma(ppgtt->pd); - - cs = intel_ring_begin(rq, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(2); - - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); - *cs++ = upper_32_bits(pd_daddr); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); - *cs++ = lower_32_bits(pd_daddr); - - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - int err; - - /* Magic required to prevent forcewake errors! 
*/ - err = engine->emit_flush(rq, EMIT_INVALIDATE); - if (err) - return err; - - cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; - for (i = GEN8_3LVL_PDPES; i--; ) { - const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); - *cs++ = upper_32_bits(pd_daddr); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); - *cs++ = lower_32_bits(pd_daddr); - } - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - } - - return 0; -} - -static bool skip_ppgtt_update(struct intel_context *ce, void *data) -{ - if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) - return !ce->state; - else - return !atomic_read(&ce->pin_count); -} - -static int set_ppgtt(struct drm_i915_file_private *file_priv, - struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - struct i915_address_space *vm, *old; - int err; - - if (args->size) - return -EINVAL; - - if (!rcu_access_pointer(ctx->vm)) - return -ENODEV; - - if (upper_32_bits(args->value)) - return -ENOENT; - - rcu_read_lock(); - vm = xa_load(&file_priv->vm_xa, args->value); - if (vm && !kref_get_unless_zero(&vm->ref)) - vm = NULL; - rcu_read_unlock(); - if (!vm) - return -ENOENT; - - err = mutex_lock_interruptible(&ctx->mutex); - if (err) - goto out; - - if (i915_gem_context_is_closed(ctx)) { - err = -ENOENT; - goto unlock; - } - - if (vm == rcu_access_pointer(ctx->vm)) - goto unlock; - - old = __set_ppgtt(ctx, vm); - - /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ - lut_close(ctx); - - /* - * We need to flush any requests using the current ppgtt before - * we release it as the requests do not hold a reference themselves, - * only indirectly through the context. 
- */ - err = context_barrier_task(ctx, ALL_ENGINES, - skip_ppgtt_update, - pin_ppgtt_update, - emit_ppgtt_update, - set_ppgtt_barrier, - old); - if (err) { - i915_vm_close(__set_ppgtt(ctx, old)); - i915_vm_close(old); - lut_close(ctx); /* force a rebuild of the old obj:vma cache */ - } - -unlock: - mutex_unlock(&ctx->mutex); -out: - i915_vm_put(vm); return err; } -static int __apply_ringsize(struct intel_context *ce, void *sz) -{ - return intel_context_set_ring_size(ce, (unsigned long)sz); -} - -static int set_ringsize(struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915)) - return -ENODEV; - - if (args->size) - return -EINVAL; - - if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE)) - return -EINVAL; - - if (args->value < I915_GTT_PAGE_SIZE) - return -EINVAL; - - if (args->value > 128 * I915_GTT_PAGE_SIZE) - return -EINVAL; - - return context_apply_all(ctx, - __apply_ringsize, - __intel_context_ring_size(args->value)); -} - -static int __get_ringsize(struct intel_context *ce, void *arg) -{ - long sz; - - sz = intel_context_get_ring_size(ce); - GEM_BUG_ON(sz > INT_MAX); - - return sz; /* stop on first engine */ -} - -static int get_ringsize(struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - int sz; - - if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915)) - return -ENODEV; - - if (args->size) - return -EINVAL; - - sz = context_apply_all(ctx, __get_ringsize, NULL); - if (sz < 0) - return sz; - - args->value = sz; - return 0; -} - int i915_gem_user_to_context_sseu(struct intel_gt *gt, const struct drm_i915_gem_context_param_sseu *user, @@ -1398,6 +1911,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt, { const struct sseu_dev_info *device = >->info.sseu; struct drm_i915_private *i915 = gt->i915; + unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0); /* No zeros in any field. */ if (!user->slice_mask || !user->subslice_mask || @@ -1424,7 +1938,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt, if (user->slice_mask & ~device->slice_mask) return -EINVAL; - if (user->subslice_mask & ~device->subslice_mask[0]) + if (user->subslice_mask & ~dev_subslice_mask) return -EINVAL; if (user->max_eus_per_subslice > device->max_eus_per_subslice) @@ -1438,7 +1952,7 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt, /* Part specific restrictions. 
*/ if (GRAPHICS_VER(i915) == 11) { unsigned int hw_s = hweight8(device->slice_mask); - unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); + unsigned int hw_ss_per_s = hweight8(dev_subslice_mask); unsigned int req_s = hweight8(context->slice_mask); unsigned int req_ss = hweight8(context->subslice_mask); @@ -1545,430 +2059,134 @@ out_ce: return ret; } -struct set_engines { - struct i915_gem_context *ctx; - struct i915_gem_engines *engines; -}; - static int -set_engines__load_balance(struct i915_user_extension __user *base, void *data) +set_persistence(struct i915_gem_context *ctx, + const struct drm_i915_gem_context_param *args) { - struct i915_context_engines_load_balance __user *ext = - container_of_user(base, typeof(*ext), base); - const struct set_engines *set = data; - struct drm_i915_private *i915 = set->ctx->i915; - struct intel_engine_cs *stack[16]; - struct intel_engine_cs **siblings; - struct intel_context *ce; - u16 num_siblings, idx; - unsigned int n; - int err; - - if (!HAS_EXECLISTS(i915)) - return -ENODEV; - - if (intel_uc_uses_guc_submission(&i915->gt.uc)) - return -ENODEV; /* not implement yet */ - - if (get_user(idx, &ext->engine_index)) - return -EFAULT; - - if (idx >= set->engines->num_engines) { - drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", - idx, set->engines->num_engines); + if (args->size) return -EINVAL; - } - idx = array_index_nospec(idx, set->engines->num_engines); - if (set->engines->engines[idx]) { - drm_dbg(&i915->drm, - "Invalid placement[%d], already occupied\n", idx); - return -EEXIST; - } - - if (get_user(num_siblings, &ext->num_siblings)) - return -EFAULT; + return __context_set_persistence(ctx, args->value); +} - err = check_user_mbz(&ext->flags); - if (err) - return err; +static int set_priority(struct i915_gem_context *ctx, + const struct drm_i915_gem_context_param *args) +{ + struct i915_gem_engines_iter it; + struct intel_context *ce; + int err; - err = check_user_mbz(&ext->mbz64); + err = validate_priority(ctx->i915, args); if (err) return err; - siblings = stack; - if (num_siblings > ARRAY_SIZE(stack)) { - siblings = kmalloc_array(num_siblings, - sizeof(*siblings), - GFP_KERNEL); - if (!siblings) - return -ENOMEM; - } - - for (n = 0; n < num_siblings; n++) { - struct i915_engine_class_instance ci; - - if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { - err = -EFAULT; - goto out_siblings; - } + ctx->sched.priority = args->value; - siblings[n] = intel_engine_lookup_user(i915, - ci.engine_class, - ci.engine_instance); - if (!siblings[n]) { - drm_dbg(&i915->drm, - "Invalid sibling[%d]: { class:%d, inst:%d }\n", - n, ci.engine_class, ci.engine_instance); - err = -EINVAL; - goto out_siblings; - } - } + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + if (!intel_engine_has_timeslices(ce->engine)) + continue; - ce = intel_execlists_create_virtual(siblings, n); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out_siblings; + if (ctx->sched.priority >= I915_PRIORITY_NORMAL && + intel_engine_has_semaphores(ce->engine)) + intel_context_set_use_semaphores(ce); + else + intel_context_clear_use_semaphores(ce); } + i915_gem_context_unlock_engines(ctx); - intel_context_set_gem(ce, set->ctx); - - if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { - intel_context_put(ce); - err = -EEXIST; - goto out_siblings; - } + return 0; +} -out_siblings: - if (siblings != stack) - kfree(siblings); +static int get_protected(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + args->size = 0; + 
args->value = i915_gem_context_uses_protected_content(ctx); - return err; + return 0; } -static int -set_engines__bond(struct i915_user_extension __user *base, void *data) +static int set_context_image(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) { - struct i915_context_engines_bond __user *ext = - container_of_user(base, typeof(*ext), base); - const struct set_engines *set = data; - struct drm_i915_private *i915 = set->ctx->i915; - struct i915_engine_class_instance ci; - struct intel_engine_cs *virtual; - struct intel_engine_cs *master; - u16 idx, num_bonds; - int err, n; - - if (get_user(idx, &ext->virtual_index)) - return -EFAULT; + struct i915_gem_context_param_context_image user; + struct intel_context *ce; + struct file *shmem_state; + unsigned long lookup; + void *state; + int ret = 0; - if (idx >= set->engines->num_engines) { - drm_dbg(&i915->drm, - "Invalid index for virtual engine: %d >= %d\n", - idx, set->engines->num_engines); + if (!IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API)) return -EINVAL; - } - idx = array_index_nospec(idx, set->engines->num_engines); - if (!set->engines->engines[idx]) { - drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); + if (!ctx->i915->params.enable_debug_only_api) return -EINVAL; - } - virtual = set->engines->engines[idx]->engine; - err = check_user_mbz(&ext->flags); - if (err) - return err; - - for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { - err = check_user_mbz(&ext->mbz64[n]); - if (err) - return err; - } - - if (copy_from_user(&ci, &ext->master, sizeof(ci))) - return -EFAULT; - - master = intel_engine_lookup_user(i915, - ci.engine_class, ci.engine_instance); - if (!master) { - drm_dbg(&i915->drm, - "Unrecognised master engine: { class:%u, instance:%u }\n", - ci.engine_class, ci.engine_instance); + if (args->size < sizeof(user)) return -EINVAL; - } - if (get_user(num_bonds, &ext->num_bonds)) + if (copy_from_user(&user, u64_to_user_ptr(args->value), sizeof(user))) return -EFAULT; - for (n = 0; n < num_bonds; n++) { - struct intel_engine_cs *bond; - - if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) - return -EFAULT; - - bond = intel_engine_lookup_user(i915, - ci.engine_class, - ci.engine_instance); - if (!bond) { - drm_dbg(&i915->drm, - "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", - n, ci.engine_class, ci.engine_instance); - return -EINVAL; - } - - /* - * A non-virtual engine has no siblings to choose between; and - * a submit fence will always be directed to the one engine. 
- */ - if (intel_engine_is_virtual(virtual)) { - err = intel_virtual_engine_attach_bond(virtual, - master, - bond); - if (err) - return err; - } - } - - return 0; -} - -static const i915_user_extension_fn set_engines__extensions[] = { - [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, - [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, -}; - -static int -set_engines(struct i915_gem_context *ctx, - const struct drm_i915_gem_context_param *args) -{ - struct drm_i915_private *i915 = ctx->i915; - struct i915_context_param_engines __user *user = - u64_to_user_ptr(args->value); - struct set_engines set = { .ctx = ctx }; - unsigned int num_engines, n; - u64 extensions; - int err; - - if (!args->size) { /* switch back to legacy user_ring_map */ - if (!i915_gem_context_user_engines(ctx)) - return 0; - - set.engines = default_engines(ctx); - if (IS_ERR(set.engines)) - return PTR_ERR(set.engines); - - goto replace; - } - - BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); - if (args->size < sizeof(*user) || - !IS_ALIGNED(args->size, sizeof(*user->engines))) { - drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", - args->size); + if (user.mbz) return -EINVAL; - } - - /* - * Note that I915_EXEC_RING_MASK limits execbuf to only using the - * first 64 engines defined here. - */ - num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); - set.engines = alloc_engines(num_engines); - if (!set.engines) - return -ENOMEM; - - for (n = 0; n < num_engines; n++) { - struct i915_engine_class_instance ci; - struct intel_engine_cs *engine; - struct intel_context *ce; - - if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { - __free_engines(set.engines, n); - return -EFAULT; - } - - if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && - ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { - set.engines->engines[n] = NULL; - continue; - } - - engine = intel_engine_lookup_user(ctx->i915, - ci.engine_class, - ci.engine_instance); - if (!engine) { - drm_dbg(&i915->drm, - "Invalid engine[%d]: { class:%d, instance:%d }\n", - n, ci.engine_class, ci.engine_instance); - __free_engines(set.engines, n); - return -ENOENT; - } - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - __free_engines(set.engines, n); - return PTR_ERR(ce); - } - - intel_context_set_gem(ce, ctx); - - set.engines->engines[n] = ce; - } - set.engines->num_engines = num_engines; - - err = -EFAULT; - if (!get_user(extensions, &user->extensions)) - err = i915_user_extensions(u64_to_user_ptr(extensions), - set_engines__extensions, - ARRAY_SIZE(set_engines__extensions), - &set); - if (err) { - free_engines(set.engines); - return err; - } - -replace: - mutex_lock(&ctx->engines_mutex); - if (i915_gem_context_is_closed(ctx)) { - mutex_unlock(&ctx->engines_mutex); - free_engines(set.engines); - return -ENOENT; - } - if (args->size) - i915_gem_context_set_user_engines(ctx); - else - i915_gem_context_clear_user_engines(ctx); - set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); - mutex_unlock(&ctx->engines_mutex); - - /* Keep track of old engine sets for kill_context() */ - engines_idle_release(ctx, set.engines); - - return 0; -} - -static int -get_engines(struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) -{ - struct i915_context_param_engines __user *user; - struct i915_gem_engines *e; - size_t n, count, size; - bool user_engines; - int err = 0; - - e = __context_engines_await(ctx, &user_engines); - if (!e) - return -ENOENT; + if (user.flags & 
~(I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX)) + return -EINVAL; - if (!user_engines) { - i915_sw_fence_complete(&e->fence); - args->size = 0; - return 0; - } + lookup = 0; + if (user.flags & I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX) + lookup |= LOOKUP_USER_INDEX; - count = e->num_engines; + ce = lookup_user_engine(ctx, lookup, &user.engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); - /* Be paranoid in case we have an impedance mismatch */ - if (!check_struct_size(user, engines, count, &size)) { - err = -EINVAL; - goto err_free; - } - if (overflows_type(size, args->size)) { - err = -EINVAL; - goto err_free; + if (user.size < ce->engine->context_size) { + ret = -EINVAL; + goto out_ce; } - if (!args->size) { - args->size = size; - goto err_free; + if (drm_WARN_ON_ONCE(&ctx->i915->drm, + test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { + /* + * This is racy but for a debug only API, if userspace is keen + * to create and configure contexts, while simultaneously using + * them from a second thread, let them suffer by potentially not + * executing with the context image they just raced to apply. + */ + ret = -EBUSY; + goto out_ce; } - if (args->size < size) { - err = -EINVAL; - goto err_free; + state = memdup_user(u64_to_user_ptr(user.image), ce->engine->context_size); + if (IS_ERR(state)) { + ret = PTR_ERR(state); + goto out_ce; } - user = u64_to_user_ptr(args->value); - if (put_user(0, &user->extensions)) { - err = -EFAULT; - goto err_free; + shmem_state = shmem_create_from_data(ce->engine->name, + state, ce->engine->context_size); + if (IS_ERR(shmem_state)) { + ret = PTR_ERR(shmem_state); + goto out_state; } - for (n = 0; n < count; n++) { - struct i915_engine_class_instance ci = { - .engine_class = I915_ENGINE_CLASS_INVALID, - .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, - }; - - if (e->engines[n]) { - ci.engine_class = e->engines[n]->engine->uabi_class; - ci.engine_instance = e->engines[n]->engine->uabi_instance; - } - - if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { - err = -EFAULT; - goto err_free; - } + if (intel_context_set_own_state(ce)) { + ret = -EBUSY; + fput(shmem_state); + goto out_state; } - args->size = size; - -err_free: - i915_sw_fence_complete(&e->fence); - return err; -} + ce->default_state = shmem_state; -static int -set_persistence(struct i915_gem_context *ctx, - const struct drm_i915_gem_context_param *args) -{ - if (args->size) - return -EINVAL; - - return __context_set_persistence(ctx, args->value); -} + args->size = sizeof(user); -static int __apply_priority(struct intel_context *ce, void *arg) -{ - struct i915_gem_context *ctx = arg; - - if (!intel_engine_has_timeslices(ce->engine)) - return 0; - - if (ctx->sched.priority >= I915_PRIORITY_NORMAL) - intel_context_set_use_semaphores(ce); - else - intel_context_clear_use_semaphores(ce); - - return 0; -} - -static int set_priority(struct i915_gem_context *ctx, - const struct drm_i915_gem_context_param *args) -{ - s64 priority = args->value; - - if (args->size) - return -EINVAL; - - if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) - return -ENODEV; - - if (priority > I915_CONTEXT_MAX_USER_PRIORITY || - priority < I915_CONTEXT_MIN_USER_PRIORITY) - return -EINVAL; - - if (priority > I915_CONTEXT_DEFAULT_PRIORITY && - !capable(CAP_SYS_NICE)) - return -EPERM; - - ctx->sched.priority = priority; - context_apply_all(ctx, __apply_priority, ctx); - - return 0; +out_state: + kfree(state); +out_ce: + intel_context_put(ce); + return ret; } static int ctx_setparam(struct drm_i915_file_private *fpriv, @@ -1978,15 +2196,6 
@@ static int ctx_setparam(struct drm_i915_file_private *fpriv, int ret = 0; switch (args->param) { - case I915_CONTEXT_PARAM_NO_ZEROMAP: - if (args->size) - ret = -EINVAL; - else if (args->value) - set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - else - clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - break; - case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: if (args->size) ret = -EINVAL; @@ -2003,6 +2212,8 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = -EPERM; else if (args->value) i915_gem_context_set_bannable(ctx); + else if (i915_gem_context_uses_protected_content(ctx)) + ret = -EPERM; /* can't clear this for protected contexts */ else i915_gem_context_clear_bannable(ctx); break; @@ -2010,10 +2221,12 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size) ret = -EINVAL; - else if (args->value) - i915_gem_context_set_recoverable(ctx); - else + else if (!args->value) i915_gem_context_clear_recoverable(ctx); + else if (i915_gem_context_uses_protected_content(ctx)) + ret = -EPERM; /* can't set this for protected contexts */ + else + i915_gem_context_set_recoverable(ctx); break; case I915_CONTEXT_PARAM_PRIORITY: @@ -2024,23 +2237,20 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = set_sseu(ctx, args); break; - case I915_CONTEXT_PARAM_VM: - ret = set_ppgtt(fpriv, ctx, args); - break; - - case I915_CONTEXT_PARAM_ENGINES: - ret = set_engines(ctx, args); - break; - case I915_CONTEXT_PARAM_PERSISTENCE: ret = set_persistence(ctx, args); break; - case I915_CONTEXT_PARAM_RINGSIZE: - ret = set_ringsize(ctx, args); + case I915_CONTEXT_PARAM_CONTEXT_IMAGE: + ret = set_context_image(ctx, args); break; + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: + case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: + case I915_CONTEXT_PARAM_RINGSIZE: + case I915_CONTEXT_PARAM_VM: + case I915_CONTEXT_PARAM_ENGINES: default: ret = -EINVAL; break; @@ -2050,7 +2260,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, } struct create_ext { - struct i915_gem_context *ctx; + struct i915_gem_proto_context *pc; struct drm_i915_file_private *fpriv; }; @@ -2065,233 +2275,92 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data) if (local.param.ctx_id) return -EINVAL; - return ctx_setparam(arg->fpriv, arg->ctx, &local.param); + return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param); } -static int copy_ring_size(struct intel_context *dst, - struct intel_context *src) +static int invalid_ext(struct i915_user_extension __user *ext, void *data) { - long sz; - - sz = intel_context_get_ring_size(src); - if (sz < 0) - return sz; - - return intel_context_set_ring_size(dst, sz); -} - -static int clone_engines(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - struct i915_gem_engines *clone, *e; - bool user_engines; - unsigned long n; - - e = __context_engines_await(src, &user_engines); - if (!e) - return -ENOENT; - - clone = alloc_engines(e->num_engines); - if (!clone) - goto err_unlock; - - for (n = 0; n < e->num_engines; n++) { - struct intel_engine_cs *engine; - - if (!e->engines[n]) { - clone->engines[n] = NULL; - continue; - } - engine = e->engines[n]->engine; - - /* - * Virtual engines are singletons; they can only exist - * inside a single context, because they embed their - * HW context... 
As each virtual context implies a single - * timeline (each engine can only dequeue a single request - * at any time), it would be surprising for two contexts - * to use the same engine. So let's create a copy of - * the virtual engine instead. - */ - if (intel_engine_is_virtual(engine)) - clone->engines[n] = - intel_execlists_clone_virtual(engine); - else - clone->engines[n] = intel_context_create(engine); - if (IS_ERR_OR_NULL(clone->engines[n])) { - __free_engines(clone, n); - goto err_unlock; - } - - intel_context_set_gem(clone->engines[n], dst); - - /* Copy across the preferred ringsize */ - if (copy_ring_size(clone->engines[n], e->engines[n])) { - __free_engines(clone, n + 1); - goto err_unlock; - } - } - clone->num_engines = n; - i915_sw_fence_complete(&e->fence); - - /* Serialised by constructor */ - engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1)); - if (user_engines) - i915_gem_context_set_user_engines(dst); - else - i915_gem_context_clear_user_engines(dst); - return 0; - -err_unlock: - i915_sw_fence_complete(&e->fence); - return -ENOMEM; + return -EINVAL; } -static int clone_flags(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - dst->user_flags = src->user_flags; - return 0; -} +static const i915_user_extension_fn create_extensions[] = { + [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, + [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext, +}; -static int clone_schedattr(struct i915_gem_context *dst, - struct i915_gem_context *src) +static bool client_is_banned(struct drm_i915_file_private *file_priv) { - dst->sched = src->sched; - return 0; + return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; } -static int clone_sseu(struct i915_gem_context *dst, - struct i915_gem_context *src) +static inline struct i915_gem_context * +__context_lookup(struct drm_i915_file_private *file_priv, u32 id) { - struct i915_gem_engines *e = i915_gem_context_lock_engines(src); - struct i915_gem_engines *clone; - unsigned long n; - int err; - - /* no locking required; sole access under constructor*/ - clone = __context_engines_static(dst); - if (e->num_engines != clone->num_engines) { - err = -EINVAL; - goto unlock; - } - - for (n = 0; n < e->num_engines; n++) { - struct intel_context *ce = e->engines[n]; - - if (clone->engines[n]->engine->class != ce->engine->class) { - /* Must have compatible engine maps! 
*/ - err = -EINVAL; - goto unlock; - } - - /* serialises with set_sseu */ - err = intel_context_lock_pinned(ce); - if (err) - goto unlock; + struct i915_gem_context *ctx; - clone->engines[n]->sseu = ce->sseu; - intel_context_unlock_pinned(ce); - } + rcu_read_lock(); + ctx = xa_load(&file_priv->context_xa, id); + if (ctx && !kref_get_unless_zero(&ctx->ref)) + ctx = NULL; + rcu_read_unlock(); - err = 0; -unlock: - i915_gem_context_unlock_engines(src); - return err; + return ctx; } -static int clone_timeline(struct i915_gem_context *dst, - struct i915_gem_context *src) +static struct i915_gem_context * +finalize_create_context_locked(struct drm_i915_file_private *file_priv, + struct i915_gem_proto_context *pc, u32 id) { - if (src->timeline) - __assign_timeline(dst, src->timeline); + struct i915_gem_context *ctx; + void *old; - return 0; -} + lockdep_assert_held(&file_priv->proto_context_lock); -static int clone_vm(struct i915_gem_context *dst, - struct i915_gem_context *src) -{ - struct i915_address_space *vm; - int err = 0; + ctx = i915_gem_create_context(file_priv->i915, pc); + if (IS_ERR(ctx)) + return ctx; - if (!rcu_access_pointer(src->vm)) - return 0; + /* + * One for the xarray and one for the caller. We need to grab + * the reference *prior* to making the ctx visible to userspace + * in gem_context_register(), as at any point after that + * userspace can try to race us with another thread destroying + * the context under our feet. + */ + i915_gem_context_get(ctx); - rcu_read_lock(); - vm = context_get_vm_rcu(src); - rcu_read_unlock(); + gem_context_register(ctx, file_priv, id); - if (!mutex_lock_interruptible(&dst->mutex)) { - __assign_ppgtt(dst, vm); - mutex_unlock(&dst->mutex); - } else { - err = -EINTR; - } + old = xa_erase(&file_priv->proto_context_xa, id); + GEM_BUG_ON(old != pc); + proto_context_close(file_priv->i915, pc); - i915_vm_put(vm); - return err; + return ctx; } -static int create_clone(struct i915_user_extension __user *ext, void *data) -{ - static int (* const fn[])(struct i915_gem_context *dst, - struct i915_gem_context *src) = { -#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y - MAP(ENGINES, clone_engines), - MAP(FLAGS, clone_flags), - MAP(SCHEDATTR, clone_schedattr), - MAP(SSEU, clone_sseu), - MAP(TIMELINE, clone_timeline), - MAP(VM, clone_vm), -#undef MAP - }; - struct drm_i915_gem_context_create_ext_clone local; - const struct create_ext *arg = data; - struct i915_gem_context *dst = arg->ctx; - struct i915_gem_context *src; - int err, bit; - - if (copy_from_user(&local, ext, sizeof(local))) - return -EFAULT; - - BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) != - I915_CONTEXT_CLONE_UNKNOWN); - - if (local.flags & I915_CONTEXT_CLONE_UNKNOWN) - return -EINVAL; - - if (local.rsvd) - return -EINVAL; - - rcu_read_lock(); - src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id); - rcu_read_unlock(); - if (!src) - return -ENOENT; - - GEM_BUG_ON(src == dst); +struct i915_gem_context * +i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) +{ + struct i915_gem_proto_context *pc; + struct i915_gem_context *ctx; - for (bit = 0; bit < ARRAY_SIZE(fn); bit++) { - if (!(local.flags & BIT(bit))) - continue; + ctx = __context_lookup(file_priv, id); + if (ctx) + return ctx; - err = fn[bit](dst, src); - if (err) - return err; + mutex_lock(&file_priv->proto_context_lock); + /* Try one more time under the lock */ + ctx = __context_lookup(file_priv, id); + if (!ctx) { + pc = xa_load(&file_priv->proto_context_xa, id); + if (!pc) + 
ctx = ERR_PTR(-ENOENT); + else + ctx = finalize_create_context_locked(file_priv, pc, id); } + mutex_unlock(&file_priv->proto_context_lock); - return 0; -} - -static const i915_user_extension_fn create_extensions[] = { - [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, - [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone, -}; - -static bool client_is_banned(struct drm_i915_file_private *file_priv) -{ - return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; + return ctx; } int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, @@ -2309,7 +2378,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) return -EINVAL; - ret = intel_gt_terminally_wedged(&i915->gt); + ret = intel_gt_terminally_wedged(to_gt(i915)); if (ret) return ret; @@ -2321,9 +2390,10 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, return -EIO; } - ext_data.ctx = i915_gem_create_context(i915, args->flags); - if (IS_ERR(ext_data.ctx)) - return PTR_ERR(ext_data.ctx); + ext_data.pc = proto_context_create(file->driver_priv, i915, + args->flags); + if (IS_ERR(ext_data.pc)) + return PTR_ERR(ext_data.pc); if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { ret = i915_user_extensions(u64_to_user_ptr(args->extensions), @@ -2331,20 +2401,38 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, ARRAY_SIZE(create_extensions), &ext_data); if (ret) - goto err_ctx; + goto err_pc; } - ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id); - if (ret < 0) - goto err_ctx; + if (GRAPHICS_VER(i915) > 12) { + struct i915_gem_context *ctx; + + /* Get ourselves a context ID */ + ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL, + xa_limit_32b, GFP_KERNEL); + if (ret) + goto err_pc; + + ctx = i915_gem_create_context(i915, ext_data.pc); + if (IS_ERR(ctx)) { + ret = PTR_ERR(ctx); + goto err_pc; + } + + proto_context_close(i915, ext_data.pc); + gem_context_register(ctx, ext_data.fpriv, id); + } else { + ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id); + if (ret < 0) + goto err_pc; + } args->ctx_id = id; - drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id); return 0; -err_ctx: - context_close(ext_data.ctx); +err_pc: + proto_context_close(i915, ext_data.pc); return ret; } @@ -2353,6 +2441,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_context_destroy *args = data; struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; if (args->pad != 0) @@ -2361,11 +2450,24 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, if (!args->ctx_id) return -ENOENT; + /* We need to hold the proto-context lock here to prevent races + * with finalize_create_context_locked(). 
+ */ + mutex_lock(&file_priv->proto_context_lock); ctx = xa_erase(&file_priv->context_xa, args->ctx_id); - if (!ctx) + pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id); + mutex_unlock(&file_priv->proto_context_lock); + + if (!ctx && !pc) return -ENOENT; + GEM_WARN_ON(ctx && pc); + + if (pc) + proto_context_close(file_priv->i915, pc); + + if (ctx) + context_close(ctx); - context_close(ctx); return 0; } @@ -2430,26 +2532,20 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; struct i915_gem_context *ctx; + struct i915_address_space *vm; int ret = 0; ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (!ctx) - return -ENOENT; + if (IS_ERR(ctx)) + return PTR_ERR(ctx); switch (args->param) { - case I915_CONTEXT_PARAM_NO_ZEROMAP: - args->size = 0; - args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - break; - case I915_CONTEXT_PARAM_GTT_SIZE: args->size = 0; - rcu_read_lock(); - if (rcu_access_pointer(ctx->vm)) - args->value = rcu_dereference(ctx->vm)->total; - else - args->value = to_i915(dev)->ggtt.vm.total; - rcu_read_unlock(); + vm = i915_gem_context_get_eb_vm(ctx); + args->value = vm->total; + i915_vm_put(vm); + break; case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: @@ -2480,20 +2576,20 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, ret = get_ppgtt(file_priv, ctx, args); break; - case I915_CONTEXT_PARAM_ENGINES: - ret = get_engines(ctx, args); - break; - case I915_CONTEXT_PARAM_PERSISTENCE: args->size = 0; args->value = i915_gem_context_is_persistent(ctx); break; - case I915_CONTEXT_PARAM_RINGSIZE: - ret = get_ringsize(ctx, args); + case I915_CONTEXT_PARAM_PROTECTED_CONTENT: + ret = get_protected(ctx, args); break; + case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: + case I915_CONTEXT_PARAM_ENGINES: + case I915_CONTEXT_PARAM_RINGSIZE: + case I915_CONTEXT_PARAM_CONTEXT_IMAGE: default: ret = -EINVAL; break; @@ -2508,16 +2604,32 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; + struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; - int ret; + int ret = 0; - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (!ctx) - return -ENOENT; + mutex_lock(&file_priv->proto_context_lock); + ctx = __context_lookup(file_priv, args->ctx_id); + if (!ctx) { + pc = xa_load(&file_priv->proto_context_xa, args->ctx_id); + if (pc) { + /* Contexts should be finalized inside + * GEM_CONTEXT_CREATE starting with graphics + * version 13. 
+ */ + WARN_ON(GRAPHICS_VER(file_priv->i915) > 12); + ret = set_proto_ctx_param(file_priv, pc, args); + } else { + ret = -ENOENT; + } + } + mutex_unlock(&file_priv->proto_context_lock); - ret = ctx_setparam(file_priv, ctx, args); + if (ctx) { + ret = ctx_setparam(file_priv, ctx, args); + i915_gem_context_put(ctx); + } - i915_gem_context_put(ctx); return ret; } @@ -2527,16 +2639,13 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_reset_stats *args = data; struct i915_gem_context *ctx; - int ret; if (args->flags || args->pad) return -EINVAL; - ret = -ENOENT; - rcu_read_lock(); - ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id); - if (!ctx) - goto out; + ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); /* * We opt for unserialised reads here. This may result in tearing @@ -2553,10 +2662,8 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, args->batch_active = atomic_read(&ctx->guilty_count); args->batch_pending = atomic_read(&ctx->active_count); - ret = 0; -out: - rcu_read_unlock(); - return ret; + i915_gem_context_put(ctx); + return 0; } /* GEM context-engines iterator: for_each_gem_engine() */ @@ -2584,27 +2691,33 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) #include "selftests/i915_gem_context.c" #endif -static void i915_global_gem_context_shrink(void) -{ - kmem_cache_shrink(global.slab_luts); -} - -static void i915_global_gem_context_exit(void) +void i915_gem_context_module_exit(void) { - kmem_cache_destroy(global.slab_luts); + kmem_cache_destroy(slab_luts); } -static struct i915_global_gem_context global = { { - .shrink = i915_global_gem_context_shrink, - .exit = i915_global_gem_context_exit, -} }; - -int __init i915_global_gem_context_init(void) +int __init i915_gem_context_module_init(void) { - global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); - if (!global.slab_luts) + slab_luts = KMEM_CACHE(i915_lut_handle, 0); + if (!slab_luts) return -ENOMEM; - i915_global_register(&global.base); + if (IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API)) { + pr_notice("**************************************************************\n"); + pr_notice("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); + pr_notice("** **\n"); + if (i915_modparams.enable_debug_only_api) + pr_notice("** i915.enable_debug_only_api is intended to be set **\n"); + else + pr_notice("** CONFIG_DRM_I915_REPLAY_GPU_HANGS_API builds are intended **\n"); + pr_notice("** for specific userspace graphics stack developers only! **\n"); + pr_notice("** **\n"); + pr_notice("** If you are seeing this message please report this to the **\n"); + pr_notice("** provider of your kernel build. **\n"); + pr_notice("** **\n"); + pr_notice("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); + pr_notice("**************************************************************\n"); + } + return 0; } |
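
A minimal, hypothetical userspace sketch (not part of the patch above) of the creation-time flow this rework enforces: with ctx_setparam now rejecting I915_CONTEXT_PARAM_VM, _ENGINES and _RINGSIZE on a live context, such parameters have to be supplied through the I915_CONTEXT_CREATE_EXT_SETPARAM extension so they land in the proto-context before it is finalized. The fd and vm_id (the latter assumed to come from an earlier DRM_IOCTL_I915_GEM_VM_CREATE) are placeholders supplied by the caller; error handling is kept to the bare minimum.

#include <errno.h>
#include <stdint.h>

#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Create a context whose ppGTT is fixed to vm_id at creation time. */
static int create_ctx_with_vm(int fd, uint32_t vm_id, uint32_t *ctx_id)
{
	/* Single-node extension chain: one SETPARAM for the VM. */
	struct drm_i915_gem_context_create_ext_setparam vm_param = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_VM,
			.value = vm_id,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&vm_param,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
		return -errno;

	*ctx_id = create.ctx_id;
	return 0;
}

The same pattern applies to any parameter that only exists on the proto-context: chain further drm_i915_gem_context_create_ext_setparam nodes via base.next_extension rather than issuing SETPARAM calls after the context is visible to other threads.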
