Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 560
1 file changed, 451 insertions(+), 109 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 8842c55d4490..afedea02188d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -23,6 +23,7 @@
  */
 
 #include <drm/drm_auth.h>
+#include <drm/drm_drv.h>
 #include "amdgpu.h"
 #include "amdgpu_sched.h"
 #include "amdgpu_ras.h"
@@ -41,16 +42,66 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 	[AMDGPU_HW_IP_VCN_DEC]	=	1,
 	[AMDGPU_HW_IP_VCN_ENC]	=	1,
 	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
+	[AMDGPU_HW_IP_VPE]	=	1,
 };
 
-static int amdgpu_ctx_priority_permit(struct drm_file *filp,
-				      enum drm_sched_priority priority)
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
 {
-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
-		return -EINVAL;
+	switch (ctx_prio) {
+	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+	case AMDGPU_CTX_PRIORITY_LOW:
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+	case AMDGPU_CTX_PRIORITY_HIGH:
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return true;
+	default:
+	case AMDGPU_CTX_PRIORITY_UNSET:
+		/* UNSET priority is not valid and we don't carry that
+		 * around, but set it to NORMAL in the only place this
+		 * function is called, amdgpu_ctx_ioctl().
+		 */
+		return false;
+	}
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+	switch (ctx_prio) {
+	case AMDGPU_CTX_PRIORITY_UNSET:
+		pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
+		return DRM_SCHED_PRIORITY_NORMAL;
+
+	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+		return DRM_SCHED_PRIORITY_LOW;
+
+	case AMDGPU_CTX_PRIORITY_LOW:
+		return DRM_SCHED_PRIORITY_LOW;
+
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+		return DRM_SCHED_PRIORITY_NORMAL;
+
+	case AMDGPU_CTX_PRIORITY_HIGH:
+		return DRM_SCHED_PRIORITY_HIGH;
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return DRM_SCHED_PRIORITY_HIGH;
+
+	/* This should not happen as we sanitized userspace provided priority
+	 * already, WARN if this happens.
+	 */
+	default:
+		WARN(1, "Invalid context priority %d\n", ctx_prio);
+		return DRM_SCHED_PRIORITY_NORMAL;
+	}
+
+}
+
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+				      int32_t priority)
+{
 	/* NORMAL and below are accessible by everyone */
-	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
 		return 0;
 
 	if (capable(CAP_SYS_NICE))
@@ -62,26 +113,52 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }
 
-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
 {
 	switch (prio) {
-	case DRM_SCHED_PRIORITY_HIGH_HW:
-	case DRM_SCHED_PRIORITY_KERNEL:
+	case AMDGPU_CTX_PRIORITY_HIGH:
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
 		return AMDGPU_GFX_PIPE_PRIO_HIGH;
 	default:
 		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	}
 }
 
-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
-						enum drm_sched_priority prio,
-						u32 hw_ip)
+static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
+{
+	switch (prio) {
+	case AMDGPU_CTX_PRIORITY_HIGH:
+		return AMDGPU_RING_PRIO_1;
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return AMDGPU_RING_PRIO_2;
+	default:
+		return AMDGPU_RING_PRIO_0;
+	}
+}
+
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
 {
+	struct amdgpu_device *adev = ctx->mgr->adev;
 	unsigned int hw_prio;
+	int32_t ctx_prio;
+
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
+
+	switch (hw_ip) {
+	case AMDGPU_HW_IP_GFX:
+	case AMDGPU_HW_IP_COMPUTE:
+		hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
+		break;
+	case AMDGPU_HW_IP_VCE:
+	case AMDGPU_HW_IP_VCN_ENC:
+		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
+		break;
+	default:
+		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+		break;
+	}
 
-	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
-			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-			AMDGPU_RING_PRIO_DEFAULT;
 	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
 	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
 		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,56 +166,162 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
 	return hw_prio;
 }
 
+/* Calculate the time spend on the hw */
+static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
+{
+	struct drm_sched_fence *s_fence;
+
+	if (!fence)
+		return ns_to_ktime(0);
+
+	/* When the fence is not even scheduled it can't have spend time */
+	s_fence = to_drm_sched_fence(fence);
+	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
+		return ns_to_ktime(0);
+
+	/* When it is still running account how much already spend */
+	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
+		return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
+
+	return ktime_sub(s_fence->finished.timestamp,
+			 s_fence->scheduled.timestamp);
+}
+
+static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
+				      struct amdgpu_ctx_entity *centity)
+{
+	ktime_t res = ns_to_ktime(0);
+	uint32_t i;
+
+	spin_lock(&ctx->ring_lock);
+	for (i = 0; i < amdgpu_sched_jobs; i++) {
+		res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
+	}
+	spin_unlock(&ctx->ring_lock);
+	return res;
+}
+
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
-			     const u32 ring)
+				  const u32 ring)
 {
-	struct amdgpu_device *adev = ctx->adev;
-	struct amdgpu_ctx_entity *entity;
 	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
-	unsigned num_scheds = 0;
-	unsigned int hw_prio;
-	enum drm_sched_priority priority;
+	struct amdgpu_device *adev = ctx->mgr->adev;
+	struct amdgpu_ctx_entity *entity;
+	enum drm_sched_priority drm_prio;
+	unsigned int hw_prio, num_scheds;
+	int32_t ctx_prio;
 	int r;
 
-	entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
+	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
 			 GFP_KERNEL);
 	if (!entity)
 		return -ENOMEM;
 
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
+	entity->hw_ip = hw_ip;
 	entity->sequence = 1;
-	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
-			ctx->init_priority : ctx->override_priority;
-	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
+	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
 
 	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
-	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
-	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
 
-	if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+	if (!(adev)->xcp_mgr) {
+		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+	} else {
+		struct amdgpu_fpriv *fpriv;
+
+		fpriv = container_of(ctx->ctx_mgr, struct amdgpu_fpriv, ctx_mgr);
+		r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
+					     &num_scheds, &scheds);
+		if (r)
+			goto error_free_entity;
+	}
+
+	/* disable load balance if the hw engine retains context among dependent jobs */
+	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
+	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
+	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
+	    hw_ip == AMDGPU_HW_IP_UVD) {
 		sched = drm_sched_pick_best(scheds, num_scheds);
 		scheds = &sched;
 		num_scheds = 1;
 	}
 
-	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
 				  &ctx->guilty);
 	if (r)
 		goto error_free_entity;
 
-	ctx->entities[hw_ip][ring] = entity;
+	/* It's not an error if we fail to install the new entity */
+	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
+		goto cleanup_entity;
+
 	return 0;
 
+cleanup_entity:
+	drm_sched_entity_fini(&entity->entity);
+
 error_free_entity:
 	kfree(entity);
 
 	return r;
 }
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev,
-			   enum drm_sched_priority priority,
-			   struct drm_file *filp,
-			   struct amdgpu_ctx *ctx)
+static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
+				      struct amdgpu_ctx_entity *entity)
+{
+	ktime_t res = ns_to_ktime(0);
+	int i;
+
+	if (!entity)
+		return res;
+
+	for (i = 0; i < amdgpu_sched_jobs; ++i) {
+		res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
+		dma_fence_put(entity->fences[i]);
+	}
+
+	amdgpu_xcp_release_sched(adev, entity);
+
+	kfree(entity);
+	return res;
+}
+
+static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
+					u32 *stable_pstate)
+{
+	struct amdgpu_device *adev = ctx->mgr->adev;
+	enum amd_dpm_forced_level current_level;
+
+	current_level = amdgpu_dpm_get_performance_level(adev);
+
+	switch (current_level) {
+	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
+		break;
+	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
+		break;
+	default:
+		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
+		break;
+	}
+	return 0;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
+			   struct drm_file *filp, struct amdgpu_ctx *ctx)
 {
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	u32 current_stable_pstate;
 	int r;
 
 	r = amdgpu_ctx_priority_permit(filp, priority);
@@ -147,52 +330,104 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 	memset(ctx, 0, sizeof(*ctx));
 
-	ctx->adev = adev;
-
 	kref_init(&ctx->refcount);
+	ctx->mgr = mgr;
 	spin_lock_init(&ctx->ring_lock);
-	mutex_init(&ctx->lock);
 
-	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
 	ctx->reset_counter_query = ctx->reset_counter;
-	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+	ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm);
 	ctx->init_priority = priority;
-	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
+
+	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+	if (r)
+		return r;
+
+	if (mgr->adev->pm.stable_pstate_ctx)
+		ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
+	else
+		ctx->stable_pstate = current_stable_pstate;
+
+	ctx->ctx_mgr = &(fpriv->ctx_mgr);
 
 	return 0;
 }
 
-static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
+static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
+					u32 stable_pstate)
 {
+	struct amdgpu_device *adev = ctx->mgr->adev;
+	enum amd_dpm_forced_level level;
+	u32 current_stable_pstate;
+	int r;
 
-	int i;
+	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
+	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
+		r = -EBUSY;
+		goto done;
+	}
 
-	if (!entity)
-		return;
+	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+	if (r || (stable_pstate == current_stable_pstate))
+		goto done;
 
-	for (i = 0; i < amdgpu_sched_jobs; ++i)
-		dma_fence_put(entity->fences[i]);
+	switch (stable_pstate) {
+	case AMDGPU_CTX_STABLE_PSTATE_NONE:
+		level = AMD_DPM_FORCED_LEVEL_AUTO;
+		break;
+	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
+		break;
+	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
+		break;
+	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
+		break;
+	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
+		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+		break;
+	default:
+		r = -EINVAL;
+		goto done;
+	}
 
-	kfree(entity);
+	r = amdgpu_dpm_force_performance_level(adev, level);
+
+	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
+		adev->pm.stable_pstate_ctx = NULL;
+	else
+		adev->pm.stable_pstate_ctx = ctx;
+done:
+	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
+
+	return r;
 }
 
 static void amdgpu_ctx_fini(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
-	struct amdgpu_device *adev = ctx->adev;
-	unsigned i, j;
+	struct amdgpu_ctx_mgr *mgr = ctx->mgr;
+	struct amdgpu_device *adev = mgr->adev;
+	unsigned i, j, idx;
 
 	if (!adev)
 		return;
 
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
-			amdgpu_ctx_fini_entity(ctx->entities[i][j]);
-			ctx->entities[i][j] = NULL;
+			ktime_t spend;
+
+			spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]);
+			atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
 		}
 	}
-	mutex_destroy(&ctx->lock);
+
+	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+		amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
+		drm_dev_exit(idx);
+	}
+
 	kfree(ctx);
 }
 
@@ -200,6 +435,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 			  u32 ring, struct drm_sched_entity **entity)
 {
 	int r;
+	struct drm_sched_entity *ctx_entity;
 
 	if (hw_ip >= AMDGPU_HW_IP_NUM) {
 		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
@@ -223,14 +459,21 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 		return r;
 	}
 
-	*entity = &ctx->entities[hw_ip][ring]->entity;
+	ctx_entity = &ctx->entities[hw_ip][ring]->entity;
+	r = drm_sched_entity_error(ctx_entity);
+	if (r) {
+		DRM_DEBUG("error entity %p\n", ctx_entity);
+		return r;
+	}
+
+	*entity = ctx_entity;
 	return 0;
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
 			    struct drm_file *filp,
-			    enum drm_sched_priority priority,
+			    int32_t priority,
 			    uint32_t *id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -250,7 +493,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 	}
 
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, priority, filp, ctx);
+	r = amdgpu_ctx_init(mgr, priority, filp, ctx);
 	if (r) {
 		idr_remove(&mgr->ctx_handles, *id);
 		*id = 0;
@@ -327,13 +570,15 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 	return 0;
 }
 
+#define AMDGPU_RAS_COUNTE_DELAY_MS 3000
+
 static int amdgpu_ctx_query2(struct amdgpu_device *adev,
-	struct amdgpu_fpriv *fpriv, uint32_t id,
-	union drm_amdgpu_ctx_out *out)
+			     struct amdgpu_fpriv *fpriv, uint32_t id,
+			     union drm_amdgpu_ctx_out *out)
 {
+	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr;
-	unsigned long ras_counter;
 
 	if (!fpriv)
 		return -EINVAL;
@@ -352,65 +597,129 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
 		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;
 
-	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
+	if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm))
 		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;
 
 	if (atomic_read(&ctx->guilty))
 		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-	/*query ue count*/
-	ras_counter = amdgpu_ras_query_error_count(adev, false);
-	/*ras counter is monotonic increasing*/
-	if (ras_counter != ctx->ras_counter_ue) {
-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-		ctx->ras_counter_ue = ras_counter;
-	}
+	if (amdgpu_in_reset(adev))
+		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS;
+
+	if (adev->ras_enabled && con) {
+		/* Return the cached values in O(1),
+		 * and schedule delayed work to cache
+		 * new vaues.
+		 */
+		int ce_count, ue_count;
 
-	/*query ce count*/
-	ras_counter = amdgpu_ras_query_error_count(adev, true);
-	if (ras_counter != ctx->ras_counter_ce) {
-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-		ctx->ras_counter_ce = ras_counter;
+		ce_count = atomic_read(&con->ras_ce_count);
+		ue_count = atomic_read(&con->ras_ue_count);
+
+		if (ce_count != ctx->ras_counter_ce) {
+			ctx->ras_counter_ce = ce_count;
+			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+		}
+
+		if (ue_count != ctx->ras_counter_ue) {
+			ctx->ras_counter_ue = ue_count;
+			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+		}
+
+		schedule_delayed_work(&con->ras_counte_delay_work,
+				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
 	}
 
 	mutex_unlock(&mgr->lock);
 	return 0;
 }
 
+static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
+				    struct amdgpu_fpriv *fpriv, uint32_t id,
+				    bool set, u32 *stable_pstate)
+{
+	struct amdgpu_ctx *ctx;
+	struct amdgpu_ctx_mgr *mgr;
+	int r;
+
+	if (!fpriv)
+		return -EINVAL;
+
+	mgr = &fpriv->ctx_mgr;
+	mutex_lock(&mgr->lock);
+	ctx = idr_find(&mgr->ctx_handles, id);
+	if (!ctx) {
+		mutex_unlock(&mgr->lock);
+		return -EINVAL;
+	}
+
+	if (set)
+		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
+	else
+		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);
+
+	mutex_unlock(&mgr->lock);
+	return r;
+}
+
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp)
 {
 	int r;
-	uint32_t id;
-	enum drm_sched_priority priority;
+	uint32_t id, stable_pstate;
+	int32_t priority;
 
 	union drm_amdgpu_ctx *args = data;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 
-	r = 0;
 	id = args->in.ctx_id;
-	priority = amdgpu_to_sched_priority(args->in.priority);
+	priority = args->in.priority;
 
-	/* For backwards compatibility reasons, we need to accept
-	 * ioctls with garbage in the priority field */
-	if (priority == DRM_SCHED_PRIORITY_INVALID)
-		priority = DRM_SCHED_PRIORITY_NORMAL;
+	/* For backwards compatibility, we need to accept ioctls with garbage
	 * in the priority field. Garbage values in the priority field, result
	 * in the priority being set to NORMAL.
+	 */
+	if (!amdgpu_ctx_priority_is_valid(priority))
+		priority = AMDGPU_CTX_PRIORITY_NORMAL;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
+		if (args->in.flags)
+			return -EINVAL;
 		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
 		args->out.alloc.ctx_id = id;
 		break;
 	case AMDGPU_CTX_OP_FREE_CTX:
+		if (args->in.flags)
+			return -EINVAL;
 		r = amdgpu_ctx_free(fpriv, id);
 		break;
 	case AMDGPU_CTX_OP_QUERY_STATE:
+		if (args->in.flags)
+			return -EINVAL;
 		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
 		break;
 	case AMDGPU_CTX_OP_QUERY_STATE2:
+		if (args->in.flags)
+			return -EINVAL;
 		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
 		break;
+	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
+		if (args->in.flags)
+			return -EINVAL;
+		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
+		if (!r)
+			args->out.pstate.flags = stable_pstate;
+		break;
+	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
+		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
+			return -EINVAL;
+		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
+		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
+			return -EINVAL;
+		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -445,9 +754,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 	return 0;
 }
 
-void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
-			  struct drm_sched_entity *entity,
-			  struct dma_fence *fence, uint64_t* handle)
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+			      struct drm_sched_entity *entity,
+			      struct dma_fence *fence)
 {
 	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
 	uint64_t seq = centity->sequence;
@@ -456,8 +765,7 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = centity->fences[idx];
-	if (other)
-		BUG_ON(!dma_fence_is_signaled(other));
+	WARN_ON(other && !dma_fence_is_signaled(other));
 
 	dma_fence_get(fence);
@@ -466,9 +774,11 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
 	centity->sequence++;
 	spin_unlock(&ctx->ring_lock);
 
+	atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
+		     &ctx->mgr->time_spend[centity->hw_ip]);
+
 	dma_fence_put(other);
 
-	if (handle)
-		*handle = seq;
+	return seq;
 }
 
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -501,22 +811,22 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 }
 
 static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
-				    struct amdgpu_ctx_entity *aentity,
-				    int hw_ip,
-				    enum drm_sched_priority priority)
+					    struct amdgpu_ctx_entity *aentity,
+					    int hw_ip,
+					    int32_t priority)
 {
-	struct amdgpu_device *adev = ctx->adev;
+	struct amdgpu_device *adev = ctx->mgr->adev;
 	unsigned int hw_prio;
 	struct drm_gpu_scheduler **scheds = NULL;
 	unsigned num_scheds;
 
 	/* set sw priority */
-	drm_sched_entity_set_priority(&aentity->entity, priority);
+	drm_sched_entity_set_priority(&aentity->entity,
+				      amdgpu_ctx_to_drm_sched_prio(priority));
 
 	/* set hw priority */
-	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
-		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
-						      AMDGPU_HW_IP_COMPUTE);
+	if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
+		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
 		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
 		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
 		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
@@ -526,14 +836,14 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 }
 
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
-				  enum drm_sched_priority priority)
+				  int32_t priority)
 {
-	enum drm_sched_priority ctx_prio;
+	int32_t ctx_prio;
 	unsigned i, j;
 
 	ctx->override_priority = priority;
-	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;
 
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
@@ -570,10 +880,17 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
 	return r;
 }
 
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+			 struct amdgpu_device *adev)
 {
+	unsigned int i;
+
+	mgr->adev = adev;
 	mutex_init(&mgr->lock);
-	idr_init(&mgr->ctx_handles);
+	idr_init_base(&mgr->ctx_handles, 1);
+
+	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+		atomic64_set(&mgr->time_spend[i], 0);
 }
 
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
@@ -602,7 +919,7 @@ long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
 	return timeout;
 }
 
-void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 {
 	struct amdgpu_ctx *ctx;
 	struct idr *idp;
@@ -627,24 +944,49 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 				drm_sched_entity_fini(entity);
 			}
 		}
+		kref_put(&ctx->refcount, amdgpu_ctx_fini);
 	}
 }
 
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 {
+	amdgpu_ctx_mgr_entity_fini(mgr);
+	idr_destroy(&mgr->ctx_handles);
+	mutex_destroy(&mgr->lock);
+}
+
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+			  ktime_t usage[AMDGPU_HW_IP_NUM])
+{
 	struct amdgpu_ctx *ctx;
-	struct idr *idp;
+	unsigned int hw_ip, i;
 	uint32_t id;
 
-	amdgpu_ctx_mgr_entity_fini(mgr);
-
-	idp = &mgr->ctx_handles;
+	/*
	 * This is a little bit racy because it can be that a ctx or a fence are
	 * destroyed just in the moment we try to account them. But that is ok
	 * since exactly that case is explicitely allowed by the interface.
+	 */
+	mutex_lock(&mgr->lock);
+	for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+		uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);
 
-	idr_for_each_entry(idp, ctx, id) {
-		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
-			DRM_ERROR("ctx %p is still alive\n", ctx);
+		usage[hw_ip] = ns_to_ktime(ns);
 	}
 
-	idr_destroy(&mgr->ctx_handles);
-	mutex_destroy(&mgr->lock);
+	idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
+		for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+			for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
+				struct amdgpu_ctx_entity *centity;
+				ktime_t spend;
+
+				centity = ctx->entities[hw_ip][i];
+				if (!centity)
+					continue;
+				spend = amdgpu_ctx_entity_time(ctx, centity);
+				usage[hw_ip] = ktime_add(usage[hw_ip], spend);
+			}
+		}
+	}
+	mutex_unlock(&mgr->lock);
 }
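Usage note (not part of the diff): the sketch below illustrates how the reworked context UAPI above might be exercised from userspace. It is a minimal, hedged example under assumptions — "fd" is an already opened amdgpu render node (e.g. /dev/dri/renderD128), the helper name ctx_alloc_and_set_peak is made up for this illustration, error handling is reduced to returning the ioctl result, and it assumes a libdrm installation whose amdgpu_drm.h already carries the stable-pstate definitions shown in this diff. Only interfaces visible in the diff plus libdrm's drmCommandWriteRead() are used.

/* Hypothetical userspace sketch (illustration only, not part of this patch):
 * allocate a GPU context at NORMAL priority, then request the PEAK stable
 * pstate through the AMDGPU_CTX_OP_SET_STABLE_PSTATE op added above.
 * Build against libdrm, e.g.: gcc example.c $(pkg-config --cflags --libs libdrm)
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int ctx_alloc_and_set_peak(int fd, uint32_t *ctx_id)
{
	union drm_amdgpu_ctx args;
	int r;

	/* AMDGPU_CTX_OP_ALLOC_CTX: in.flags must now be zero; an out-of-range
	 * priority value is treated as NORMAL by amdgpu_ctx_priority_is_valid(). */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_NORMAL;
	r = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		return r;
	*ctx_id = args.out.alloc.ctx_id;

	/* AMDGPU_CTX_OP_SET_STABLE_PSTATE: the requested pstate travels in
	 * in.flags and must stay within AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK. */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
	args.in.ctx_id = *ctx_id;
	args.in.flags = AMDGPU_CTX_STABLE_PSTATE_PEAK;
	return drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
}

The corresponding AMDGPU_CTX_OP_GET_STABLE_PSTATE op returns the current value in out.pstate.flags, matching the kernel-side amdgpu_ctx_stable_pstate() handler in the diff.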
