Diffstat (limited to 'drivers/gpu/drm/i915/gt/selftest_slpc.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_slpc.c | 318
1 file changed, 284 insertions, 34 deletions
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index f8a1d27df272..e61bb0bad12c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -11,7 +11,16 @@
 enum test_type {
 	VARY_MIN,
 	VARY_MAX,
-	MAX_GRANTED
+	MAX_GRANTED,
+	SLPC_POWER,
+	TILE_INTERACTION,
+};
+
+struct slpc_thread {
+	struct kthread_worker *worker;
+	struct kthread_work work;
+	struct intel_gt *gt;
+	int result;
 };
 
 static int slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 freq)
@@ -41,6 +50,79 @@ static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
 	return ret;
 }
 
+static int slpc_set_freq(struct intel_gt *gt, u32 freq)
+{
+	int err;
+	struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
+
+	err = slpc_set_max_freq(slpc, freq);
+	if (err) {
+		pr_err("Unable to update max freq");
+		return err;
+	}
+
+	err = slpc_set_min_freq(slpc, freq);
+	if (err) {
+		pr_err("Unable to update min freq");
+		return err;
+	}
+
+	return err;
+}
+
+static int slpc_restore_freq(struct intel_guc_slpc *slpc, u32 min, u32 max)
+{
+	int err;
+
+	err = slpc_set_max_freq(slpc, max);
+	if (err) {
+		pr_err("Unable to restore max freq");
+		return err;
+	}
+
+	err = slpc_set_min_freq(slpc, min);
+	if (err) {
+		pr_err("Unable to restore min freq");
+		return err;
+	}
+
+	err = intel_guc_slpc_set_ignore_eff_freq(slpc, false);
+	if (err) {
+		pr_err("Unable to restore efficient freq");
+		return err;
+	}
+
+	return 0;
+}
+
+static u64 slpc_measure_power(struct intel_rps *rps, int *freq)
+{
+	u64 x[5];
+	int i;
+
+	for (i = 0; i < 5; i++)
+		x[i] = __measure_power(5);
+
+	*freq = (*freq + intel_rps_read_actual_frequency(rps)) / 2;
+
+	/* A simple triangle filter for better result stability */
+	sort(x, 5, sizeof(*x), cmp_u64, NULL);
+	return div_u64(x[1] + 2 * x[2] + x[3], 4);
+}
+
+static u64 measure_power_at_freq(struct intel_gt *gt, int *freq, u64 *power)
+{
+	int err = 0;
+
+	err = slpc_set_freq(gt, *freq);
+	if (err)
+		return err;
+	*freq = intel_rps_read_actual_frequency(&gt->rps);
+	*power = slpc_measure_power(&gt->rps, freq);
+
+	return err;
+}
+
 static int vary_max_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
 			 u32 *max_act_freq)
 {
@@ -113,6 +195,58 @@ static int vary_min_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
 	return err;
 }
 
+static int slpc_power(struct intel_gt *gt, struct intel_engine_cs *engine)
+{
+	struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
+	struct {
+		u64 power;
+		int freq;
+	} min, max;
+	int err = 0;
+
+	/*
+	 * Our fundamental assumption is that running at lower frequency
+	 * actually saves power. Let's see if our RAPL measurement supports
+	 * that theory.
+	 */
+	if (!librapl_supported(gt->i915))
+		return 0;
+
+	min.freq = slpc->min_freq;
+	err = measure_power_at_freq(gt, &min.freq, &min.power);
+
+	if (err)
+		return err;
+
+	max.freq = slpc->rp0_freq;
+	err = measure_power_at_freq(gt, &max.freq, &max.power);
+
+	if (err)
+		return err;
+
+	pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
+		engine->name,
+		min.power, min.freq,
+		max.power, max.freq);
+
+	if (10 * min.freq >= 9 * max.freq) {
+		pr_notice("Could not control frequency, ran at [%uMHz, %uMhz]\n",
+			  min.freq, max.freq);
+	}
+
+	if (11 * min.power > 10 * max.power) {
+		pr_err("%s: did not conserve power when setting lower frequency!\n",
+		       engine->name);
+		err = -EINVAL;
+	}
+
+	/* Restore min/max frequencies */
+	slpc_set_max_freq(slpc, slpc->rp0_freq);
+	slpc_set_min_freq(slpc, slpc->min_freq);
+
+	return err;
+}
+
 static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps, u32 *max_act_freq)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
@@ -126,7 +260,8 @@ static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
 	*max_act_freq = intel_rps_read_actual_frequency(rps);
 	if (*max_act_freq != slpc->rp0_freq) {
 		/* Check if there was some throttling by pcode */
-		perf_limit_reasons = intel_uncore_read(gt->uncore, GT0_PERF_LIMIT_REASONS);
+		perf_limit_reasons = intel_uncore_read(gt->uncore,
+						       intel_gt_perf_limit_reasons_reg(gt));
 
 		/* If not, this is an error */
 		if (!(perf_limit_reasons & GT0_PERF_LIMIT_REASONS_MASK)) {
@@ -142,10 +277,11 @@
 
 static int run_test(struct intel_gt *gt, int test_type)
 {
-	struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+	struct intel_guc_slpc *slpc = &gt_to_guc(gt)->slpc;
 	struct intel_rps *rps = &gt->rps;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
 	struct igt_spinner spin;
 	u32 slpc_min_freq, slpc_max_freq;
 	int err = 0;
@@ -153,6 +289,11 @@ static int run_test(struct intel_gt *gt, int test_type)
 	if (!intel_uc_uses_guc_slpc(&gt->uc))
 		return 0;
 
+	if (slpc->min_freq == slpc->rp0_freq) {
+		pr_err("Min/Max are fused to the same value\n");
+		return -EINVAL;
+	}
+
 	if (igt_spinner_init(&spin, gt))
 		return -ENOMEM;
 
@@ -167,21 +308,26 @@ static int run_test(struct intel_gt *gt, int test_type)
 	}
 
 	/*
-	 * FIXME: With efficient frequency enabled, GuC can request
-	 * frequencies higher than the SLPC max. While this is fixed
-	 * in GuC, we level set these tests with RPn as min.
+	 * Set min frequency to RPn so that we can test the whole
+	 * range of RPn-RP0.
 	 */
 	err = slpc_set_min_freq(slpc, slpc->min_freq);
-	if (err)
+	if (err) {
+		pr_err("Unable to update min freq!");
 		return err;
+	}
 
-	if (slpc->min_freq == slpc->rp0_freq) {
-		pr_err("Min/Max are fused to the same value\n");
-		return -EINVAL;
+	/*
+	 * Turn off efficient frequency so RPn/RP0 ranges are obeyed.
+	 */
+	err = intel_guc_slpc_set_ignore_eff_freq(slpc, true);
+	if (err) {
+		pr_err("Unable to turn off efficient freq!");
+		return err;
 	}
 
 	intel_gt_pm_wait_for_idle(gt);
-	intel_gt_pm_get(gt);
+	wakeref = intel_gt_pm_get(gt);
 	for_each_engine(engine, gt, id) {
 		struct i915_request *rq;
 		u32 max_act_freq;
@@ -222,9 +368,10 @@ static int run_test(struct intel_gt *gt, int test_type)
 			break;
 
 		case MAX_GRANTED:
+		case TILE_INTERACTION:
 			/* Media engines have a different RP0 */
-			if (engine->class == VIDEO_DECODE_CLASS ||
-			    engine->class == VIDEO_ENHANCEMENT_CLASS) {
+			if (gt->type != GT_MEDIA && (engine->class == VIDEO_DECODE_CLASS ||
+						     engine->class == VIDEO_ENHANCEMENT_CLASS)) {
 				igt_spinner_end(&spin);
 				st_engine_heartbeat_enable(engine);
 				err = 0;
@@ -233,17 +380,24 @@ static int run_test(struct intel_gt *gt, int test_type)
 
 			err = max_granted_freq(slpc, rps, &max_act_freq);
 			break;
-		}
 
-		pr_info("Max actual frequency for %s was %d\n",
-			engine->name, max_act_freq);
+		case SLPC_POWER:
+			err = slpc_power(gt, engine);
+			break;
+		}
 
-		/* Actual frequency should rise above min */
-		if (max_act_freq <= slpc_min_freq) {
-			pr_err("Actual freq did not rise above min\n");
-			pr_err("Perf Limit Reasons: 0x%x\n",
-			       intel_uncore_read(gt->uncore, GT0_PERF_LIMIT_REASONS));
-			err = -EINVAL;
+		if (test_type != SLPC_POWER) {
+			pr_info("Max actual frequency for %s was %d\n",
+				engine->name, max_act_freq);
+
+			/* Actual frequency should rise above min */
+			if (max_act_freq <= slpc->min_freq) {
+				pr_err("Actual freq did not rise above min\n");
+				pr_err("Perf Limit Reasons: 0x%x\n",
+				       intel_uncore_read(gt->uncore,
+							 intel_gt_perf_limit_reasons_reg(gt)));
+				err = -EINVAL;
+			}
 		}
 
 		igt_spinner_end(&spin);
@@ -253,14 +407,13 @@ static int run_test(struct intel_gt *gt, int test_type)
 			break;
 	}
 
-	/* Restore min/max frequencies */
-	slpc_set_max_freq(slpc, slpc_max_freq);
-	slpc_set_min_freq(slpc, slpc_min_freq);
+	/* Restore min/max/efficient frequencies */
+	err = slpc_restore_freq(slpc, slpc_min_freq, slpc_max_freq);
 
 	if (igt_flush_test(gt->i915))
 		err = -EIO;
 
-	intel_gt_pm_put(gt);
+	intel_gt_pm_put(gt, wakeref);
 	igt_spinner_fini(&spin);
 	intel_gt_pm_wait_for_idle(gt);
 
@@ -270,26 +423,116 @@
 static int live_slpc_vary_min(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct intel_gt *gt = to_gt(i915);
+	struct intel_gt *gt;
+	unsigned int i;
+	int ret;
+
+	for_each_gt(gt, i915, i) {
+		ret = run_test(gt, VARY_MIN);
+		if (ret)
+			return ret;
+	}
 
-	return run_test(gt, VARY_MIN);
+	return ret;
 }
 
 static int live_slpc_vary_max(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct intel_gt *gt = to_gt(i915);
+	struct intel_gt *gt;
+	unsigned int i;
+	int ret;
+
+	for_each_gt(gt, i915, i) {
+		ret = run_test(gt, VARY_MAX);
+		if (ret)
+			return ret;
+	}
 
-	return run_test(gt, VARY_MAX);
+	return ret;
 }
 
 /* check if pcode can grant RP0 */
 static int live_slpc_max_granted(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct intel_gt *gt = to_gt(i915);
+	struct intel_gt *gt;
+	unsigned int i;
+	int ret;
+
+	for_each_gt(gt, i915, i) {
+		ret = run_test(gt, MAX_GRANTED);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int live_slpc_power(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_gt *gt;
+	unsigned int i;
+	int ret;
+
+	for_each_gt(gt, i915, i) {
+		ret = run_test(gt, SLPC_POWER);
+		if (ret)
+			return ret;
+	}
 
-	return run_test(gt, MAX_GRANTED);
+	return ret;
+}
+
+static void slpc_spinner_thread(struct kthread_work *work)
+{
+	struct slpc_thread *thread = container_of(work, typeof(*thread), work);
+
+	thread->result = run_test(thread->gt, TILE_INTERACTION);
+}
+
+static int live_slpc_tile_interaction(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_gt *gt;
+	struct slpc_thread *threads;
+	int i = 0, ret = 0;
+
+	threads = kcalloc(I915_MAX_GT, sizeof(*threads), GFP_KERNEL);
+	if (!threads)
+		return -ENOMEM;
+
+	for_each_gt(gt, i915, i) {
+		threads[i].worker = kthread_run_worker(0, "igt/slpc_parallel:%d", gt->info.id);
+
+		if (IS_ERR(threads[i].worker)) {
+			ret = PTR_ERR(threads[i].worker);
+			break;
+		}
+
+		threads[i].gt = gt;
+		kthread_init_work(&threads[i].work, slpc_spinner_thread);
+		kthread_queue_work(threads[i].worker, &threads[i].work);
+	}
+
+	for_each_gt(gt, i915, i) {
+		int status;
+
+		if (IS_ERR_OR_NULL(threads[i].worker))
+			continue;
+
+		kthread_flush_work(&threads[i].work);
+		status = READ_ONCE(threads[i].result);
+		if (status && !ret) {
+			pr_err("%s GT %d failed ", __func__, gt->info.id);
+			ret = status;
+		}
+		kthread_destroy_worker(threads[i].worker);
+	}
+
+	kfree(threads);
+	return ret;
 }
 
 int intel_slpc_live_selftests(struct drm_i915_private *i915)
@@ -298,10 +541,17 @@
 		SUBTEST(live_slpc_vary_max),
 		SUBTEST(live_slpc_vary_min),
 		SUBTEST(live_slpc_max_granted),
+		SUBTEST(live_slpc_power),
+		SUBTEST(live_slpc_tile_interaction),
 	};
 
-	if (intel_gt_is_wedged(to_gt(i915)))
-		return 0;
+	struct intel_gt *gt;
+	unsigned int i;
+
+	for_each_gt(gt, i915, i) {
+		if (intel_gt_is_wedged(gt))
+			return 0;
+	}
 
 	return i915_live_subtests(tests, i915);
 }
