Diffstat (limited to 'drivers/gpu/drm/i915/i915_pmu.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_pmu.c	619
1 file changed, 305 insertions(+), 314 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 34d37d46a126..a6697db21c72 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -6,16 +6,20 @@
 
 #include <linux/pm_runtime.h>
 
+#include <drm/drm_print.h>
+
 #include "gt/intel_engine.h"
 #include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_regs.h"
 #include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_regs.h"
 #include "gt/intel_rc6.h"
 #include "gt/intel_rps.h"
 
 #include "i915_drv.h"
 #include "i915_pmu.h"
-#include "intel_pm.h"
 
 /* Frequency for the sampling timer for events which need it. */
 #define FREQUENCY 200
@@ -26,8 +30,15 @@
 	 BIT(I915_SAMPLE_WAIT) | \
 	 BIT(I915_SAMPLE_SEMA))
 
-static cpumask_t i915_pmu_cpumask;
-static unsigned int i915_pmu_target_cpu = -1;
+static struct i915_pmu *event_to_pmu(struct perf_event *event)
+{
+	return container_of(event->pmu, struct i915_pmu, base);
+}
+
+static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
+{
+	return container_of(pmu, struct drm_i915_private, pmu);
+}
 
 static u8 engine_config_sample(u64 config)
 {
@@ -49,16 +60,26 @@ static u8 engine_event_instance(struct perf_event *event)
 	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
 }
 
-static bool is_engine_config(u64 config)
+static bool is_engine_config(const u64 config)
 {
 	return config < __I915_PMU_OTHER(0);
 }
 
+static unsigned int config_gt_id(const u64 config)
+{
+	return config >> __I915_PMU_GT_SHIFT;
+}
+
+static u64 config_counter(const u64 config)
+{
+	return config & ~(~0ULL << __I915_PMU_GT_SHIFT);
+}
+
 static unsigned int other_bit(const u64 config)
 {
 	unsigned int val;
 
-	switch (config) {
+	switch (config_counter(config)) {
 	case I915_PMU_ACTUAL_FREQUENCY:
 		val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
 		break;
@@ -76,7 +97,9 @@ static unsigned int other_bit(const u64 config)
 		return -1;
 	}
 
-	return I915_ENGINE_SAMPLE_COUNT + val;
+	return I915_ENGINE_SAMPLE_COUNT +
+	       config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT +
+	       val;
 }
 
 static unsigned int config_bit(const u64 config)
@@ -87,9 +110,20 @@ static unsigned int config_bit(const u64 config)
 		return other_bit(config);
 }
 
-static u64 config_mask(u64 config)
+static __always_inline u32 config_mask(const u64 config)
 {
-	return BIT_ULL(config_bit(config));
+	unsigned int bit = config_bit(config);
+
+	if (__builtin_constant_p(bit))
+		BUILD_BUG_ON(bit >
+			     BITS_PER_TYPE(typeof_member(struct i915_pmu,
+							 enable)) - 1);
+	else
+		WARN_ON_ONCE(bit >
+			     BITS_PER_TYPE(typeof_member(struct i915_pmu,
+							 enable)) - 1);
+
+	return BIT(bit);
 }
 
 static bool is_engine_event(struct perf_event *event)
@@ -102,9 +136,21 @@ static unsigned int event_bit(struct perf_event *event)
 	return config_bit(event->attr.config);
 }
 
-static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
+static u32 frequency_enabled_mask(void)
 {
-	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+	unsigned int i;
+	u32 mask = 0;
+
+	for (i = 0; i < I915_PMU_MAX_GT; i++)
+		mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) |
+			config_mask(__I915_PMU_REQUESTED_FREQUENCY(i));
+
+	return mask;
+}
+
+static bool pmu_needs_timer(struct i915_pmu *pmu)
+{
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
 	u32 enable;
 
 	/*
@@ -118,21 +164,13 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
 	 * Mask out all the ones which do not need the timer, or in
 	 * other words keep all the ones that could need the timer.
	 */
-	enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
-		  config_mask(I915_PMU_REQUESTED_FREQUENCY) |
-		  ENGINE_SAMPLE_MASK;
+	enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK;
 
 	/*
-	 * When the GPU is idle per-engine counters do not need to be
-	 * running so clear those bits out.
-	 */
-	if (!gpu_active)
-		enable &= ~ENGINE_SAMPLE_MASK;
-	/*
 	 * Also there is software busyness tracking available we do not
 	 * need the timer for I915_SAMPLE_BUSY counter.
 	 */
-	else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
+	if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
 		enable &= ~BIT(I915_SAMPLE_BUSY);
 
 	/*
@@ -146,16 +184,13 @@ static u64 __get_rc6(struct intel_gt *gt)
 	struct drm_i915_private *i915 = gt->i915;
 	u64 val;
 
-	val = intel_rc6_residency_ns(&gt->rc6,
-				     IS_VALLEYVIEW(i915) ?
-				     VLV_GT_RENDER_RC6 :
-				     GEN6_GT_GFX_RC6);
+	val = intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6);
 
 	if (HAS_RC6p(i915))
-		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);
+		val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6p);
 
 	if (HAS_RC6pp(i915))
-		val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);
+		val += intel_rc6_residency_ns(&gt->rc6, INTEL_RC6_RES_RC6pp);
 
 	return val;
 }
@@ -165,24 +200,42 @@ static inline s64 ktime_since_raw(const ktime_t kt)
 	return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
 }
 
+static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
+{
+	return pmu->sample[gt_id][sample].cur;
+}
+
+static void
+store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
+{
+	pmu->sample[gt_id][sample].cur = val;
+}
+
+static void
+add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
+{
+	pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
+}
+
 static u64 get_rc6(struct intel_gt *gt)
 {
 	struct drm_i915_private *i915 = gt->i915;
+	const unsigned int gt_id = gt->info.id;
 	struct i915_pmu *pmu = &i915->pmu;
+	intel_wakeref_t wakeref;
 	unsigned long flags;
-	bool awake = false;
 	u64 val;
 
-	if (intel_gt_pm_get_if_awake(gt)) {
+	wakeref = intel_gt_pm_get_if_awake(gt);
+	if (wakeref) {
 		val = __get_rc6(gt);
-		intel_gt_pm_put_async(gt);
-		awake = true;
+		intel_gt_pm_put_async(gt, wakeref);
 	}
 
 	spin_lock_irqsave(&pmu->lock, flags);
 
-	if (awake) {
-		pmu->sample[__I915_SAMPLE_RC6].cur = val;
+	if (wakeref) {
+		store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
 	} else {
 		/*
 		 * We think we are runtime suspended.
@@ -191,14 +244,14 @@ static u64 get_rc6(struct intel_gt *gt)
 		 * on top of the last known real value, as the approximated RC6
 		 * counter value.
		 */
-		val = ktime_since_raw(pmu->sleep_last);
-		val += pmu->sample[__I915_SAMPLE_RC6].cur;
+		val = ktime_since_raw(pmu->sleep_last[gt_id]);
+		val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
 	}
 
-	if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
-		val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
+	if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
+		val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
 	else
-		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
+		store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);
 
 	spin_unlock_irqrestore(&pmu->lock, flags);
 
@@ -207,28 +260,35 @@ static u64 get_rc6(struct intel_gt *gt)
 
 static void init_rc6(struct i915_pmu *pmu)
 {
-	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
-	intel_wakeref_t wakeref;
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
+	struct intel_gt *gt;
+	unsigned int i;
+
+	for_each_gt(gt, i915, i) {
+		intel_wakeref_t wakeref;
 
-	with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
-		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
-					pmu->sample[__I915_SAMPLE_RC6].cur;
-		pmu->sleep_last = ktime_get_raw();
+		with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+			u64 val = __get_rc6(gt);
+
+			store_sample(pmu, i, __I915_SAMPLE_RC6, val);
+			store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
+				     val);
+			pmu->sleep_last[i] = ktime_get_raw();
+		}
 	}
 }
 
-static void park_rc6(struct drm_i915_private *i915)
+static void park_rc6(struct intel_gt *gt)
 {
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = &gt->i915->pmu;
 
-	pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-	pmu->sleep_last = ktime_get_raw();
+	store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
+	pmu->sleep_last[gt->info.id] = ktime_get_raw();
 }
 
 static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
 {
-	if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
+	if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
 		pmu->timer_enabled = true;
 		pmu->timer_last = ktime_get();
 		hrtimer_start_range_ns(&pmu->timer,
@@ -237,31 +297,33 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
 	}
 }
 
-void i915_pmu_gt_parked(struct drm_i915_private *i915)
+void i915_pmu_gt_parked(struct intel_gt *gt)
 {
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = &gt->i915->pmu;
 
-	if (!pmu->base.event_init)
+	if (!pmu->registered)
 		return;
 
 	spin_lock_irq(&pmu->lock);
 
-	park_rc6(i915);
+	park_rc6(gt);
 
 	/*
 	 * Signal sampling timer to stop if only engine events are enabled and
 	 * GPU went idle.
 	 */
-	pmu->timer_enabled = pmu_needs_timer(pmu, false);
+	pmu->unparked &= ~BIT(gt->info.id);
+	if (pmu->unparked == 0)
+		pmu->timer_enabled = false;
 
 	spin_unlock_irq(&pmu->lock);
 }
 
-void i915_pmu_gt_unparked(struct drm_i915_private *i915)
+void i915_pmu_gt_unparked(struct intel_gt *gt)
 {
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = &gt->i915->pmu;
 
-	if (!pmu->base.event_init)
+	if (!pmu->registered)
 		return;
 
 	spin_lock_irq(&pmu->lock);
@@ -269,7 +331,10 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
 
 	/*
 	 * Re-enable sampling timer when GPU goes active.
	 */
-	__i915_pmu_maybe_start_timer(pmu);
+	if (pmu->unparked == 0)
+		__i915_pmu_maybe_start_timer(pmu);
+
+	pmu->unparked |= BIT(gt->info.id);
 
 	spin_unlock_irq(&pmu->lock);
 }
@@ -290,7 +355,7 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915)
 	return GRAPHICS_VER(i915) == 7;
 }
 
-static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+static void gen3_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
 {
 	struct intel_engine_pmu *pmu = &engine->pmu;
 	bool busy;
@@ -325,6 +390,31 @@ static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
 		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
 }
 
+static void gen2_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+	struct intel_engine_pmu *pmu = &engine->pmu;
+	u32 tail, head, acthd;
+
+	tail = ENGINE_READ_FW(engine, RING_TAIL);
+	head = ENGINE_READ_FW(engine, RING_HEAD);
+	acthd = ENGINE_READ_FW(engine, ACTHD);
+
+	if (head & HEAD_WAIT_I8XX)
+		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+
+	if (head & HEAD_WAIT_I8XX || head != acthd ||
+	    (head & HEAD_ADDR) != (tail & TAIL_ADDR))
+		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
+static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+	if (GRAPHICS_VER(engine->i915) >= 3)
+		gen3_engine_sample(engine, period_ns);
+	else
+		gen2_engine_sample(engine, period_ns);
+}
+
 static void
 engines_sample(struct intel_gt *gt, unsigned int period_ns)
 {
@@ -340,6 +430,9 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 		return;
 
 	for_each_engine(engine, gt, id) {
+		if (!engine->pmu.enable)
+			continue;
+
 		if (!intel_engine_pm_get_if_awake(engine))
 			continue;
 
@@ -355,35 +448,32 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 	}
 }
 
-static void
-add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
-{
-	sample->cur += mul_u32_u32(val, mul);
-}
-
-static bool frequency_sampling_enabled(struct i915_pmu *pmu)
+static bool
+frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
 {
 	return pmu->enable &
-	       (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
-		config_mask(I915_PMU_REQUESTED_FREQUENCY));
+	       (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) |
+		config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt)));
 }
 
 static void
 frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 {
 	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
+	const unsigned int gt_id = gt->info.id;
 	struct i915_pmu *pmu = &i915->pmu;
 	struct intel_rps *rps = &gt->rps;
+	intel_wakeref_t wakeref;
 
-	if (!frequency_sampling_enabled(pmu))
+	if (!frequency_sampling_enabled(pmu, gt_id))
 		return;
 
 	/* Report 0/0 (actual/requested) frequency while parked. */
-	if (!intel_gt_pm_get_if_awake(gt))
+	wakeref = intel_gt_pm_get_if_awake(gt);
+	if (!wakeref)
 		return;
 
-	if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+	if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
 		u32 val;
 
 		/*
@@ -395,32 +485,30 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 		 * case we assume the system is running at the intended
 		 * frequency. Fortunately, the read should rarely fail!
		 */
-		val = intel_uncore_read_fw(uncore, GEN6_RPSTAT1);
-		if (val)
-			val = intel_rps_get_cagf(rps, val);
-		else
-			val = rps->cur_freq;
-
-		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
-				intel_gpu_freq(rps, val), period_ns / 1000);
+		val = intel_rps_read_actual_frequency_fw(rps);
+		if (!val)
+			val = intel_gpu_freq(rps, rps->cur_freq);
+
+		add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
+				val, period_ns / 1000);
 	}
 
-	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
-		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
-				intel_gpu_freq(rps, rps->cur_freq),
+	if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
+		add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
+				intel_rps_get_requested_frequency(rps),
 				period_ns / 1000);
 	}
 
-	intel_gt_pm_put_async(gt);
+	intel_gt_pm_put_async(gt, wakeref);
 }
 
 static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 {
-	struct drm_i915_private *i915 =
-		container_of(hrtimer, struct drm_i915_private, pmu.timer);
-	struct i915_pmu *pmu = &i915->pmu;
-	struct intel_gt *gt = &i915->gt;
+	struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
 	unsigned int period_ns;
+	struct intel_gt *gt;
+	unsigned int i;
 	ktime_t now;
 
 	if (!READ_ONCE(pmu->timer_enabled))
@@ -436,8 +524,14 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
	 * grabbing the forcewake. However the potential error from timer call-
	 * back delay greatly dominates this so we keep it simple.
	 */
-	engines_sample(gt, period_ns);
-	frequency_sample(gt, period_ns);
+
+	for_each_gt(gt, i915, i) {
+		if (!(pmu->unparked & BIT(i)))
+			continue;
+
+		engines_sample(gt, period_ns);
+		frequency_sample(gt, period_ns);
+	}
 
 	hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
 
@@ -446,8 +540,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
 
 static void i915_pmu_event_destroy(struct perf_event *event)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct i915_pmu *pmu = event_to_pmu(event);
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
 
 	drm_WARN_ON(&i915->drm, event->parent);
 
@@ -476,9 +570,15 @@ engine_event_status(struct intel_engine_cs *engine,
 static int
 config_status(struct drm_i915_private *i915, u64 config)
 {
-	struct intel_gt *gt = &i915->gt;
+	struct intel_gt *gt = to_gt(i915);
 
-	switch (config) {
+	unsigned int gt_id = config_gt_id(config);
+	unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0;
+
+	if (gt_id > max_gt_id)
+		return -ENOENT;
+
+	switch (config_counter(config)) {
 	case I915_PMU_ACTUAL_FREQUENCY:
 		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
 			/* Requires a mutex for sampling! */
@@ -489,6 +589,8 @@ config_status(struct drm_i915_private *i915, u64 config)
 			return -ENODEV;
 		break;
 	case I915_PMU_INTERRUPTS:
+		if (gt_id)
+			return -ENOENT;
 		break;
 	case I915_PMU_RC6_RESIDENCY:
 		if (!gt->rc6.supported)
 			return -ENODEV;
 		break;
@@ -505,8 +607,8 @@
 
 static int engine_event_init(struct perf_event *event)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct i915_pmu *pmu = event_to_pmu(event);
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
 	struct intel_engine_cs *engine;
 
 	engine = intel_engine_lookup_user(i915, engine_event_class(event),
@@ -519,12 +621,11 @@ static int engine_event_init(struct perf_event *event)
 
 static int i915_pmu_event_init(struct perf_event *event)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = event_to_pmu(event);
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
 	int ret;
 
-	if (pmu->closed)
+	if (!pmu->registered)
 		return -ENODEV;
 
 	if (event->attr.type != event->pmu->type)
@@ -540,10 +641,6 @@ static int i915_pmu_event_init(struct perf_event *event)
 	if (event->cpu < 0)
 		return -EINVAL;
 
-	/* only allow running on one cpu at a time */
-	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
-		return -EINVAL;
-
 	if (is_engine_event(event))
 		ret = engine_event_init(event);
 	else
@@ -561,9 +658,8 @@ static int i915_pmu_event_init(struct perf_event *event)
 
 static u64 __i915_pmu_event_read(struct perf_event *event)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = event_to_pmu(event);
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
 	u64 val = 0;
 
 	if (is_engine_event(event)) {
@@ -586,25 +682,30 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 			val = engine->pmu.sample[sample].cur;
 		}
 	} else {
-		switch (event->attr.config) {
+		const unsigned int gt_id = config_gt_id(event->attr.config);
+		const u64 config = config_counter(event->attr.config);
+
+		switch (config) {
 		case I915_PMU_ACTUAL_FREQUENCY:
 			val =
-			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
+			   div_u64(read_sample(pmu, gt_id,
+					       __I915_SAMPLE_FREQ_ACT),
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
-			   div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
+			   div_u64(read_sample(pmu, gt_id,
+					       __I915_SAMPLE_FREQ_REQ),
				   USEC_PER_SEC /* to MHz */);
			break;
		case I915_PMU_INTERRUPTS:
			val = READ_ONCE(pmu->irq_count);
			break;
		case I915_PMU_RC6_RESIDENCY:
-			val = get_rc6(&i915->gt);
+			val = get_rc6(i915->gt[gt_id]);
			break;
		case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
-			val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt));
+			val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
			break;
		}
	}
@@ -614,35 +715,30 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 
 static void i915_pmu_event_read(struct perf_event *event)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct i915_pmu *pmu = event_to_pmu(event);
 	struct hw_perf_event *hwc = &event->hw;
-	struct i915_pmu *pmu = &i915->pmu;
 	u64 prev, new;
 
-	if (pmu->closed) {
+	if (!pmu->registered) {
 		event->hw.state = PERF_HES_STOPPED;
 		return;
 	}
-again:
-	prev = local64_read(&hwc->prev_count);
-	new = __i915_pmu_event_read(event);
 
-	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
-		goto again;
+	prev = local64_read(&hwc->prev_count);
+	do {
+		new = __i915_pmu_event_read(event);
+	} while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
 
 	local64_add(new - prev, &event->count);
 }
 
 static void i915_pmu_enable(struct perf_event *event)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = event_to_pmu(event);
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
+	const unsigned int bit = event_bit(event);
 	unsigned long flags;
-	unsigned int bit;
 
-	bit = event_bit(event);
 	if (bit == -1)
 		goto update;
 
@@ -656,7 +752,7 @@ static void i915_pmu_enable(struct perf_event *event)
 	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
 	GEM_BUG_ON(pmu->enable_count[bit] == ~0);
 
-	pmu->enable |= BIT_ULL(bit);
+	pmu->enable |= BIT(bit);
 	pmu->enable_count[bit]++;
 
 	/*
@@ -701,10 +797,9 @@ update:
 
 static void i915_pmu_disable(struct perf_event *event)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
-	unsigned int bit = event_bit(event);
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = event_to_pmu(event);
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
+	const unsigned int bit = event_bit(event);
 	unsigned long flags;
 
 	if (bit == -1)
@@ -739,8 +834,8 @@ static void i915_pmu_disable(struct perf_event *event)
 	 * bitmask when the last listener on an event goes away.
 	 */
 	if (--pmu->enable_count[bit] == 0) {
-		pmu->enable &= ~BIT_ULL(bit);
-		pmu->timer_enabled &= pmu_needs_timer(pmu, true);
+		pmu->enable &= ~BIT(bit);
+		pmu->timer_enabled &= pmu_needs_timer(pmu);
 	}
 
 	spin_unlock_irqrestore(&pmu->lock, flags);
@@ -748,11 +843,9 @@ static void i915_pmu_disable(struct perf_event *event)
 
 static void i915_pmu_event_start(struct perf_event *event, int flags)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = event_to_pmu(event);
 
-	if (pmu->closed)
+	if (!pmu->registered)
 		return;
 
 	i915_pmu_enable(event);
@@ -761,19 +854,25 @@ static void i915_pmu_event_start(struct perf_event *event, int flags)
 
 static void i915_pmu_event_stop(struct perf_event *event, int flags)
 {
+	struct i915_pmu *pmu = event_to_pmu(event);
+
+	if (!pmu->registered)
+		goto out;
+
 	if (flags & PERF_EF_UPDATE)
 		i915_pmu_event_read(event);
+
 	i915_pmu_disable(event);
+
+out:
 	event->hw.state = PERF_HES_STOPPED;
 }
 
 static int i915_pmu_event_add(struct perf_event *event, int flags)
 {
-	struct drm_i915_private *i915 =
-		container_of(event->pmu, typeof(*i915), pmu.base);
-	struct i915_pmu *pmu = &i915->pmu;
+	struct i915_pmu *pmu = event_to_pmu(event);
 
-	if (pmu->closed)
+	if (!pmu->registered)
 		return -ENODEV;
 
 	if (flags & PERF_EF_START)
@@ -787,11 +886,6 @@ static void i915_pmu_event_del(struct perf_event *event, int flags)
 	i915_pmu_event_stop(event, PERF_EF_UPDATE);
 }
 
-static int i915_pmu_event_event_idx(struct perf_event *event)
-{
-	return 0;
-}
-
 struct i915_str_attribute {
 	struct device_attribute attr;
 	const char *str;
@@ -803,7 +897,7 @@ static ssize_t i915_pmu_format_show(struct device *dev,
 	struct i915_str_attribute *eattr;
 
 	eattr = container_of(attr, struct i915_str_attribute, attr);
-	return sprintf(buf, "%s\n", eattr->str);
+	return sysfs_emit(buf, "%s\n", eattr->str);
 }
 
 #define I915_PMU_FORMAT_ATTR(_name, _config) \
@@ -833,31 +927,23 @@ static ssize_t i915_pmu_event_show(struct device *dev,
 	struct i915_ext_attribute *eattr;
 
 	eattr = container_of(attr, struct i915_ext_attribute, attr);
-	return sprintf(buf, "config=0x%lx\n", eattr->val);
+	return sysfs_emit(buf, "config=0x%lx\n", eattr->val);
 }
 
-static ssize_t cpumask_show(struct device *dev,
-			    struct device_attribute *attr, char *buf)
-{
-	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
+#define __event(__counter, __name, __unit) \
+{ \
+	.counter = (__counter), \
+	.name = (__name), \
+	.unit = (__unit), \
+	.global = false, \
 }
 
-static DEVICE_ATTR_RO(cpumask);
-
-static struct attribute *i915_cpumask_attrs[] = {
-	&dev_attr_cpumask.attr,
-	NULL,
-};
-
-static const struct attribute_group i915_pmu_cpumask_attr_group = {
-	.attrs = i915_cpumask_attrs,
-};
-
-#define __event(__config, __name, __unit) \
+#define __global_event(__counter, __name, __unit) \
 { \
-	.config = (__config), \
+	.counter = (__counter), \
 	.name = (__name), \
 	.unit = (__unit), \
+	.global = true, \
 }
 
 #define __engine_event(__sample, __name) \
@@ -894,17 +980,18 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
 
 static struct attribute **
 create_event_attributes(struct i915_pmu *pmu)
 {
-	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+	struct drm_i915_private *i915 = pmu_to_i915(pmu);
 	static const struct {
-		u64 config;
+		unsigned int counter;
 		const char *name;
 		const char *unit;
+		bool global;
 	} events[] = {
-		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
-		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
-		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
-		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
-		__event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
+		__event(0, "actual-frequency", "M"),
+		__event(1, "requested-frequency", "M"),
+		__global_event(2, "interrupts", NULL),
+		__event(3, "rc6-residency", "ns"),
+		__event(4, "software-gt-awake-time", "ns"),
 	};
 	static const struct {
 		enum drm_i915_pmu_engine_sample sample;
@@ -919,12 +1006,17 @@ create_event_attributes(struct i915_pmu *pmu)
 	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
 	struct attribute **attr = NULL, **attr_iter;
 	struct intel_engine_cs *engine;
-	unsigned int i;
+	struct intel_gt *gt;
+	unsigned int i, j;
 
 	/* Count how many counters we will be exposing. */
-	for (i = 0; i < ARRAY_SIZE(events); i++) {
-		if (!config_status(i915, events[i].config))
-			count++;
+	for_each_gt(gt, i915, j) {
+		for (i = 0; i < ARRAY_SIZE(events); i++) {
+			u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+
+			if (!config_status(i915, config))
+				count++;
+		}
 	}
 
 	for_each_uabi_engine(engine, i915) {
@@ -954,26 +1046,39 @@ create_event_attributes(struct i915_pmu *pmu)
 	attr_iter = attr;
 
 	/* Initialize supported non-engine counters. */
-	for (i = 0; i < ARRAY_SIZE(events); i++) {
-		char *str;
-
-		if (config_status(i915, events[i].config))
-			continue;
-
-		str = kstrdup(events[i].name, GFP_KERNEL);
-		if (!str)
-			goto err;
+	for_each_gt(gt, i915, j) {
+		for (i = 0; i < ARRAY_SIZE(events); i++) {
+			u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+			char *str;
 
-		*attr_iter++ = &i915_iter->attr.attr;
-		i915_iter = add_i915_attr(i915_iter, str, events[i].config);
+			if (config_status(i915, config))
+				continue;
 
-		if (events[i].unit) {
-			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
+			if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+				str = kstrdup(events[i].name, GFP_KERNEL);
+			else
+				str = kasprintf(GFP_KERNEL, "%s-gt%u",
+						events[i].name, j);
 			if (!str)
 				goto err;
 
-			*attr_iter++ = &pmu_iter->attr.attr;
-			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
+			*attr_iter++ = &i915_iter->attr.attr;
+			i915_iter = add_i915_attr(i915_iter, str, config);
+
+			if (events[i].unit) {
+				if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+					str = kasprintf(GFP_KERNEL, "%s.unit",
+							events[i].name);
+				else
+					str = kasprintf(GFP_KERNEL, "%s-gt%u.unit",
+							events[i].name, j);
+				if (!str)
+					goto err;
+
+				*attr_iter++ = &pmu_iter->attr.attr;
+				pmu_iter = add_pmu_attr(pmu_iter, str,
+							events[i].unit);
+			}
 		}
 	}
 
@@ -1041,122 +1146,21 @@ static void free_event_attributes(struct i915_pmu *pmu)
 	pmu->pmu_attr = NULL;
 }
 
-static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
-	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
-
-	GEM_BUG_ON(!pmu->base.event_init);
-
-	/* Select the first online CPU as a designated reader. */
-	if (!cpumask_weight(&i915_pmu_cpumask))
-		cpumask_set_cpu(cpu, &i915_pmu_cpumask);
-
-	return 0;
-}
-
-static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
-{
-	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
-	unsigned int target = i915_pmu_target_cpu;
-
-	GEM_BUG_ON(!pmu->base.event_init);
-
-	/*
-	 * Unregistering an instance generates a CPU offline event which we must
-	 * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
-	 */
-	if (pmu->closed)
-		return 0;
-
-	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
-		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
-
-		/* Migrate events if there is a valid target */
-		if (target < nr_cpu_ids) {
-			cpumask_set_cpu(target, &i915_pmu_cpumask);
-			i915_pmu_target_cpu = target;
-		}
-	}
-
-	if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
-		perf_pmu_migrate_context(&pmu->base, cpu, target);
-		pmu->cpuhp.cpu = target;
-	}
-
-	return 0;
-}
-
-static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
-
-void i915_pmu_init(void)
-{
-	int ret;
-
-	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
-				      "perf/x86/intel/i915:online",
-				      i915_pmu_cpu_online,
-				      i915_pmu_cpu_offline);
-	if (ret < 0)
-		pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
-			  ret);
-	else
-		cpuhp_slot = ret;
-}
-
-void i915_pmu_exit(void)
-{
-	if (cpuhp_slot != CPUHP_INVALID)
-		cpuhp_remove_multi_state(cpuhp_slot);
-}
-
-static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
-{
-	if (cpuhp_slot == CPUHP_INVALID)
-		return -EINVAL;
-
-	return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
-}
-
-static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
-{
-	cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
-}
-
-static bool is_igp(struct drm_i915_private *i915)
-{
-	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
-
-	/* IGP is 0000:00:02.0 */
-	return pci_domain_nr(pdev->bus) == 0 &&
-	       pdev->bus->number == 0 &&
-	       PCI_SLOT(pdev->devfn) == 2 &&
-	       PCI_FUNC(pdev->devfn) == 0;
-}
-
 void i915_pmu_register(struct drm_i915_private *i915)
 {
 	struct i915_pmu *pmu = &i915->pmu;
 	const struct attribute_group *attr_groups[] = {
 		&i915_pmu_format_attr_group,
 		&pmu->events_attr_group,
-		&i915_pmu_cpumask_attr_group,
 		NULL
 	};
-
 	int ret = -ENOMEM;
 
-	if (GRAPHICS_VER(i915) <= 2) {
-		drm_info(&i915->drm, "PMU not supported for this GPU.");
-		return;
-	}
-
 	spin_lock_init(&pmu->lock);
-	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	pmu->timer.function = i915_sample;
-	pmu->cpuhp.cpu = -1;
+	hrtimer_setup(&pmu->timer, i915_sample, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	init_rc6(pmu);
 
-	if (!is_igp(i915)) {
+	if (IS_DGFX(i915)) {
 		pmu->name = kasprintf(GFP_KERNEL,
 				      "i915_%s",
 				      dev_name(i915->drm.dev));
@@ -1182,33 +1186,28 @@ void i915_pmu_register(struct drm_i915_private *i915)
 
 	pmu->base.module	= THIS_MODULE;
 	pmu->base.task_ctx_nr	= perf_invalid_context;
+	pmu->base.scope		= PERF_PMU_SCOPE_SYS_WIDE;
 	pmu->base.event_init	= i915_pmu_event_init;
 	pmu->base.add		= i915_pmu_event_add;
 	pmu->base.del		= i915_pmu_event_del;
 	pmu->base.start		= i915_pmu_event_start;
 	pmu->base.stop		= i915_pmu_event_stop;
 	pmu->base.read		= i915_pmu_event_read;
-	pmu->base.event_idx	= i915_pmu_event_event_idx;
 
 	ret = perf_pmu_register(&pmu->base, pmu->name, -1);
 	if (ret)
 		goto err_groups;
 
-	ret = i915_pmu_register_cpuhp_state(pmu);
-	if (ret)
-		goto err_unreg;
+	pmu->registered = true;
 
 	return;
 
-err_unreg:
-	perf_pmu_unregister(&pmu->base);
err_groups:
 	kfree(pmu->base.attr_groups);
err_attr:
-	pmu->base.event_init = NULL;
 	free_event_attributes(pmu);
err_name:
-	if (!is_igp(i915))
+	if (IS_DGFX(i915))
 		kfree(pmu->name);
err:
 	drm_notice(&i915->drm, "Failed to register PMU!\n");
@@ -1218,25 +1217,17 @@
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
 	struct i915_pmu *pmu = &i915->pmu;
 
-	if (!pmu->base.event_init)
+	if (!pmu->registered)
 		return;
 
-	/*
-	 * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu
-	 * ensures all currently executing ones will have exited before we
-	 * proceed with unregistration.
-	 */
-	pmu->closed = true;
-	synchronize_rcu();
+	/* Disconnect the PMU callbacks */
+	pmu->registered = false;
 
 	hrtimer_cancel(&pmu->timer);
 
-	i915_pmu_unregister_cpuhp_state(pmu);
-
 	perf_pmu_unregister(&pmu->base);
-	pmu->base.event_init = NULL;
 	kfree(pmu->base.attr_groups);
-	if (!is_igp(i915))
+	if (IS_DGFX(i915))
 		kfree(pmu->name);
 	free_event_attributes(pmu);
 }
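
The core of the multi-GT rework above is the new event config layout: a GT id is packed into the topmost bits of the perf config and the counter into the bits below, which config_gt_id() and config_counter() split apart. The following standalone C sketch is illustrative only and not part of the diff; the shift value 60 is an assumption matching __I915_PMU_GT_SHIFT in the uapi header at the time of writing, and the counter value used is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Assumed value of __I915_PMU_GT_SHIFT from include/uapi/drm/i915_drm.h. */
#define PMU_GT_SHIFT 60

/* Mirror of config_gt_id(): the GT id occupies the bits above the shift. */
static unsigned int config_gt_id(uint64_t config)
{
	return config >> PMU_GT_SHIFT;
}

/* Mirror of config_counter(): the counter occupies the bits below the shift. */
static uint64_t config_counter(uint64_t config)
{
	return config & ~(~0ULL << PMU_GT_SHIFT);
}

int main(void)
{
	/* A hypothetical counter value 0x123 bound to GT 1. */
	uint64_t config = ((uint64_t)1 << PMU_GT_SHIFT) | 0x123;

	printf("gt=%u counter=0x%llx\n", config_gt_id(config),
	       (unsigned long long)config_counter(config));
	return 0;
}

Because the GT id rides along in the config, create_event_attributes() can expose per-GT names such as rc6-residency-gt1 on multi-tile parts while keeping the legacy names on single-GT hardware, as the -gt%u formatting in the diff shows.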
