Diffstat (limited to 'drivers/gpu/drm/i915/i915_pmu.c')
| -rw-r--r-- | drivers/gpu/drm/i915/i915_pmu.c | 1127 |
1 file changed, 632 insertions(+), 495 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..a6697db21c72 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -4,10 +4,22 @@ * Copyright © 2017-2018 Intel Corporation */ -#include <linux/irq.h> -#include "i915_pmu.h" -#include "intel_ringbuffer.h" +#include <linux/pm_runtime.h> + +#include <drm/drm_print.h> + +#include "gt/intel_engine.h" +#include "gt/intel_engine_pm.h" +#include "gt/intel_engine_regs.h" +#include "gt/intel_engine_user.h" +#include "gt/intel_gt.h" +#include "gt/intel_gt_pm.h" +#include "gt/intel_gt_regs.h" +#include "gt/intel_rc6.h" +#include "gt/intel_rps.h" + #include "i915_drv.h" +#include "i915_pmu.h" /* Frequency for the sampling timer for events which need it. */ #define FREQUENCY 200 @@ -18,9 +30,15 @@ BIT(I915_SAMPLE_WAIT) | \ BIT(I915_SAMPLE_SEMA)) -#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS) +static struct i915_pmu *event_to_pmu(struct perf_event *event) +{ + return container_of(event->pmu, struct i915_pmu, base); +} -static cpumask_t i915_pmu_cpumask; +static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu) +{ + return container_of(pmu, struct drm_i915_private, pmu); +} static u8 engine_config_sample(u64 config) { @@ -42,22 +60,70 @@ static u8 engine_event_instance(struct perf_event *event) return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff; } -static bool is_engine_config(u64 config) +static bool is_engine_config(const u64 config) { return config < __I915_PMU_OTHER(0); } -static unsigned int config_enabled_bit(u64 config) +static unsigned int config_gt_id(const u64 config) +{ + return config >> __I915_PMU_GT_SHIFT; +} + +static u64 config_counter(const u64 config) +{ + return config & ~(~0ULL << __I915_PMU_GT_SHIFT); +} + +static unsigned int other_bit(const u64 config) +{ + unsigned int val; + + switch (config_counter(config)) { + case I915_PMU_ACTUAL_FREQUENCY: + val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED; + break; + case I915_PMU_REQUESTED_FREQUENCY: + val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED; + break; + case I915_PMU_RC6_RESIDENCY: + val = __I915_PMU_RC6_RESIDENCY_ENABLED; + break; + default: + /* + * Events that do not require sampling, or tracking state + * transitions between enabled and disabled can be ignored. 
+ */ + return -1; + } + + return I915_ENGINE_SAMPLE_COUNT + + config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT + + val; +} + +static unsigned int config_bit(const u64 config) { if (is_engine_config(config)) return engine_config_sample(config); else - return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0)); + return other_bit(config); } -static u64 config_enabled_mask(u64 config) +static __always_inline u32 config_mask(const u64 config) { - return BIT_ULL(config_enabled_bit(config)); + unsigned int bit = config_bit(config); + + if (__builtin_constant_p(bit)) + BUILD_BUG_ON(bit > + BITS_PER_TYPE(typeof_member(struct i915_pmu, + enable)) - 1); + else + WARN_ON_ONCE(bit > + BITS_PER_TYPE(typeof_member(struct i915_pmu, + enable)) - 1); + + return BIT(bit); } static bool is_engine_event(struct perf_event *event) @@ -65,43 +131,46 @@ static bool is_engine_event(struct perf_event *event) return is_engine_config(event->attr.config); } -static unsigned int event_enabled_bit(struct perf_event *event) +static unsigned int event_bit(struct perf_event *event) { - return config_enabled_bit(event->attr.config); + return config_bit(event->attr.config); } -static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) +static u32 frequency_enabled_mask(void) { - u64 enable; + unsigned int i; + u32 mask = 0; + + for (i = 0; i < I915_PMU_MAX_GT; i++) + mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) | + config_mask(__I915_PMU_REQUESTED_FREQUENCY(i)); + + return mask; +} + +static bool pmu_needs_timer(struct i915_pmu *pmu) +{ + struct drm_i915_private *i915 = pmu_to_i915(pmu); + u32 enable; /* * Only some counters need the sampling timer. * * We start with a bitmask of all currently enabled events. */ - enable = i915->pmu.enable; + enable = pmu->enable; /* * Mask out all the ones which do not need the timer, or in * other words keep all the ones that could need the timer. */ - enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) | - config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) | - ENGINE_SAMPLE_MASK; + enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK; /* - * When the GPU is idle per-engine counters do not need to be - * running so clear those bits out. - */ - if (!gpu_active) - enable &= ~ENGINE_SAMPLE_MASK; - /* * Also there is software busyness tracking available we do not * need the timer for I915_SAMPLE_BUSY counter. - * - * Use RCS as proxy for all engines. */ - else if (intel_engine_supports_stats(i915->engine[RCS])) + if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS) enable &= ~BIT(I915_SAMPLE_BUSY); /* @@ -110,50 +179,164 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) return enable; } -void i915_pmu_gt_parked(struct drm_i915_private *i915) +static u64 __get_rc6(struct intel_gt *gt) { - if (!i915->pmu.base.event_init) - return; + struct drm_i915_private *i915 = gt->i915; + u64 val; - spin_lock_irq(&i915->pmu.lock); - /* - * Signal sampling timer to stop if only engine events are enabled and - * GPU went idle. 
- */ - i915->pmu.timer_enabled = pmu_needs_timer(i915, false); - spin_unlock_irq(&i915->pmu.lock); + val = intel_rc6_residency_ns(>->rc6, INTEL_RC6_RES_RC6); + + if (HAS_RC6p(i915)) + val += intel_rc6_residency_ns(>->rc6, INTEL_RC6_RES_RC6p); + + if (HAS_RC6pp(i915)) + val += intel_rc6_residency_ns(>->rc6, INTEL_RC6_RES_RC6pp); + + return val; +} + +static inline s64 ktime_since_raw(const ktime_t kt) +{ + return ktime_to_ns(ktime_sub(ktime_get_raw(), kt)); +} + +static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample) +{ + return pmu->sample[gt_id][sample].cur; +} + +static void +store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val) +{ + pmu->sample[gt_id][sample].cur = val; +} + +static void +add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul) +{ + pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul); +} + +static u64 get_rc6(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + const unsigned int gt_id = gt->info.id; + struct i915_pmu *pmu = &i915->pmu; + intel_wakeref_t wakeref; + unsigned long flags; + u64 val; + + wakeref = intel_gt_pm_get_if_awake(gt); + if (wakeref) { + val = __get_rc6(gt); + intel_gt_pm_put_async(gt, wakeref); + } + + spin_lock_irqsave(&pmu->lock, flags); + + if (wakeref) { + store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val); + } else { + /* + * We think we are runtime suspended. + * + * Report the delta from when the device was suspended to now, + * on top of the last known real value, as the approximated RC6 + * counter value. + */ + val = ktime_since_raw(pmu->sleep_last[gt_id]); + val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6); + } + + if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED)) + val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED); + else + store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val); + + spin_unlock_irqrestore(&pmu->lock, flags); + + return val; +} + +static void init_rc6(struct i915_pmu *pmu) +{ + struct drm_i915_private *i915 = pmu_to_i915(pmu); + struct intel_gt *gt; + unsigned int i; + + for_each_gt(gt, i915, i) { + intel_wakeref_t wakeref; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) { + u64 val = __get_rc6(gt); + + store_sample(pmu, i, __I915_SAMPLE_RC6, val); + store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED, + val); + pmu->sleep_last[i] = ktime_get_raw(); + } + } +} + +static void park_rc6(struct intel_gt *gt) +{ + struct i915_pmu *pmu = >->i915->pmu; + + store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt)); + pmu->sleep_last[gt->info.id] = ktime_get_raw(); } -static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915) +static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) { - if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) { - i915->pmu.timer_enabled = true; - i915->pmu.timer_last = ktime_get(); - hrtimer_start_range_ns(&i915->pmu.timer, + if (!pmu->timer_enabled && pmu_needs_timer(pmu)) { + pmu->timer_enabled = true; + pmu->timer_last = ktime_get(); + hrtimer_start_range_ns(&pmu->timer, ns_to_ktime(PERIOD), 0, HRTIMER_MODE_REL_PINNED); } } -void i915_pmu_gt_unparked(struct drm_i915_private *i915) +void i915_pmu_gt_parked(struct intel_gt *gt) { - if (!i915->pmu.base.event_init) + struct i915_pmu *pmu = >->i915->pmu; + + if (!pmu->registered) return; - spin_lock_irq(&i915->pmu.lock); + spin_lock_irq(&pmu->lock); + + park_rc6(gt); + /* - * Re-enable sampling timer when GPU goes active. 
+ * Signal sampling timer to stop if only engine events are enabled and + * GPU went idle. */ - __i915_pmu_maybe_start_timer(i915); - spin_unlock_irq(&i915->pmu.lock); + pmu->unparked &= ~BIT(gt->info.id); + if (pmu->unparked == 0) + pmu->timer_enabled = false; + + spin_unlock_irq(&pmu->lock); } -static bool grab_forcewake(struct drm_i915_private *i915, bool fw) +void i915_pmu_gt_unparked(struct intel_gt *gt) { - if (!fw) - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + struct i915_pmu *pmu = >->i915->pmu; + + if (!pmu->registered) + return; - return true; + spin_lock_irq(&pmu->lock); + + /* + * Re-enable sampling timer when GPU goes active. + */ + if (pmu->unparked == 0) + __i915_pmu_maybe_start_timer(pmu); + + pmu->unparked |= BIT(gt->info.id); + + spin_unlock_irq(&pmu->lock); } static void @@ -162,105 +345,178 @@ add_sample(struct i915_pmu_sample *sample, u32 val) sample->cur += val; } -static void -engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) +static bool exclusive_mmio_access(const struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - bool fw = false; + /* + * We have to avoid concurrent mmio cache line access on gen7 or + * risk a machine hang. For a fun history lesson dig out the old + * userspace intel_gpu_top and run it on Ivybridge or Haswell! + */ + return GRAPHICS_VER(i915) == 7; +} - if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0) - return; +static void gen3_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + struct intel_engine_pmu *pmu = &engine->pmu; + bool busy; + u32 val; - if (!dev_priv->gt.awake) + val = ENGINE_READ_FW(engine, RING_CTL); + if (val == 0) /* powerwell off => engine idle */ return; - if (!intel_runtime_pm_get_if_in_use(dev_priv)) + if (val & RING_WAIT) + add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); + if (val & RING_WAIT_SEMAPHORE) + add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); + + /* No need to sample when busy stats are supported. */ + if (intel_engine_supports_stats(engine)) return; - for_each_engine(engine, dev_priv, id) { - u32 current_seqno = intel_engine_get_seqno(engine); - u32 last_seqno = intel_engine_last_submit(engine); - u32 val; + /* + * While waiting on a semaphore or event, MI_MODE reports the + * ring as idle. However, previously using the seqno, and with + * execlists sampling, we account for the ring waiting as the + * engine being busy. Therefore, we record the sample as being + * busy if either waiting or !idle. 
+ */ + busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT); + if (!busy) { + val = ENGINE_READ_FW(engine, RING_MI_MODE); + busy = !(val & MODE_IDLE); + } + if (busy) + add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); +} - val = !i915_seqno_passed(current_seqno, last_seqno); +static void gen2_engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + struct intel_engine_pmu *pmu = &engine->pmu; + u32 tail, head, acthd; - if (val) - add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY], - period_ns); + tail = ENGINE_READ_FW(engine, RING_TAIL); + head = ENGINE_READ_FW(engine, RING_HEAD); + acthd = ENGINE_READ_FW(engine, ACTHD); - if (val && (engine->pmu.enable & - (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) { - fw = grab_forcewake(dev_priv, fw); + if (head & HEAD_WAIT_I8XX) + add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); - val = I915_READ_FW(RING_CTL(engine->mmio_base)); - } else { - val = 0; - } + if (head & HEAD_WAIT_I8XX || head != acthd || + (head & HEAD_ADDR) != (tail & TAIL_ADDR)) + add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); +} - if (val & RING_WAIT) - add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT], - period_ns); +static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns) +{ + if (GRAPHICS_VER(engine->i915) >= 3) + gen3_engine_sample(engine, period_ns); + else + gen2_engine_sample(engine, period_ns); +} - if (val & RING_WAIT_SEMAPHORE) - add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA], - period_ns); - } +static void +engines_sample(struct intel_gt *gt, unsigned int period_ns) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; - if (fw) - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) + return; + + if (!intel_gt_pm_is_awake(gt)) + return; - intel_runtime_pm_put(dev_priv); + for_each_engine(engine, gt, id) { + if (!engine->pmu.enable) + continue; + + if (!intel_engine_pm_get_if_awake(engine)) + continue; + + if (exclusive_mmio_access(i915)) { + spin_lock_irqsave(&engine->uncore->lock, flags); + engine_sample(engine, period_ns); + spin_unlock_irqrestore(&engine->uncore->lock, flags); + } else { + engine_sample(engine, period_ns); + } + + intel_engine_pm_put_async(engine); + } } -static void -add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul) +static bool +frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt) { - sample->cur += mul_u32_u32(val, mul); + return pmu->enable & + (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) | + config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt))); } static void -frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) +frequency_sample(struct intel_gt *gt, unsigned int period_ns) { - if (dev_priv->pmu.enable & - config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { + struct drm_i915_private *i915 = gt->i915; + const unsigned int gt_id = gt->info.id; + struct i915_pmu *pmu = &i915->pmu; + struct intel_rps *rps = >->rps; + intel_wakeref_t wakeref; + + if (!frequency_sampling_enabled(pmu, gt_id)) + return; + + /* Report 0/0 (actual/requested) frequency while parked. 
*/ + wakeref = intel_gt_pm_get_if_awake(gt); + if (!wakeref) + return; + + if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) { u32 val; - val = dev_priv->gt_pm.rps.cur_freq; - if (dev_priv->gt.awake && - intel_runtime_pm_get_if_in_use(dev_priv)) { - val = intel_get_cagf(dev_priv, - I915_READ_NOTRACE(GEN6_RPSTAT1)); - intel_runtime_pm_put(dev_priv); - } + /* + * We take a quick peek here without using forcewake + * so that we don't perturb the system under observation + * (forcewake => !rc6 => increased power use). We expect + * that if the read fails because it is outside of the + * mmio power well, then it will return 0 -- in which + * case we assume the system is running at the intended + * frequency. Fortunately, the read should rarely fail! + */ + val = intel_rps_read_actual_frequency_fw(rps); + if (!val) + val = intel_gpu_freq(rps, rps->cur_freq); - add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], - intel_gpu_freq(dev_priv, val), - period_ns / 1000); + add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT, + val, period_ns / 1000); } - if (dev_priv->pmu.enable & - config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { - add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.cur_freq), + if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) { + add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ, + intel_rps_get_requested_frequency(rps), period_ns / 1000); } + + intel_gt_pm_put_async(gt, wakeref); } static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) { - struct drm_i915_private *i915 = - container_of(hrtimer, struct drm_i915_private, pmu.timer); + struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer); + struct drm_i915_private *i915 = pmu_to_i915(pmu); unsigned int period_ns; + struct intel_gt *gt; + unsigned int i; ktime_t now; - if (!READ_ONCE(i915->pmu.timer_enabled)) + if (!READ_ONCE(pmu->timer_enabled)) return HRTIMER_NORESTART; now = ktime_get(); - period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last)); - i915->pmu.timer_last = now; + period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last)); + pmu->timer_last = now; /* * Strictly speaking the passed in period may not be 100% accurate for @@ -268,53 +524,28 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) * grabbing the forcewake. However the potential error from timer call- * back delay greatly dominates this so we keep it simple. 
*/ - engines_sample(i915, period_ns); - frequency_sample(i915, period_ns); - - hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD)); - - return HRTIMER_RESTART; -} - -static u64 count_interrupts(struct drm_i915_private *i915) -{ - /* open-coded kstat_irqs() */ - struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq); - u64 sum = 0; - int cpu; - - if (!desc || !desc->kstat_irqs) - return 0; - - for_each_possible_cpu(cpu) - sum += *per_cpu_ptr(desc->kstat_irqs, cpu); - return sum; -} + for_each_gt(gt, i915, i) { + if (!(pmu->unparked & BIT(i))) + continue; -static void engine_event_destroy(struct perf_event *event) -{ - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - struct intel_engine_cs *engine; + engines_sample(gt, period_ns); + frequency_sample(gt, period_ns); + } - engine = intel_engine_lookup_user(i915, - engine_event_class(event), - engine_event_instance(event)); - if (WARN_ON_ONCE(!engine)) - return; + hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD)); - if (engine_event_sample(event) == I915_SAMPLE_BUSY && - intel_engine_supports_stats(engine)) - intel_disable_engine_stats(engine); + return HRTIMER_RESTART; } static void i915_pmu_event_destroy(struct perf_event *event) { - WARN_ON(event->parent); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); - if (is_engine_event(event)) - engine_event_destroy(event); + drm_WARN_ON(&i915->drm, event->parent); + + drm_dev_put(&i915->drm); } static int @@ -326,7 +557,7 @@ engine_event_status(struct intel_engine_cs *engine, case I915_SAMPLE_WAIT: break; case I915_SAMPLE_SEMA: - if (INTEL_GEN(engine->i915) < 6) + if (GRAPHICS_VER(engine->i915) < 6) return -ENODEV; break; default: @@ -339,22 +570,34 @@ engine_event_status(struct intel_engine_cs *engine, static int config_status(struct drm_i915_private *i915, u64 config) { - switch (config) { + struct intel_gt *gt = to_gt(i915); + + unsigned int gt_id = config_gt_id(config); + unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0; + + if (gt_id > max_gt_id) + return -ENOENT; + + switch (config_counter(config)) { case I915_PMU_ACTUAL_FREQUENCY: if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) /* Requires a mutex for sampling! */ return -ENODEV; - /* Fall-through. 
*/ + fallthrough; case I915_PMU_REQUESTED_FREQUENCY: - if (INTEL_GEN(i915) < 6) + if (GRAPHICS_VER(i915) < 6) return -ENODEV; break; case I915_PMU_INTERRUPTS: + if (gt_id) + return -ENOENT; break; case I915_PMU_RC6_RESIDENCY: - if (!HAS_RC6(i915)) + if (!gt->rc6.supported) return -ENODEV; break; + case I915_PMU_SOFTWARE_GT_AWAKE_TIME: + break; default: return -ENOENT; } @@ -364,34 +607,27 @@ config_status(struct drm_i915_private *i915, u64 config) static int engine_event_init(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); struct intel_engine_cs *engine; - u8 sample; - int ret; engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); if (!engine) return -ENODEV; - sample = engine_event_sample(event); - ret = engine_event_status(engine, sample); - if (ret) - return ret; - - if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) - ret = intel_enable_engine_stats(engine); - - return ret; + return engine_event_status(engine, engine_event_sample(event)); } static int i915_pmu_event_init(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); int ret; + if (!pmu->registered) + return -ENODEV; + if (event->attr.type != event->pmu->type) return -ENOENT; @@ -405,10 +641,6 @@ static int i915_pmu_event_init(struct perf_event *event) if (event->cpu < 0) return -EINVAL; - /* only allow running on one cpu at a time */ - if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask)) - return -EINVAL; - if (is_engine_event(event)) ret = engine_event_init(event); else @@ -416,114 +648,18 @@ static int i915_pmu_event_init(struct perf_event *event) if (ret) return ret; - if (!event->parent) + if (!event->parent) { + drm_dev_get(&i915->drm); event->destroy = i915_pmu_event_destroy; - - return 0; -} - -static u64 __get_rc6(struct drm_i915_private *i915) -{ - u64 val; - - val = intel_rc6_residency_ns(i915, - IS_VALLEYVIEW(i915) ? - VLV_GT_RENDER_RC6 : - GEN6_GT_GFX_RC6); - - if (HAS_RC6p(i915)) - val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); - - if (HAS_RC6pp(i915)) - val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); - - return val; -} - -static u64 get_rc6(struct drm_i915_private *i915) -{ -#if IS_ENABLED(CONFIG_PM) - unsigned long flags; - u64 val; - - if (intel_runtime_pm_get_if_in_use(i915)) { - val = __get_rc6(i915); - intel_runtime_pm_put(i915); - - /* - * If we are coming back from being runtime suspended we must - * be careful not to report a larger value than returned - * previously. - */ - - spin_lock_irqsave(&i915->pmu.lock, flags); - - if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; - i915->pmu.sample[__I915_SAMPLE_RC6].cur = val; - } else { - val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; - } - - spin_unlock_irqrestore(&i915->pmu.lock, flags); - } else { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; - - /* - * We are runtime suspended. - * - * Report the delta from when the device was suspended to now, - * on top of the last known real value, as the approximated RC6 - * counter value. 
- */ - spin_lock_irqsave(&i915->pmu.lock, flags); - spin_lock(&kdev->power.lock); - - /* - * After the above branch intel_runtime_pm_get_if_in_use failed - * to get the runtime PM reference we cannot assume we are in - * runtime suspend since we can either: a) race with coming out - * of it before we took the power.lock, or b) there are other - * states than suspended which can bring us here. - * - * We need to double-check that we are indeed currently runtime - * suspended and if not we cannot do better than report the last - * known RC6 value. - */ - if (kdev->power.runtime_status == RPM_SUSPENDED) { - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_jiffies_last = - kdev->power.suspended_jiffies; - - val = kdev->power.suspended_jiffies - - i915->pmu.suspended_jiffies_last; - val += jiffies - kdev->power.accounting_timestamp; - - val = jiffies_to_nsecs(val); - val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; - } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; - } else { - val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; - } - - spin_unlock(&kdev->power.lock); - spin_unlock_irqrestore(&i915->pmu.lock, flags); } - return val; -#else - return __get_rc6(i915); -#endif + return 0; } static u64 __i915_pmu_event_read(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); u64 val = 0; if (is_engine_event(event)) { @@ -534,31 +670,42 @@ static u64 __i915_pmu_event_read(struct perf_event *event) engine_event_class(event), engine_event_instance(event)); - if (WARN_ON_ONCE(!engine)) { + if (drm_WARN_ON_ONCE(&i915->drm, !engine)) { /* Do nothing */ } else if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) { - val = ktime_to_ns(intel_engine_get_busy_time(engine)); + ktime_t unused; + + val = ktime_to_ns(intel_engine_get_busy_time(engine, + &unused)); } else { val = engine->pmu.sample[sample].cur; } } else { - switch (event->attr.config) { + const unsigned int gt_id = config_gt_id(event->attr.config); + const u64 config = config_counter(event->attr.config); + + switch (config) { case I915_PMU_ACTUAL_FREQUENCY: val = - div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur, + div_u64(read_sample(pmu, gt_id, + __I915_SAMPLE_FREQ_ACT), USEC_PER_SEC /* to MHz */); break; case I915_PMU_REQUESTED_FREQUENCY: val = - div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur, + div_u64(read_sample(pmu, gt_id, + __I915_SAMPLE_FREQ_REQ), USEC_PER_SEC /* to MHz */); break; case I915_PMU_INTERRUPTS: - val = count_interrupts(i915); + val = READ_ONCE(pmu->irq_count); break; case I915_PMU_RC6_RESIDENCY: - val = get_rc6(i915); + val = get_rc6(i915->gt[gt_id]); + break; + case I915_PMU_SOFTWARE_GT_AWAKE_TIME: + val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915))); break; } } @@ -568,41 +715,50 @@ static u64 __i915_pmu_event_read(struct perf_event *event) static void i915_pmu_event_read(struct perf_event *event) { + struct i915_pmu *pmu = event_to_pmu(event); struct hw_perf_event *hwc = &event->hw; u64 prev, new; -again: - prev = local64_read(&hwc->prev_count); - new = __i915_pmu_event_read(event); + if (!pmu->registered) { + event->hw.state = PERF_HES_STOPPED; + return; + } - if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) - goto again; + prev = local64_read(&hwc->prev_count); + do 
{ + new = __i915_pmu_event_read(event); + } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new)); local64_add(new - prev, &event->count); } static void i915_pmu_enable(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - unsigned int bit = event_enabled_bit(event); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); + const unsigned int bit = event_bit(event); unsigned long flags; - spin_lock_irqsave(&i915->pmu.lock, flags); + if (bit == -1) + goto update; + + spin_lock_irqsave(&pmu->lock, flags); /* * Update the bitmask of enabled events and increment * the event reference counter. */ - GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); - GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); - i915->pmu.enable |= BIT_ULL(bit); - i915->pmu.enable_count[bit]++; + BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS); + GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); + GEM_BUG_ON(pmu->enable_count[bit] == ~0); + + pmu->enable |= BIT(bit); + pmu->enable_count[bit]++; /* * Start the sampling timer if needed and not already enabled. */ - __i915_pmu_maybe_start_timer(i915); + __i915_pmu_maybe_start_timer(pmu); /* * For per-engine events the bitmask and reference counting @@ -615,16 +771,22 @@ static void i915_pmu_enable(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - GEM_BUG_ON(!engine); - engine->pmu.enable |= BIT(sample); - GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); + BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != + I915_ENGINE_SAMPLE_COUNT); + BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != + I915_ENGINE_SAMPLE_COUNT); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); + + engine->pmu.enable |= BIT(sample); engine->pmu.enable_count[sample]++; } - spin_unlock_irqrestore(&i915->pmu.lock, flags); + spin_unlock_irqrestore(&pmu->lock, flags); +update: /* * Store the current counter value so we can report the correct delta * for all listeners. Even when the event was already enabled and has @@ -635,12 +797,15 @@ static void i915_pmu_enable(struct perf_event *event) static void i915_pmu_disable(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); - unsigned int bit = event_enabled_bit(event); + struct i915_pmu *pmu = event_to_pmu(event); + struct drm_i915_private *i915 = pmu_to_i915(pmu); + const unsigned int bit = event_bit(event); unsigned long flags; - spin_lock_irqsave(&i915->pmu.lock, flags); + if (bit == -1) + return; + + spin_lock_irqsave(&pmu->lock, flags); if (is_engine_event(event)) { u8 sample = engine_event_sample(event); @@ -649,9 +814,11 @@ static void i915_pmu_disable(struct perf_event *event) engine = intel_engine_lookup_user(i915, engine_event_class(event), engine_event_instance(event)); - GEM_BUG_ON(!engine); - GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); + + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); + GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); + /* * Decrement the reference count and clear the enabled * bitmask when the last listener on an event goes away. 
@@ -660,36 +827,54 @@ static void i915_pmu_disable(struct perf_event *event) engine->pmu.enable &= ~BIT(sample); } - GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); - GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); + GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); + GEM_BUG_ON(pmu->enable_count[bit] == 0); /* * Decrement the reference count and clear the enabled * bitmask when the last listener on an event goes away. */ - if (--i915->pmu.enable_count[bit] == 0) { - i915->pmu.enable &= ~BIT_ULL(bit); - i915->pmu.timer_enabled &= pmu_needs_timer(i915, true); + if (--pmu->enable_count[bit] == 0) { + pmu->enable &= ~BIT(bit); + pmu->timer_enabled &= pmu_needs_timer(pmu); } - spin_unlock_irqrestore(&i915->pmu.lock, flags); + spin_unlock_irqrestore(&pmu->lock, flags); } static void i915_pmu_event_start(struct perf_event *event, int flags) { + struct i915_pmu *pmu = event_to_pmu(event); + + if (!pmu->registered) + return; + i915_pmu_enable(event); event->hw.state = 0; } static void i915_pmu_event_stop(struct perf_event *event, int flags) { + struct i915_pmu *pmu = event_to_pmu(event); + + if (!pmu->registered) + goto out; + if (flags & PERF_EF_UPDATE) i915_pmu_event_read(event); + i915_pmu_disable(event); + +out: event->hw.state = PERF_HES_STOPPED; } static int i915_pmu_event_add(struct perf_event *event, int flags) { + struct i915_pmu *pmu = event_to_pmu(event); + + if (!pmu->registered) + return -ENODEV; + if (flags & PERF_EF_START) i915_pmu_event_start(event, flags); @@ -701,11 +886,6 @@ static void i915_pmu_event_del(struct perf_event *event, int flags) i915_pmu_event_stop(event, PERF_EF_UPDATE); } -static int i915_pmu_event_event_idx(struct perf_event *event) -{ - return 0; -} - struct i915_str_attribute { struct device_attribute attr; const char *str; @@ -717,7 +897,7 @@ static ssize_t i915_pmu_format_show(struct device *dev, struct i915_str_attribute *eattr; eattr = container_of(attr, struct i915_str_attribute, attr); - return sprintf(buf, "%s\n", eattr->str); + return sysfs_emit(buf, "%s\n", eattr->str); } #define I915_PMU_FORMAT_ATTR(_name, _config) \ @@ -747,45 +927,23 @@ static ssize_t i915_pmu_event_show(struct device *dev, struct i915_ext_attribute *eattr; eattr = container_of(attr, struct i915_ext_attribute, attr); - return sprintf(buf, "config=0x%lx\n", eattr->val); + return sysfs_emit(buf, "config=0x%lx\n", eattr->val); } -static struct attribute_group i915_pmu_events_attr_group = { - .name = "events", - /* Patch in attrs at runtime. 
*/ -}; - -static ssize_t -i915_pmu_get_attr_cpumask(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask); +#define __event(__counter, __name, __unit) \ +{ \ + .counter = (__counter), \ + .name = (__name), \ + .unit = (__unit), \ + .global = false, \ } -static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL); - -static struct attribute *i915_cpumask_attrs[] = { - &dev_attr_cpumask.attr, - NULL, -}; - -static const struct attribute_group i915_pmu_cpumask_attr_group = { - .attrs = i915_cpumask_attrs, -}; - -static const struct attribute_group *i915_pmu_attr_groups[] = { - &i915_pmu_format_attr_group, - &i915_pmu_events_attr_group, - &i915_pmu_cpumask_attr_group, - NULL -}; - -#define __event(__config, __name, __unit) \ +#define __global_event(__counter, __name, __unit) \ { \ - .config = (__config), \ + .counter = (__counter), \ .name = (__name), \ .unit = (__unit), \ + .global = true, \ } #define __engine_event(__sample, __name) \ @@ -820,17 +978,20 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name, } static struct attribute ** -create_event_attributes(struct drm_i915_private *i915) +create_event_attributes(struct i915_pmu *pmu) { + struct drm_i915_private *i915 = pmu_to_i915(pmu); static const struct { - u64 config; + unsigned int counter; const char *name; const char *unit; + bool global; } events[] = { - __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"), - __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"), - __event(I915_PMU_INTERRUPTS, "interrupts", NULL), - __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), + __event(0, "actual-frequency", "M"), + __event(1, "requested-frequency", "M"), + __global_event(2, "interrupts", NULL), + __event(3, "rc6-residency", "ns"), + __event(4, "software-gt-awake-time", "ns"), }; static const struct { enum drm_i915_pmu_engine_sample sample; @@ -845,16 +1006,20 @@ create_event_attributes(struct drm_i915_private *i915) struct i915_ext_attribute *i915_attr = NULL, *i915_iter; struct attribute **attr = NULL, **attr_iter; struct intel_engine_cs *engine; - enum intel_engine_id id; - unsigned int i; + struct intel_gt *gt; + unsigned int i, j; /* Count how many counters we will be exposing. */ - for (i = 0; i < ARRAY_SIZE(events); i++) { - if (!config_status(i915, events[i].config)) - count++; + for_each_gt(gt, i915, j) { + for (i = 0; i < ARRAY_SIZE(events); i++) { + u64 config = ___I915_PMU_OTHER(j, events[i].counter); + + if (!config_status(i915, config)) + count++; + } } - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { for (i = 0; i < ARRAY_SIZE(engine_events); i++) { if (!engine_event_status(engine, engine_events[i].sample)) @@ -881,31 +1046,44 @@ create_event_attributes(struct drm_i915_private *i915) attr_iter = attr; /* Initialize supported non-engine counters. 
*/ - for (i = 0; i < ARRAY_SIZE(events); i++) { - char *str; - - if (config_status(i915, events[i].config)) - continue; - - str = kstrdup(events[i].name, GFP_KERNEL); - if (!str) - goto err; + for_each_gt(gt, i915, j) { + for (i = 0; i < ARRAY_SIZE(events); i++) { + u64 config = ___I915_PMU_OTHER(j, events[i].counter); + char *str; - *attr_iter++ = &i915_iter->attr.attr; - i915_iter = add_i915_attr(i915_iter, str, events[i].config); + if (config_status(i915, config)) + continue; - if (events[i].unit) { - str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name); + if (events[i].global || !HAS_EXTRA_GT_LIST(i915)) + str = kstrdup(events[i].name, GFP_KERNEL); + else + str = kasprintf(GFP_KERNEL, "%s-gt%u", + events[i].name, j); if (!str) goto err; - *attr_iter++ = &pmu_iter->attr.attr; - pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit); + *attr_iter++ = &i915_iter->attr.attr; + i915_iter = add_i915_attr(i915_iter, str, config); + + if (events[i].unit) { + if (events[i].global || !HAS_EXTRA_GT_LIST(i915)) + str = kasprintf(GFP_KERNEL, "%s.unit", + events[i].name); + else + str = kasprintf(GFP_KERNEL, "%s-gt%u.unit", + events[i].name, j); + if (!str) + goto err; + + *attr_iter++ = &pmu_iter->attr.attr; + pmu_iter = add_pmu_attr(pmu_iter, str, + events[i].unit); + } } } /* Initialize supported engine counters. */ - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { for (i = 0; i < ARRAY_SIZE(engine_events); i++) { char *str; @@ -922,7 +1100,7 @@ create_event_attributes(struct drm_i915_private *i915) i915_iter = add_i915_attr(i915_iter, str, __I915_PMU_ENGINE(engine->uabi_class, - engine->instance, + engine->uabi_instance, engine_events[i].sample)); str = kasprintf(GFP_KERNEL, "%s-%s.unit", @@ -935,8 +1113,8 @@ create_event_attributes(struct drm_i915_private *i915) } } - i915->pmu.i915_attr = i915_attr; - i915->pmu.pmu_attr = pmu_attr; + pmu->i915_attr = i915_attr; + pmu->pmu_attr = pmu_attr; return attr; @@ -952,145 +1130,104 @@ err_alloc: return NULL; } -static void free_event_attributes(struct drm_i915_private *i915) +static void free_event_attributes(struct i915_pmu *pmu) { - struct attribute **attr_iter = i915_pmu_events_attr_group.attrs; + struct attribute **attr_iter = pmu->events_attr_group.attrs; for (; *attr_iter; attr_iter++) kfree((*attr_iter)->name); - kfree(i915_pmu_events_attr_group.attrs); - kfree(i915->pmu.i915_attr); - kfree(i915->pmu.pmu_attr); + kfree(pmu->events_attr_group.attrs); + kfree(pmu->i915_attr); + kfree(pmu->pmu_attr); - i915_pmu_events_attr_group.attrs = NULL; - i915->pmu.i915_attr = NULL; - i915->pmu.pmu_attr = NULL; -} - -static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) -{ - struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node); - - GEM_BUG_ON(!pmu->base.event_init); - - /* Select the first online CPU as a designated reader. 
*/ - if (!cpumask_weight(&i915_pmu_cpumask)) - cpumask_set_cpu(cpu, &i915_pmu_cpumask); - - return 0; -} - -static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) -{ - struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node); - unsigned int target; - - GEM_BUG_ON(!pmu->base.event_init); - - if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) { - target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); - /* Migrate events if there is a valid target */ - if (target < nr_cpu_ids) { - cpumask_set_cpu(target, &i915_pmu_cpumask); - perf_pmu_migrate_context(&pmu->base, cpu, target); - } - } - - return 0; -} - -static enum cpuhp_state cpuhp_slot = CPUHP_INVALID; - -static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915) -{ - enum cpuhp_state slot; - int ret; - - ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, - "perf/x86/intel/i915:online", - i915_pmu_cpu_online, - i915_pmu_cpu_offline); - if (ret < 0) - return ret; - - slot = ret; - ret = cpuhp_state_add_instance(slot, &i915->pmu.node); - if (ret) { - cpuhp_remove_multi_state(slot); - return ret; - } - - cpuhp_slot = slot; - return 0; -} - -static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915) -{ - WARN_ON(cpuhp_slot == CPUHP_INVALID); - WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node)); - cpuhp_remove_multi_state(cpuhp_slot); + pmu->events_attr_group.attrs = NULL; + pmu->i915_attr = NULL; + pmu->pmu_attr = NULL; } void i915_pmu_register(struct drm_i915_private *i915) { - int ret; - - if (INTEL_GEN(i915) <= 2) { - DRM_INFO("PMU not supported for this GPU."); - return; + struct i915_pmu *pmu = &i915->pmu; + const struct attribute_group *attr_groups[] = { + &i915_pmu_format_attr_group, + &pmu->events_attr_group, + NULL + }; + int ret = -ENOMEM; + + spin_lock_init(&pmu->lock); + hrtimer_setup(&pmu->timer, i915_sample, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + init_rc6(pmu); + + if (IS_DGFX(i915)) { + pmu->name = kasprintf(GFP_KERNEL, + "i915_%s", + dev_name(i915->drm.dev)); + if (pmu->name) { + /* tools/perf reserves colons as special. 
*/ + strreplace((char *)pmu->name, ':', '_'); + } + } else { + pmu->name = "i915"; } - - i915_pmu_events_attr_group.attrs = create_event_attributes(i915); - if (!i915_pmu_events_attr_group.attrs) { - ret = -ENOMEM; + if (!pmu->name) goto err; - } - i915->pmu.base.attr_groups = i915_pmu_attr_groups; - i915->pmu.base.task_ctx_nr = perf_invalid_context; - i915->pmu.base.event_init = i915_pmu_event_init; - i915->pmu.base.add = i915_pmu_event_add; - i915->pmu.base.del = i915_pmu_event_del; - i915->pmu.base.start = i915_pmu_event_start; - i915->pmu.base.stop = i915_pmu_event_stop; - i915->pmu.base.read = i915_pmu_event_read; - i915->pmu.base.event_idx = i915_pmu_event_event_idx; - - spin_lock_init(&i915->pmu.lock); - hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - i915->pmu.timer.function = i915_sample; - - ret = perf_pmu_register(&i915->pmu.base, "i915", -1); + pmu->events_attr_group.name = "events"; + pmu->events_attr_group.attrs = create_event_attributes(pmu); + if (!pmu->events_attr_group.attrs) + goto err_name; + + pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), + GFP_KERNEL); + if (!pmu->base.attr_groups) + goto err_attr; + + pmu->base.module = THIS_MODULE; + pmu->base.task_ctx_nr = perf_invalid_context; + pmu->base.scope = PERF_PMU_SCOPE_SYS_WIDE; + pmu->base.event_init = i915_pmu_event_init; + pmu->base.add = i915_pmu_event_add; + pmu->base.del = i915_pmu_event_del; + pmu->base.start = i915_pmu_event_start; + pmu->base.stop = i915_pmu_event_stop; + pmu->base.read = i915_pmu_event_read; + + ret = perf_pmu_register(&pmu->base, pmu->name, -1); if (ret) - goto err; + goto err_groups; - ret = i915_pmu_register_cpuhp_state(i915); - if (ret) - goto err_unreg; + pmu->registered = true; return; -err_unreg: - perf_pmu_unregister(&i915->pmu.base); +err_groups: + kfree(pmu->base.attr_groups); +err_attr: + free_event_attributes(pmu); +err_name: + if (IS_DGFX(i915)) + kfree(pmu->name); err: - i915->pmu.base.event_init = NULL; - free_event_attributes(i915); - DRM_NOTE("Failed to register PMU! (err=%d)\n", ret); + drm_notice(&i915->drm, "Failed to register PMU!\n"); } void i915_pmu_unregister(struct drm_i915_private *i915) { - if (!i915->pmu.base.event_init) - return; + struct i915_pmu *pmu = &i915->pmu; - WARN_ON(i915->pmu.enable); + if (!pmu->registered) + return; - hrtimer_cancel(&i915->pmu.timer); + /* Disconnect the PMU callbacks */ + pmu->registered = false; - i915_pmu_unregister_cpuhp_state(i915); + hrtimer_cancel(&pmu->timer); - perf_pmu_unregister(&i915->pmu.base); - i915->pmu.base.event_init = NULL; - free_event_attributes(i915); + perf_pmu_unregister(&pmu->base); + kfree(pmu->base.attr_groups); + if (IS_DGFX(i915)) + kfree(pmu->name); + free_event_attributes(pmu); } |
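Usage note (not part of the patch itself): the counters this PMU exposes are consumed from userspace through perf_event_open(2), using the dynamic PMU type and the config values that create_event_attributes() publishes under sysfs. The sketch below is illustrative only; it assumes an integrated, single-GT part where the PMU is registered plainly as "i915" and the RC6 event file is named "rc6-residency" (on discrete or multi-GT parts the PMU name is derived from the PCI address and per-GT events gain a -gtN suffix), and it assumes sufficient perf privileges. Error handling is kept minimal.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int read_sysfs(const char *path, const char *fmt, uint64_t *val)
{
	FILE *f = fopen(path, "r");
	int ret;

	if (!f)
		return -1;
	ret = fscanf(f, fmt, val);
	fclose(f);
	return ret == 1 ? 0 : -1;
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t type, config, count;
	int fd;

	/* Dynamic PMU type allocated by perf_pmu_register(). */
	if (read_sysfs("/sys/bus/event_source/devices/i915/type",
		       "%" SCNu64, &type))
		return 1;

	/* Event files contain "config=0x...", as printed by i915_pmu_event_show(). */
	if (read_sysfs("/sys/bus/event_source/devices/i915/events/rc6-residency",
		       "config=0x%" SCNx64, &config))
		return 1;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	attr.config = config;

	/* i915 events are system-wide: pid == -1 and a valid CPU, no task context. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;

	/* rc6-residency is reported in nanoseconds (see the "ns" unit attribute). */
	printf("rc6-residency: %" PRIu64 " ns\n", count);
	close(fd);
	return 0;
}

The same metadata is what lets tools consume these counters symbolically, e.g. perf stat -e i915/rc6-residency/.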
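A second small sketch, this time of the lock-free read pattern the patch switches i915_pmu_event_read() to. This is a user-space analogue written with C11 atomics, not the kernel's local64_t/local64_try_cmpxchg() API: the last published raw value is replaced with the newly sampled one via compare-and-exchange, and only the delta is accumulated, so concurrent readers never double-count a period.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t prev_count;	/* last raw value published (hwc->prev_count analogue) */
static _Atomic uint64_t event_count;	/* accumulated count (event->count analogue) */

static void pmu_read(uint64_t (*read_raw)(void))
{
	uint64_t prev = atomic_load(&prev_count);
	uint64_t new;

	/* Re-sample until we successfully advance prev_count from the value the delta is based on. */
	do {
		new = read_raw();
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, new));

	atomic_fetch_add(&event_count, new - prev);
}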
