Diffstat (limited to 'drivers/gpu/drm/i915/i915_pmu.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 294
1 file changed, 199 insertions, 95 deletions
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 7ece883a7d95..d35973b41186 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -10,6 +10,7 @@
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rc6.h"
@@ -50,16 +51,26 @@ static u8 engine_event_instance(struct perf_event *event)
return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}
-static bool is_engine_config(u64 config)
+static bool is_engine_config(const u64 config)
{
return config < __I915_PMU_OTHER(0);
}
+static unsigned int config_gt_id(const u64 config)
+{
+ return config >> __I915_PMU_GT_SHIFT;
+}
+
+static u64 config_counter(const u64 config)
+{
+ return config & ~(~0ULL << __I915_PMU_GT_SHIFT);
+}
+
static unsigned int other_bit(const u64 config)
{
unsigned int val;
- switch (config) {
+ switch (config_counter(config)) {
case I915_PMU_ACTUAL_FREQUENCY:
val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED;
break;
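
The config_gt_id()/config_counter() helpers introduced above split an event config into a GT index in the topmost bits and a counter id in the remaining bits; other_bit() then switches only on config_counter(config), so the same counter ids are reused for every GT. A standalone sketch of that packing, assuming a shift of 56 for __I915_PMU_GT_SHIFT (the actual value lives in the i915 uapi header):

#include <assert.h>
#include <stdint.h>

#define GT_SHIFT 56	/* stands in for __I915_PMU_GT_SHIFT (assumed value) */

/* Mirrors ___I915_PMU_OTHER(): place the GT index in the top bits. */
static uint64_t pack_config(unsigned int gt, uint64_t counter)
{
	return counter | ((uint64_t)gt << GT_SHIFT);
}

static unsigned int sketch_config_gt_id(uint64_t config)
{
	return config >> GT_SHIFT;
}

static uint64_t sketch_config_counter(uint64_t config)
{
	return config & ~(~0ULL << GT_SHIFT);	/* keep only the low bits */
}

int main(void)
{
	uint64_t cfg = pack_config(1, 0x100);	/* counter id is illustrative */

	assert(sketch_config_gt_id(cfg) == 1);
	assert(sketch_config_counter(cfg) == 0x100);
	return 0;
}
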
@@ -77,7 +88,9 @@ static unsigned int other_bit(const u64 config)
return -1;
}
- return I915_ENGINE_SAMPLE_COUNT + val;
+ return I915_ENGINE_SAMPLE_COUNT +
+ config_gt_id(config) * __I915_PMU_TRACKED_EVENT_COUNT +
+ val;
}
static unsigned int config_bit(const u64 config)
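
With the GT folded into the config, other_bit() flattens each (GT, tracked event) pair onto its own bit after the engine sample bits. A minimal sketch of that layout, assuming three engine samples and three tracked events per GT as in i915_pmu.h (treat the constants as assumptions):

#include <assert.h>
#include <stdio.h>

#define ENGINE_SAMPLE_COUNT	3	/* busy, wait, sema (assumed) */
#define TRACKED_EVENT_COUNT	3	/* actual freq, requested freq, rc6 (assumed) */
#define MAX_GT			2

static unsigned int flat_bit(unsigned int gt, unsigned int event)
{
	return ENGINE_SAMPLE_COUNT + gt * TRACKED_EVENT_COUNT + event;
}

int main(void)
{
	/* GT0 tracked events land on bits 3..5, GT1 on bits 6..8. */
	printf("gt0 rc6 -> bit %u\n", flat_bit(0, 2));
	printf("gt1 rc6 -> bit %u\n", flat_bit(1, 2));

	/* The highest index must fit in the u32 pmu->enable mask. */
	assert(flat_bit(MAX_GT - 1, TRACKED_EVENT_COUNT - 1) < 32);
	return 0;
}

config_mask() in the next hunk asserts exactly this bound: at build time via BUILD_BUG_ON() when the config is a compile-time constant, otherwise with WARN_ON_ONCE().
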
@@ -88,9 +101,20 @@ static unsigned int config_bit(const u64 config)
return other_bit(config);
}
-static u64 config_mask(u64 config)
+static u32 config_mask(const u64 config)
{
- return BIT_ULL(config_bit(config));
+ unsigned int bit = config_bit(config);
+
+ if (__builtin_constant_p(config))
+ BUILD_BUG_ON(bit >
+ BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ enable)) - 1);
+ else
+ WARN_ON_ONCE(bit >
+ BITS_PER_TYPE(typeof_member(struct i915_pmu,
+ enable)) - 1);
+
+ return BIT(config_bit(config));
}
static bool is_engine_event(struct perf_event *event)
@@ -103,7 +127,19 @@ static unsigned int event_bit(struct perf_event *event)
return config_bit(event->attr.config);
}
-static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
+static u32 frequency_enabled_mask(void)
+{
+ unsigned int i;
+ u32 mask = 0;
+
+ for (i = 0; i < I915_PMU_MAX_GT; i++)
+ mask |= config_mask(__I915_PMU_ACTUAL_FREQUENCY(i)) |
+ config_mask(__I915_PMU_REQUESTED_FREQUENCY(i));
+
+ return mask;
+}
+
+static bool pmu_needs_timer(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
u32 enable;
@@ -119,21 +155,13 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
* Mask out all the ones which do not need the timer, or in
* other words keep all the ones that could need the timer.
*/
- enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_mask(I915_PMU_REQUESTED_FREQUENCY) |
- ENGINE_SAMPLE_MASK;
+ enable &= frequency_enabled_mask() | ENGINE_SAMPLE_MASK;
/*
- * When the GPU is idle per-engine counters do not need to be
- * running so clear those bits out.
- */
- if (!gpu_active)
- enable &= ~ENGINE_SAMPLE_MASK;
- /*
	 * Also, when software busyness tracking is available we do not
	 * need the timer for the I915_SAMPLE_BUSY counter.
*/
- else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
+ if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
enable &= ~BIT(I915_SAMPLE_BUSY);
/*
@@ -163,9 +191,27 @@ static inline s64 ktime_since_raw(const ktime_t kt)
return ktime_to_ns(ktime_sub(ktime_get_raw(), kt));
}
+static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)
+{
+ return pmu->sample[gt_id][sample].cur;
+}
+
+static void
+store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)
+{
+ pmu->sample[gt_id][sample].cur = val;
+}
+
+static void
+add_sample_mult(struct i915_pmu *pmu, unsigned int gt_id, int sample, u32 val, u32 mul)
+{
+ pmu->sample[gt_id][sample].cur += mul_u32_u32(val, mul);
+}
+
static u64 get_rc6(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
+ const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
bool awake = false;
@@ -180,7 +226,7 @@ static u64 get_rc6(struct intel_gt *gt)
spin_lock_irqsave(&pmu->lock, flags);
if (awake) {
- pmu->sample[__I915_SAMPLE_RC6].cur = val;
+ store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
} else {
/*
* We think we are runtime suspended.
@@ -189,14 +235,14 @@ static u64 get_rc6(struct intel_gt *gt)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- val = ktime_since_raw(pmu->sleep_last);
- val += pmu->sample[__I915_SAMPLE_RC6].cur;
+ val = ktime_since_raw(pmu->sleep_last[gt_id]);
+ val += read_sample(pmu, gt_id, __I915_SAMPLE_RC6);
}
- if (val < pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur)
- val = pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur;
+ if (val < read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED))
+ val = read_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED);
else
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = val;
+ store_sample(pmu, gt_id, __I915_SAMPLE_RC6_LAST_REPORTED, val);
spin_unlock_irqrestore(&pmu->lock, flags);
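
get_rc6() is now fully keyed by gt->info.id: while a GT is runtime suspended its residency is approximated as the time since that GT's sleep_last plus its last real reading, and the result is clamped so it never goes backwards versus __I915_SAMPLE_RC6_LAST_REPORTED. A compact user-space sketch of that bookkeeping (array sizes and sampler names are assumptions mirroring i915_pmu.h, whose real enum also holds the two frequency samplers):

#include <assert.h>
#include <stdint.h>

#define MAX_GT 2	/* assumed I915_PMU_MAX_GT */
enum { SAMPLE_RC6, SAMPLE_RC6_LAST_REPORTED, NUM_SAMPLERS };

struct pmu {
	uint64_t sample[MAX_GT][NUM_SAMPLERS];	/* per-GT, per-sampler */
	uint64_t sleep_last[MAX_GT];		/* when each GT last parked */
};

/* Estimate RC6 for a parked GT and keep the reported value monotonic. */
static uint64_t get_rc6_parked(struct pmu *pmu, unsigned int gt, uint64_t now)
{
	uint64_t val = (now - pmu->sleep_last[gt]) + pmu->sample[gt][SAMPLE_RC6];

	if (val < pmu->sample[gt][SAMPLE_RC6_LAST_REPORTED])
		val = pmu->sample[gt][SAMPLE_RC6_LAST_REPORTED];
	else
		pmu->sample[gt][SAMPLE_RC6_LAST_REPORTED] = val;

	return val;
}

int main(void)
{
	struct pmu pmu = { .sample[1][SAMPLE_RC6] = 1000, .sleep_last[1] = 50 };

	assert(get_rc6_parked(&pmu, 1, 150) == 1100);	/* 100 asleep + 1000 */
	return 0;
}
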
@@ -206,27 +252,34 @@ static u64 get_rc6(struct intel_gt *gt)
static void init_rc6(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
- intel_wakeref_t wakeref;
+ struct intel_gt *gt;
+ unsigned int i;
+
+ for_each_gt(gt, i915, i) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+ u64 val = __get_rc6(gt);
- with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref) {
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
- pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
- pmu->sample[__I915_SAMPLE_RC6].cur;
- pmu->sleep_last = ktime_get_raw();
+ store_sample(pmu, i, __I915_SAMPLE_RC6, val);
+ store_sample(pmu, i, __I915_SAMPLE_RC6_LAST_REPORTED,
+ val);
+ pmu->sleep_last[i] = ktime_get_raw();
+ }
}
}
-static void park_rc6(struct drm_i915_private *i915)
+static void park_rc6(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = &gt->i915->pmu;
- pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(to_gt(i915));
- pmu->sleep_last = ktime_get_raw();
+ store_sample(pmu, gt->info.id, __I915_SAMPLE_RC6, __get_rc6(gt));
+ pmu->sleep_last[gt->info.id] = ktime_get_raw();
}
static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
- if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
+ if (!pmu->timer_enabled && pmu_needs_timer(pmu)) {
pmu->timer_enabled = true;
pmu->timer_last = ktime_get();
hrtimer_start_range_ns(&pmu->timer,
@@ -235,29 +288,31 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
}
}
-void i915_pmu_gt_parked(struct drm_i915_private *i915)
+void i915_pmu_gt_parked(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = &gt->i915->pmu;
if (!pmu->base.event_init)
return;
spin_lock_irq(&pmu->lock);
- park_rc6(i915);
+ park_rc6(gt);
/*
* Signal sampling timer to stop if only engine events are enabled and
* GPU went idle.
*/
- pmu->timer_enabled = pmu_needs_timer(pmu, false);
+ pmu->unparked &= ~BIT(gt->info.id);
+ if (pmu->unparked == 0)
+ pmu->timer_enabled = false;
spin_unlock_irq(&pmu->lock);
}
-void i915_pmu_gt_unparked(struct drm_i915_private *i915)
+void i915_pmu_gt_unparked(struct intel_gt *gt)
{
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = &gt->i915->pmu;
if (!pmu->base.event_init)
return;
@@ -267,7 +322,10 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
/*
* Re-enable sampling timer when GPU goes active.
*/
- __i915_pmu_maybe_start_timer(pmu);
+ if (pmu->unparked == 0)
+ __i915_pmu_maybe_start_timer(pmu);
+
+ pmu->unparked |= BIT(gt->info.id);
spin_unlock_irq(&pmu->lock);
}
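
Parking and unparking are now per GT, so the PMU keeps an unparked bitmask: the sampling timer is started when the first GT wakes and stopped when the last one parks, and the timer callback later in this patch skips GTs whose bit is clear. A minimal user-space sketch of that bookkeeping (names are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t unparked;
static bool timer_enabled;

static void gt_unparked(unsigned int gt)
{
	if (unparked == 0)		/* first GT to wake starts the timer */
		timer_enabled = true;
	unparked |= 1u << gt;
}

static void gt_parked(unsigned int gt)
{
	unparked &= ~(1u << gt);
	if (unparked == 0)		/* last GT to park stops the timer */
		timer_enabled = false;
}

int main(void)
{
	gt_unparked(0);
	gt_unparked(1);
	gt_parked(0);
	printf("timer_enabled=%d\n", timer_enabled);	/* still 1: GT1 awake */
	gt_parked(1);
	printf("timer_enabled=%d\n", timer_enabled);	/* 0: all parked */
	return 0;
}
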
@@ -338,6 +396,9 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
return;
for_each_engine(engine, gt, id) {
+ if (!engine->pmu.enable)
+ continue;
+
if (!intel_engine_pm_get_if_awake(engine))
continue;
@@ -353,34 +414,30 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
}
}
-static void
-add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
-{
- sample->cur += mul_u32_u32(val, mul);
-}
-
-static bool frequency_sampling_enabled(struct i915_pmu *pmu)
+static bool
+frequency_sampling_enabled(struct i915_pmu *pmu, unsigned int gt)
{
return pmu->enable &
- (config_mask(I915_PMU_ACTUAL_FREQUENCY) |
- config_mask(I915_PMU_REQUESTED_FREQUENCY));
+ (config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt)) |
+ config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt)));
}
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
struct drm_i915_private *i915 = gt->i915;
+ const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
- if (!frequency_sampling_enabled(pmu))
+ if (!frequency_sampling_enabled(pmu, gt_id))
return;
/* Report 0/0 (actual/requested) frequency while parked. */
if (!intel_gt_pm_get_if_awake(gt))
return;
- if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
u32 val;
/*
@@ -396,12 +453,12 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
if (!val)
val = intel_gpu_freq(rps, rps->cur_freq);
- add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
+ add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_ACT,
val, period_ns / 1000);
}
- if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
- add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
+ if (pmu->enable & config_mask(__I915_PMU_REQUESTED_FREQUENCY(gt_id))) {
+ add_sample_mult(pmu, gt_id, __I915_SAMPLE_FREQ_REQ,
intel_rps_get_requested_frequency(rps),
period_ns / 1000);
}
@@ -414,8 +471,9 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
struct i915_pmu *pmu = &i915->pmu;
- struct intel_gt *gt = to_gt(i915);
unsigned int period_ns;
+ struct intel_gt *gt;
+ unsigned int i;
ktime_t now;
if (!READ_ONCE(pmu->timer_enabled))
@@ -431,8 +489,14 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
* grabbing the forcewake. However the potential error from timer call-
* back delay greatly dominates this so we keep it simple.
*/
- engines_sample(gt, period_ns);
- frequency_sample(gt, period_ns);
+
+ for_each_gt(gt, i915, i) {
+ if (!(pmu->unparked & BIT(i)))
+ continue;
+
+ engines_sample(gt, period_ns);
+ frequency_sample(gt, period_ns);
+ }
hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
@@ -473,7 +537,13 @@ config_status(struct drm_i915_private *i915, u64 config)
{
struct intel_gt *gt = to_gt(i915);
- switch (config) {
+ unsigned int gt_id = config_gt_id(config);
+ unsigned int max_gt_id = HAS_EXTRA_GT_LIST(i915) ? 1 : 0;
+
+ if (gt_id > max_gt_id)
+ return -ENOENT;
+
+ switch (config_counter(config)) {
case I915_PMU_ACTUAL_FREQUENCY:
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
/* Requires a mutex for sampling! */
@@ -484,6 +554,8 @@ config_status(struct drm_i915_private *i915, u64 config)
return -ENODEV;
break;
case I915_PMU_INTERRUPTS:
+ if (gt_id)
+ return -ENOENT;
break;
case I915_PMU_RC6_RESIDENCY:
if (!gt->rc6.supported)
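
config_status() now validates the GT part of the config as well: counters naming a GT the device does not have return -ENOENT, and the interrupt counter stays on GT0 only because pmu->irq_count is tracked per device rather than per tile. A small sketch of that gating, reusing the assumed layout constants from the earlier sketch (the counter id is illustrative):

#include <assert.h>
#include <errno.h>
#include <stdint.h>

#define GT_SHIFT	56	/* assumed __I915_PMU_GT_SHIFT */
#define CNT_INTERRUPTS	2	/* illustrative counter id */

static int check_config(uint64_t config, unsigned int max_gt_id)
{
	unsigned int gt_id = config >> GT_SHIFT;
	uint64_t counter = config & ~(~0ULL << GT_SHIFT);

	if (gt_id > max_gt_id)
		return -ENOENT;		/* no such GT on this device */
	if (counter == CNT_INTERRUPTS && gt_id)
		return -ENOENT;		/* interrupts are global (GT0 only) */
	return 0;
}

int main(void)
{
	uint64_t gt1_interrupts = CNT_INTERRUPTS | (1ULL << GT_SHIFT);

	assert(check_config(CNT_INTERRUPTS, 0) == 0);
	assert(check_config(gt1_interrupts, 1) == -ENOENT);
	return 0;
}
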
@@ -581,22 +653,27 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
val = engine->pmu.sample[sample].cur;
}
} else {
- switch (event->attr.config) {
+ const unsigned int gt_id = config_gt_id(event->attr.config);
+ const u64 config = config_counter(event->attr.config);
+
+ switch (config) {
case I915_PMU_ACTUAL_FREQUENCY:
val =
- div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
+ div_u64(read_sample(pmu, gt_id,
+ __I915_SAMPLE_FREQ_ACT),
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_REQUESTED_FREQUENCY:
val =
- div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
+ div_u64(read_sample(pmu, gt_id,
+ __I915_SAMPLE_FREQ_REQ),
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
val = READ_ONCE(pmu->irq_count);
break;
case I915_PMU_RC6_RESIDENCY:
- val = get_rc6(to_gt(i915));
+ val = get_rc6(i915->gt[gt_id]);
break;
case I915_PMU_SOFTWARE_GT_AWAKE_TIME:
val = ktime_to_ns(intel_gt_get_awake_time(to_gt(i915)));
@@ -633,11 +710,10 @@ static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
+ const unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
- unsigned int bit;
- bit = event_bit(event);
if (bit == -1)
goto update;
@@ -651,7 +727,7 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
- pmu->enable |= BIT_ULL(bit);
+ pmu->enable |= BIT(bit);
pmu->enable_count[bit]++;
/*
@@ -698,7 +774,7 @@ static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
- unsigned int bit = event_bit(event);
+ const unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
@@ -734,8 +810,8 @@ static void i915_pmu_disable(struct perf_event *event)
* bitmask when the last listener on an event goes away.
*/
if (--pmu->enable_count[bit] == 0) {
- pmu->enable &= ~BIT_ULL(bit);
- pmu->timer_enabled &= pmu_needs_timer(pmu, true);
+ pmu->enable &= ~BIT(bit);
+ pmu->timer_enabled &= pmu_needs_timer(pmu);
}
spin_unlock_irqrestore(&pmu->lock, flags);
@@ -848,11 +924,20 @@ static const struct attribute_group i915_pmu_cpumask_attr_group = {
.attrs = i915_cpumask_attrs,
};
-#define __event(__config, __name, __unit) \
+#define __event(__counter, __name, __unit) \
{ \
- .config = (__config), \
+ .counter = (__counter), \
.name = (__name), \
.unit = (__unit), \
+ .global = false, \
+}
+
+#define __global_event(__counter, __name, __unit) \
+{ \
+ .counter = (__counter), \
+ .name = (__name), \
+ .unit = (__unit), \
+ .global = true, \
}
#define __engine_event(__sample, __name) \
@@ -891,15 +976,16 @@ create_event_attributes(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
static const struct {
- u64 config;
+ unsigned int counter;
const char *name;
const char *unit;
+ bool global;
} events[] = {
- __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
- __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
- __event(I915_PMU_INTERRUPTS, "interrupts", NULL),
- __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
- __event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"),
+ __event(0, "actual-frequency", "M"),
+ __event(1, "requested-frequency", "M"),
+ __global_event(2, "interrupts", NULL),
+ __event(3, "rc6-residency", "ns"),
+ __event(4, "software-gt-awake-time", "ns"),
};
static const struct {
enum drm_i915_pmu_engine_sample sample;
@@ -914,12 +1000,17 @@ create_event_attributes(struct i915_pmu *pmu)
struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
struct attribute **attr = NULL, **attr_iter;
struct intel_engine_cs *engine;
- unsigned int i;
+ struct intel_gt *gt;
+ unsigned int i, j;
/* Count how many counters we will be exposing. */
- for (i = 0; i < ARRAY_SIZE(events); i++) {
- if (!config_status(i915, events[i].config))
- count++;
+ for_each_gt(gt, i915, j) {
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+
+ if (!config_status(i915, config))
+ count++;
+ }
}
for_each_uabi_engine(engine, i915) {
@@ -949,26 +1040,39 @@ create_event_attributes(struct i915_pmu *pmu)
attr_iter = attr;
/* Initialize supported non-engine counters. */
- for (i = 0; i < ARRAY_SIZE(events); i++) {
- char *str;
-
- if (config_status(i915, events[i].config))
- continue;
-
- str = kstrdup(events[i].name, GFP_KERNEL);
- if (!str)
- goto err;
+ for_each_gt(gt, i915, j) {
+ for (i = 0; i < ARRAY_SIZE(events); i++) {
+ u64 config = ___I915_PMU_OTHER(j, events[i].counter);
+ char *str;
- *attr_iter++ = &i915_iter->attr.attr;
- i915_iter = add_i915_attr(i915_iter, str, events[i].config);
+ if (config_status(i915, config))
+ continue;
- if (events[i].unit) {
- str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
+ if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+ str = kstrdup(events[i].name, GFP_KERNEL);
+ else
+ str = kasprintf(GFP_KERNEL, "%s-gt%u",
+ events[i].name, j);
if (!str)
goto err;
- *attr_iter++ = &pmu_iter->attr.attr;
- pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
+ *attr_iter++ = &i915_iter->attr.attr;
+ i915_iter = add_i915_attr(i915_iter, str, config);
+
+ if (events[i].unit) {
+ if (events[i].global || !HAS_EXTRA_GT_LIST(i915))
+ str = kasprintf(GFP_KERNEL, "%s.unit",
+ events[i].name);
+ else
+ str = kasprintf(GFP_KERNEL, "%s-gt%u.unit",
+ events[i].name, j);
+ if (!str)
+ goto err;
+
+ *attr_iter++ = &pmu_iter->attr.attr;
+ pmu_iter = add_pmu_attr(pmu_iter, str,
+ events[i].unit);
+ }
}
}
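
From user space the new counters appear as extra i915 PMU event attributes, suffixed with -gt<N> on parts with more than one GT while "interrupts" stays global. A user-space sketch of reading one of them through perf_event_open(2); the attribute name rc6-residency-gt1, the cpu choice and the sysfs paths are assumptions for a given kernel and system:

#define _GNU_SOURCE
#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static int read_u64_file(const char *path, const char *fmt, uint64_t *val)
{
	FILE *f = fopen(path, "r");
	int ret;

	if (!f)
		return -1;
	ret = fscanf(f, fmt, val);
	fclose(f);
	return ret == 1 ? 0 : -1;
}

int main(void)
{
	struct perf_event_attr attr = { .size = sizeof(attr) };
	uint64_t type, config, count;
	int fd;

	/* PMU type id from /sys/bus/event_source/devices/i915/type. */
	if (read_u64_file("/sys/bus/event_source/devices/i915/type",
			  "%" SCNu64, &type))
		return 1;
	/* Event encoding from .../events/rc6-residency-gt1 ("config=0x..."). */
	if (read_u64_file("/sys/bus/event_source/devices/i915/events/rc6-residency-gt1",
			  "config=%" SCNx64, &config))
		return 1;

	attr.type = type;
	attr.config = config;

	/* Uncore-style PMU: system-wide, cpu ideally taken from .../cpumask. */
	fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
		     -1 /* group */, 0 /* flags */);
	if (fd < 0)
		return 1;

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("rc6-residency-gt1: %" PRIu64 " ns\n", count);
	close(fd);
	return 0;
}

On such a system, perf stat -e i915/rc6-residency-gt1/ exercises the same path from the command line; reading the PMU needs CAP_PERFMON or a permissive perf_event_paranoid setting.
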