-rw-r--r--  drivers/gpu/drm/i915/Makefile             1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c           3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h           5
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c         688
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.h         104
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h           3
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c   33
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  26
-rw-r--r--  include/uapi/drm/i915_drm.h              39
9 files changed, 902 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c3649ec5b041..42bc8bd4ff06 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -45,6 +45,7 @@ i915-y := i915_drv.o \
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
+i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# GEM code
i915-y += i915_cmd_parser.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8dbcb03b5f54..0793a27e2b95 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -48,6 +48,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_pmu.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
@@ -1215,6 +1216,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
i915_gem_shrinker_init(dev_priv);
+ i915_pmu_register(dev_priv);
/*
* Notify a valid surface after modesetting,
@@ -1269,6 +1271,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_opregion_unregister(dev_priv);
i915_perf_unregister(dev_priv);
+ i915_pmu_unregister(dev_priv);
i915_teardown_sysfs(dev_priv);
i915_guc_log_unregister(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 019117144b3b..5bd5ac4cd03e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -40,6 +40,7 @@
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
+#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
@@ -2290,6 +2291,8 @@ struct drm_i915_private {
struct i915_gem_context *kernel_context;
/* Context only to be used for injecting preemption commands */
struct i915_gem_context *preempt_context;
+ struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
+ [MAX_ENGINE_INSTANCE + 1];
struct drm_dma_handle *status_page_dmah;
struct resource mch_res;
@@ -2761,6 +2764,8 @@ struct drm_i915_private {
int irq;
} lpe_audio;
+ struct i915_pmu pmu;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
new file mode 100644
index 000000000000..01b5ee67c1bf
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/perf_event.h>
+#include <linux/pm_runtime.h>
+
+#include "i915_drv.h"
+#include "i915_pmu.h"
+#include "intel_ringbuffer.h"
+
+/* Frequency for the sampling timer for events which need it. */
+#define FREQUENCY 200
+#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
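+/* With FREQUENCY at 200 this works out to a 5ms period (10us lower bound). */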
+
+#define ENGINE_SAMPLE_MASK \
+ (BIT(I915_SAMPLE_BUSY) | \
+ BIT(I915_SAMPLE_WAIT) | \
+ BIT(I915_SAMPLE_SEMA))
+
+#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
+
+static cpumask_t i915_pmu_cpumask = CPU_MASK_NONE;
+
+static u8 engine_config_sample(u64 config)
+{
+ return config & I915_PMU_SAMPLE_MASK;
+}
+
+static u8 engine_event_sample(struct perf_event *event)
+{
+ return engine_config_sample(event->attr.config);
+}
+
+static u8 engine_event_class(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
+}
+
+static u8 engine_event_instance(struct perf_event *event)
+{
+ return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
+}
+
+static bool is_engine_config(u64 config)
+{
+ return config < __I915_PMU_OTHER(0);
+}
+
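+/*
+ * Engine sample types occupy the low ENGINE_SAMPLE_BITS (16) bits of the
+ * enable mask, one bit per sample type, while the remaining events take
+ * consecutive bits above them (I915_PMU_ACTUAL_FREQUENCY is bit 16,
+ * I915_PMU_REQUESTED_FREQUENCY bit 17).
+ */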
+static unsigned int config_enabled_bit(u64 config)
+{
+ if (is_engine_config(config))
+ return engine_config_sample(config);
+ else
+ return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
+}
+
+static u64 config_enabled_mask(u64 config)
+{
+ return BIT_ULL(config_enabled_bit(config));
+}
+
+static bool is_engine_event(struct perf_event *event)
+{
+ return is_engine_config(event->attr.config);
+}
+
+static unsigned int event_enabled_bit(struct perf_event *event)
+{
+ return config_enabled_bit(event->attr.config);
+}
+
+static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
+{
+ if (!fw)
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+
+ return true;
+}
+
+static void
+update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
+{
+ /*
+ * Since we are doing stochastic sampling for these counters,
+ * average the delta with the previous value for better accuracy.
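+ * With unit == PERIOD, two consecutive busy samples of one thus add a
+ * full PERIOD to the counter, while a one followed by a zero adds only
+ * half of one ((1 + 0) * PERIOD / 2).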
+ */
+ sample->cur += div_u64(mul_u32_u32(sample->prev + val, unit), 2);
+ sample->prev = val;
+}
+
+static void engines_sample(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool fw = false;
+
+ if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
+ return;
+
+ if (!dev_priv->gt.awake)
+ return;
+
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return;
+
+ for_each_engine(engine, dev_priv, id) {
+ u32 current_seqno = intel_engine_get_seqno(engine);
+ u32 last_seqno = intel_engine_last_submit(engine);
+ u32 val;
+
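+ /*
+ * The engine counts as busy while the most recently submitted
+ * seqno has not yet been passed (completed) by the hardware.
+ */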
+ val = !i915_seqno_passed(current_seqno, last_seqno);
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+ PERIOD, val);
+
+ if (val && (engine->pmu.enable &
+ (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
+ fw = grab_forcewake(dev_priv, fw);
+
+ val = I915_READ_FW(RING_CTL(engine->mmio_base));
+ } else {
+ val = 0;
+ }
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+ PERIOD, !!(val & RING_WAIT));
+
+ update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+ PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+ }
+
+ if (fw)
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void frequency_sample(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->pmu.enable &
+ config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
+ u32 val;
+
+ val = dev_priv->gt_pm.rps.cur_freq;
+ if (dev_priv->gt.awake &&
+ intel_runtime_pm_get_if_in_use(dev_priv)) {
+ val = intel_get_cagf(dev_priv,
+ I915_READ_NOTRACE(GEN6_RPSTAT1));
+ intel_runtime_pm_put(dev_priv);
+ }
+
+ update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+ 1, intel_gpu_freq(dev_priv, val));
+ }
+
+ if (dev_priv->pmu.enable &
+ config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
+ update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
+ intel_gpu_freq(dev_priv,
+ dev_priv->gt_pm.rps.cur_freq));
+ }
+}
+
+static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
+{
+ struct drm_i915_private *i915 =
+ container_of(hrtimer, struct drm_i915_private, pmu.timer);
+
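+ /* Self-disarm once the last event has been disabled. */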
+ if (i915->pmu.enable == 0)
+ return HRTIMER_NORESTART;
+
+ engines_sample(i915);
+ frequency_sample(i915);
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
+ return HRTIMER_RESTART;
+}
+
+static void i915_pmu_event_destroy(struct perf_event *event)
+{
+ WARN_ON(event->parent);
+}
+
+static int engine_event_init(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+
+ if (!intel_engine_lookup_user(i915, engine_event_class(event),
+ engine_event_instance(event)))
+ return -ENODEV;
+
+ switch (engine_event_sample(event)) {
+ case I915_SAMPLE_BUSY:
+ case I915_SAMPLE_WAIT:
+ break;
+ case I915_SAMPLE_SEMA:
+ if (INTEL_GEN(i915) < 6)
+ return -ENODEV;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int i915_pmu_event_init(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ int cpu, ret;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* unsupported modes and filters */
+ if (event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ cpu = cpumask_any_and(&i915_pmu_cpumask,
+ topology_sibling_cpumask(event->cpu));
+ if (cpu >= nr_cpu_ids)
+ return -ENODEV;
+
+ if (is_engine_event(event)) {
+ ret = engine_event_init(event);
+ } else {
+ ret = 0;
+ switch (event->attr.config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ /* Requires a mutex for sampling! */
+ ret = -ENODEV;
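+ /* Fall through - ACTUAL_FREQUENCY also requires gen6+. */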
+ case I915_PMU_REQUESTED_FREQUENCY:
+ if (INTEL_GEN(i915) < 6)
+ ret = -ENODEV;
+ break;
+ default:
+ ret = -ENOENT;
+ break;
+ }
+ }
+ if (ret)
+ return ret;
+
+ event->cpu = cpu;
+ if (!event->parent)
+ event->destroy = i915_pmu_event_destroy;
+
+ return 0;
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ u64 val = 0;
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+
+ if (WARN_ON_ONCE(!engine)) {
+ /* Do nothing */
+ } else {
+ val = engine->pmu.sample[sample].cur;
+ }
+ } else {
+ switch (event->attr.config) {
+ case I915_PMU_ACTUAL_FREQUENCY:
+ val =
+ div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
+ FREQUENCY);
+ break;
+ case I915_PMU_REQUESTED_FREQUENCY:
+ val =
+ div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
+ FREQUENCY);
+ break;
+ }
+ }
+
+ return val;
+}
+
+static void i915_pmu_event_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new;
+
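+ /*
+ * Fold the delta since the previous read into the event count,
+ * retrying if a concurrent reader updated prev_count meanwhile.
+ */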
+again:
+ prev = local64_read(&hwc->prev_count);
+ new = __i915_pmu_event_read(event);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
+ goto again;
+
+ local64_add(new - prev, &event->count);
+}
+
+static void i915_pmu_enable(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ unsigned int bit = event_enabled_bit(event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ /*
+ * Start the sampling timer when enabling the first event.
+ */
+ if (i915->pmu.enable == 0)
+ hrtimer_start_range_ns(&i915->pmu.timer,
+ ns_to_ktime(PERIOD), 0,
+ HRTIMER_MODE_REL_PINNED);
+
+ /*
+ * Update the bitmask of enabled events and increment
+ * the event reference counter.
+ */
+ GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+ GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
+ i915->pmu.enable |= BIT_ULL(bit);
+ i915->pmu.enable_count[bit]++;
+
+ /*
+ * For per-engine events the bitmask and reference counting
+ * is stored per engine.
+ */
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ GEM_BUG_ON(!engine);
+ engine->pmu.enable |= BIT(sample);
+
+ GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+ engine->pmu.enable_count[sample]++;
+ }
+
+ /*
+ * Store the current counter value so we can report the correct delta
+ * for all listeners. Even when the event was already enabled and has
+ * an existing non-zero value.
+ */
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+}
+
+static void i915_pmu_disable(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ unsigned int bit = event_enabled_bit(event);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ if (is_engine_event(event)) {
+ u8 sample = engine_event_sample(event);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ GEM_BUG_ON(!engine);
+ GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+ GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--engine->pmu.enable_count[sample] == 0)
+ engine->pmu.enable &= ~BIT(sample);
+ }
+
+ GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+ GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
+ /*
+ * Decrement the reference count and clear the enabled
+ * bitmask when the last listener on an event goes away.
+ */
+ if (--i915->pmu.enable_count[bit] == 0)
+ i915->pmu.enable &= ~BIT_ULL(bit);
+
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+}
+
+static void i915_pmu_event_start(struct perf_event *event, int flags)
+{
+ i915_pmu_enable(event);
+ event->hw.state = 0;
+}
+
+static void i915_pmu_event_stop(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_UPDATE)
+ i915_pmu_event_read(event);
+ i915_pmu_disable(event);
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static int i915_pmu_event_add(struct perf_event *event, int flags)
+{
+ if (flags & PERF_EF_START)
+ i915_pmu_event_start(event, flags);
+
+ return 0;
+}
+
+static void i915_pmu_event_del(struct perf_event *event, int flags)
+{
+ i915_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int i915_pmu_event_event_idx(struct perf_event *event)
+{
+ return 0;
+}
+
+static ssize_t i915_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "%s\n", (char *)eattr->var);
+}
+
+#define I915_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+static struct attribute *i915_pmu_format_attrs[] = {
+ I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = i915_pmu_format_attrs,
+};
+
+static ssize_t i915_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "config=0x%lx\n", (unsigned long)eattr->var);
+}
+
+#define I915_EVENT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, i915_pmu_event_show, NULL), \
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+#define I915_EVENT_STR(_name, _str) \
+ (&((struct perf_pmu_events_attr[]) { \
+ { .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = _str, } \
+ })[0].attr.attr)
+
+#define I915_EVENT(_name, _config, _unit) \
+ I915_EVENT_ATTR(_name, _config), \
+ I915_EVENT_STR(_name.unit, _unit)
+
+#define I915_ENGINE_EVENT(_name, _class, _instance, _sample) \
+ I915_EVENT_ATTR(_name, __I915_PMU_ENGINE(_class, _instance, _sample)), \
+ I915_EVENT_STR(_name.unit, "ns")
+
+#define I915_ENGINE_EVENTS(_name, _class, _instance) \
+ I915_ENGINE_EVENT(_name##_instance-busy, _class, _instance, I915_SAMPLE_BUSY), \
+ I915_ENGINE_EVENT(_name##_instance-sema, _class, _instance, I915_SAMPLE_SEMA), \
+ I915_ENGINE_EVENT(_name##_instance-wait, _class, _instance, I915_SAMPLE_WAIT)
+
+static struct attribute *i915_pmu_events_attrs[] = {
+ I915_ENGINE_EVENTS(rcs, I915_ENGINE_CLASS_RENDER, 0),
+ I915_ENGINE_EVENTS(bcs, I915_ENGINE_CLASS_COPY, 0),
+ I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 0),
+ I915_ENGINE_EVENTS(vcs, I915_ENGINE_CLASS_VIDEO, 1),
+ I915_ENGINE_EVENTS(vecs, I915_ENGINE_CLASS_VIDEO_ENHANCE, 0),
+
+ I915_EVENT(actual-frequency, I915_PMU_ACTUAL_FREQUENCY, "MHz"),
+ I915_EVENT(requested-frequency, I915_PMU_REQUESTED_FREQUENCY, "MHz"),
+
+ NULL,
+};
+
+static const struct attribute_group i915_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = i915_pmu_events_attrs,
+};
+
+static ssize_t
+i915_pmu_get_attr_cpumask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
+}
+
+static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);
+
+static struct attribute *i915_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group i915_pmu_cpumask_attr_group = {
+ .attrs = i915_cpumask_attrs,
+};
+
+static const struct attribute_group *i915_pmu_attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &i915_pmu_events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ unsigned int target;
+
+ GEM_BUG_ON(!pmu->base.event_init);
+
+ target = cpumask_any(&i915_pmu_cpumask);
+ /* Select the first online CPU as a designated reader. */
+ if (target >= nr_cpu_ids)
+ cpumask_set_cpu(cpu, &i915_pmu_cpumask);
+
+ return 0;
+}
+
+static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
+{
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ unsigned int target;
+
+ GEM_BUG_ON(!pmu->base.event_init);
+
+ if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
+ target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
+ /* Migrate events if there is a valid target */
+ if (target < nr_cpu_ids) {
+ cpumask_set_cpu(target, &i915_pmu_cpumask);
+ perf_pmu_migrate_context(&pmu->base, cpu, target);
+ }
+ }
+
+ return 0;
+}
+
+static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
+#endif
+
+static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ enum cpuhp_state slot;
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/x86/intel/i915:online",
+ i915_pmu_cpu_online,
+ i915_pmu_cpu_offline);
+ if (ret < 0)
+ return ret;
+
+ slot = ret;
+ ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
+ if (ret) {
+ cpuhp_remove_multi_state(slot);
+ return ret;
+ }
+
+ cpuhp_slot = slot;
+#endif
+ return 0;
+}
+
+static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ WARN_ON(cpuhp_slot == CPUHP_INVALID);
+ WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
+ cpuhp_remove_multi_state(cpuhp_slot);
+#endif
+}
+
+void i915_pmu_register(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (INTEL_GEN(i915) <= 2) {
+ DRM_INFO("PMU not supported for this GPU.\n");
+ return;
+ }
+
+ i915->pmu.base.attr_groups = i915_pmu_attr_groups;
+ i915->pmu.base.task_ctx_nr = perf_invalid_context;
+ i915->pmu.base.event_init = i915_pmu_event_init;
+ i915->pmu.base.add = i915_pmu_event_add;
+ i915->pmu.base.del = i915_pmu_event_del;
+ i915->pmu.base.start = i915_pmu_event_start;
+ i915->pmu.base.stop = i915_pmu_event_stop;
+ i915->pmu.base.read = i915_pmu_event_read;
+ i915->pmu.base.event_idx = i915_pmu_event_event_idx;
+
+ spin_lock_init(&i915->pmu.lock);
+ hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ i915->pmu.timer.function = i915_sample;
+
+ ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
+ if (ret)
+ goto err;
+
+ ret = i915_pmu_register_cpuhp_state(i915);
+ if (ret)
+ goto err_unreg;
+
+ return;
+
+err_unreg:
+ perf_pmu_unregister(&i915->pmu.base);
+err:
+ i915->pmu.base.event_init = NULL;
+ DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
+}
+
+void i915_pmu_unregister(struct drm_i915_private *i915)
+{
+ if (!i915->pmu.base.event_init)
+ return;
+
+ WARN_ON(i915->pmu.enable);
+
+ hrtimer_cancel(&i915->pmu.timer);
+
+ i915_pmu_unregister_cpuhp_state(i915);
+
+ perf_pmu_unregister(&i915->pmu.base);
+ i915->pmu.base.event_init = NULL;
+}
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
new file mode 100644
index 000000000000..1ac8b2e34607
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef __I915_PMU_H__
+#define __I915_PMU_H__
+
+#include <linux/hrtimer.h>
+#include <linux/perf_event.h>
+#include <linux/spinlock.h>
+
+#include <drm/i915_drm.h>
+
+enum {
+ __I915_SAMPLE_FREQ_ACT = 0,
+ __I915_SAMPLE_FREQ_REQ,
+ __I915_NUM_PMU_SAMPLERS
+};
+
+/**
+ * How many different events we track in the global PMU mask.
+ *
+ * It is also used to size the array of event reference counters.
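+ * (With I915_PMU_SAMPLE_BITS at 4 and two non-engine events this
+ * currently works out to 16 + 2 = 18 bits.)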
+ */
+#define I915_PMU_MASK_BITS \
+ ((1 << I915_PMU_SAMPLE_BITS) + \
+ (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
+
+struct i915_pmu_sample {
+ u64 cur;
+ u32 prev;
+};
+
+struct i915_pmu {
+ /**
+ * @node: List node for CPU hotplug handling.
+ */
+ struct hlist_node node;
+ /**
+ * @base: PMU base.
+ */
+ struct pmu base;
+ /**
+ * @lock: Lock protecting enable mask and ref count handling.
+ */
+ spinlock_t lock;
+ /**
+ * @timer: Timer for internal i915 PMU sampling.
+ */
+ struct hrtimer timer;
+ /**
+ * @enable: Bitmask of all currently enabled events.
+ *
+ * Bits are derived from uAPI event numbers such that the low 16 bits
+ * correspond to engine event _sample_ _types_ (I915_SAMPLE_BUSY is
+ * bit 0), and higher bits correspond to other events (for instance
+ * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc).
+ *
+ * In other words, low 16 bits are not per engine but per engine
+ * sampler type, while the upper bits are directly mapped to other
+ * event types.
+ */
+ u64 enable;
+ /**
+ * @enable_count: Reference counts for the enabled events.
+ *
+ * Array indices are mapped in the same way as bits in the @enable field
+ * and they are used to control sampling on/off when multiple clients
+ * are using the PMU API.
+ */
+ unsigned int enable_count[I915_PMU_MASK_BITS];
+ /**
+ * @sample: Current and previous (raw) counters for sampling events.
+ *
+ * These counters are updated from the i915 PMU sampling timer.
+ *
+ * Only global counters are held here, while the per-engine ones are in
+ * struct intel_engine_cs.
+ */
+ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+};
+
+#ifdef CONFIG_PERF_EVENTS
+void i915_pmu_register(struct drm_i915_private *i915);
+void i915_pmu_unregister(struct drm_i915_private *i915);
+#else
+static inline void i915_pmu_register(struct drm_i915_private *i915) {}
+static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
+#endif
+
+#endif /* __I915_PMU_H__ */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 96c80fa0fcac..09bf043c1c2e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,6 +186,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define VIDEO_ENHANCEMENT_CLASS 2
#define COPY_ENGINE_CLASS 3
#define OTHER_CLASS 4
+#define MAX_ENGINE_CLASS 4
+
+#define MAX_ENGINE_INSTANCE 1
/* PCI config space */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 22c095035539..a5a494210b9e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -205,6 +205,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));
class_info = &intel_engine_classes[info->class];
+ if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+ return -EINVAL;
+
+ if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+ return -EINVAL;
+
+ if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ return -EINVAL;
+
GEM_BUG_ON(dev_priv->engine[id]);
engine = kzalloc(sizeof(*engine), GFP_KERNEL);
if (!engine)
@@ -234,6 +243,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+ dev_priv->engine_class[info->class][info->instance] = engine;
dev_priv->engine[id] = engine;
return 0;
}
@@ -1816,6 +1826,29 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
drm_printf(m, "\n");
}
+static u8 user_class_map[] = {
+ [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
+ [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
+ [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
+ [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
+};
+
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
+{
+ if (class >= ARRAY_SIZE(user_class_map))
+ return NULL;
+
+ class = user_class_map[class];
+
+ GEM_BUG_ON(class > MAX_ENGINE_CLASS);
+
+ if (instance > MAX_ENGINE_INSTANCE)
+ return NULL;
+
+ return i915->engine_class[class][instance];
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 80cd7812ce02..7ee0f18d4179 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,7 @@
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+#include "i915_pmu.h"
#include "i915_selftest.h"
struct drm_printer;
@@ -338,6 +339,28 @@ struct intel_engine_cs {
I915_SELFTEST_DECLARE(bool mock : 1);
} breadcrumbs;
+ struct {
+ /**
+ * @enable: Bitmask of enabled sample events on this engine.
+ *
+ * Bits correspond to sample event types, for instance
+ * I915_SAMPLE_BUSY is bit 0 etc.
+ */
+ u32 enable;
+ /**
+ * @enable_count: Reference count for the enabled samplers.
+ *
+ * Index number corresponds to the bit number from @enable.
+ */
+ unsigned int enable_count[I915_PMU_SAMPLE_BITS];
+ /**
+ * @sample: Counter values for sampling events.
+ *
+ * Our internal timer stores the current counters in this field.
+ */
+ struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
+ } pmu;
+
/*
* A pool of objects to use as shadow copies of client batch buffers
* when the command parser is enabled. Prevents the client from
@@ -926,4 +949,7 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
+struct intel_engine_cs *
+intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index b57985929553..40e7b438bdaa 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -102,6 +102,45 @@ enum drm_i915_gem_engine_class {
I915_ENGINE_CLASS_INVALID = -1
};
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
+ *
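+ * Engine events encode class, instance and sample type into the perf
+ * ``config`` field via __I915_PMU_ENGINE(), while non-engine events such
+ * as the frequency counters follow in the __I915_PMU_OTHER() range.
+ *
+ * A minimal userspace sketch (error handling elided; perf_event_open()
+ * stands for the raw syscall(2) invocation, and "i915_pmu_type" for the
+ * value read from the PMU "type" file in sysfs)::
+ *
+ *     struct perf_event_attr attr = { };
+ *     __u64 count;
+ *     int fd;
+ *
+ *     attr.type = i915_pmu_type;
+ *     attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);
+ *     fd = perf_event_open(&attr, -1, cpu, -1, 0);
+ *     read(fd, &count, sizeof(count));
+ *
+ * where "cpu" is one of the CPUs listed in the PMU "cpumask" file, the
+ * pid argument is -1 (these are system-wide events), and count returns
+ * the accumulated busy time in nanoseconds.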
+ */
+
+enum drm_i915_pmu_engine_sample {
+ I915_SAMPLE_BUSY = 0,
+ I915_SAMPLE_WAIT = 1,
+ I915_SAMPLE_SEMA = 2,
+ I915_ENGINE_SAMPLE_MAX /* non-ABI */
+};
+
+#define I915_PMU_SAMPLE_BITS (4)
+#define I915_PMU_SAMPLE_MASK (0xf)
+#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
+#define I915_PMU_CLASS_SHIFT \
+ (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
+
+#define __I915_PMU_ENGINE(class, instance, sample) \
+ ((class) << I915_PMU_CLASS_SHIFT | \
+ (instance) << I915_PMU_SAMPLE_BITS | \
+ (sample))
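+/* e.g. I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_VIDEO, 1) == (2 << 12) | (1 << 4) == 0x2010 */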
+
+#define I915_PMU_ENGINE_BUSY(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
+
+#define I915_PMU_ENGINE_WAIT(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
+
+#define I915_PMU_ENGINE_SEMA(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
+
+#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+
+#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
+#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
+
+#define I915_PMU_LAST I915_PMU_REQUESTED_FREQUENCY
+
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use