Diffstat (limited to 'drivers/gpu/drm/i915/i915_perf.c')
 drivers/gpu/drm/i915/i915_perf.c | 4067 ++++++++++++++++++++++++++++++-------
 1 file changed, 2909 insertions(+), 1158 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2b2eb57ca71f..0b9d9f3f7813 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -192,25 +192,34 @@
*/
#include <linux/anon_inodes.h>
+#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/uuid.h>
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_internal.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_regs.h"
+#include "gt/intel_engine_user.h"
+#include "gt/intel_execlists_submission.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_clock_utils.h"
+#include "gt/intel_gt_mcr.h"
+#include "gt/intel_gt_print.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_lrc.h"
+#include "gt/intel_lrc_reg.h"
+#include "gt/intel_rc6.h"
+#include "gt/intel_ring.h"
+#include "gt/uc/intel_guc_slpc.h"
+
#include "i915_drv.h"
-#include "i915_oa_hsw.h"
-#include "i915_oa_bdw.h"
-#include "i915_oa_chv.h"
-#include "i915_oa_sklgt2.h"
-#include "i915_oa_sklgt3.h"
-#include "i915_oa_sklgt4.h"
-#include "i915_oa_bxt.h"
-#include "i915_oa_kblgt2.h"
-#include "i915_oa_kblgt3.h"
-#include "i915_oa_glk.h"
-#include "i915_oa_cflgt2.h"
-#include "i915_oa_cflgt3.h"
-#include "i915_oa_cnl.h"
-#include "i915_oa_icl.h"
-#include "intel_lrc_reg.h"
+#include "i915_file_private.h"
+#include "i915_perf.h"
+#include "i915_perf_oa_regs.h"
+#include "i915_reg.h"
+#include "i915_mmio_range.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -230,26 +239,17 @@
*
* Although this can be observed explicitly while copying reports to userspace
* by checking for a zeroed report-id field in tail reports, we want to account
- * for this earlier, as part of the oa_buffer_check to avoid lots of redundant
- * read() attempts.
- *
- * In effect we define a tail pointer for reading that lags the real tail
- * pointer by at least %OA_TAIL_MARGIN_NSEC nanoseconds, which gives enough
- * time for the corresponding reports to become visible to the CPU.
- *
- * To manage this we actually track two tail pointers:
- * 1) An 'aging' tail with an associated timestamp that is tracked until we
- * can trust the corresponding data is visible to the CPU; at which point
- * it is considered 'aged'.
- * 2) An 'aged' tail that can be used for read()ing.
- *
- * The two separate pointers let us decouple read()s from tail pointer aging.
- *
- * The tail pointers are checked and updated at a limited rate within a hrtimer
- * callback (the same callback that is used for delivering EPOLLIN events)
- *
- * Initially the tails are marked invalid with %INVALID_TAIL_PTR which
- * indicates that an updated tail pointer is needed.
+ * for this earlier, as part of oa_buffer_check_unlocked() to avoid lots of
+ * redundant read() attempts.
+ *
+ * We work around this issue in oa_buffer_check_unlocked() by reading the
+ * reports in the OA buffer, starting from the tail reported by the HW, until
+ * we find a report whose first 2 dwords are not 0, meaning the report before
+ * it has landed completely in memory and is ready to be read. Those 2 dwords
+ * are cleared once a report is read, and the whole buffer is cleared upon OA
+ * buffer initialization. The first dword is the reason for the report and the
+ * second is its timestamp, so the chance of both fields being 0 in a landed
+ * report is fairly small. A more detailed explanation is available in
+ * oa_buffer_check_unlocked().
*
* Most of the implementation details for this workaround are in
* oa_buffer_check_unlocked() and _append_oa_reports()
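
To make the workaround concrete, here is a minimal sketch of the backward walk, mirroring the loop in oa_buffer_check_unlocked(); the helper name is hypothetical and the 32-bit header layout is an assumption for illustration:

	/*
	 * Sketch: walk back from the HW tail until a report whose first
	 * two dwords (reason + timestamp) are non-zero is found. Given
	 * ordered writes, a non-zero header proves every report before
	 * it has fully landed. OA_TAKEN() is the circular-distance
	 * helper used throughout this file.
	 */
	static u32 find_landed_tail(const u8 *vaddr, u32 sw_tail,
				    u32 hw_tail, u32 report_size)
	{
		u32 tail = hw_tail;

		while (OA_TAKEN(tail, sw_tail) >= report_size) {
			const u32 *report = (const u32 *)(vaddr + tail);

			if (report[0] || report[1])
				break;	/* reports before this have landed */

			tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
		}

		return tail;	/* exclusive bound of readable data */
	}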
@@ -264,15 +264,13 @@
#define OA_TAIL_MARGIN_NSEC 100000ULL
#define INVALID_TAIL_PTR 0xffffffff
-/* frequency for checking whether the OA unit has written new reports to the
- * circular OA buffer...
+/* The default frequency for checking whether the OA unit has written new
+ * reports to the circular OA buffer...
*/
-#define POLL_FREQUENCY 200
-#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
+#define DEFAULT_POLL_FREQUENCY_HZ 200
+#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
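
With the default 200 Hz check frequency, DEFAULT_POLL_PERIOD_NS evaluates to NSEC_PER_SEC / 200 = 5,000,000 ns, i.e. the poll-check hrtimer fires every 5 ms unless userspace requests a different period via the poll_oa_period open property.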
/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
-static int zero;
-static int one = 1;
static u32 i915_perf_stream_paranoid = true;
/* The maximum exponent the hardware accepts is 63 (essentially it selects one
@@ -289,11 +287,13 @@ static u32 i915_perf_stream_paranoid = true;
/* On Gen8+ automatically triggered OA reports include a 'reason' field... */
#define OAREPORT_REASON_MASK 0x3f
+#define OAREPORT_REASON_MASK_EXTENDED 0x7f
#define OAREPORT_REASON_SHIFT 19
#define OAREPORT_REASON_TIMER (1<<0)
#define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_CLK_RATIO (1<<5)
+#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
*
@@ -316,7 +316,7 @@ static u32 i915_oa_max_sample_rate = 100000;
* code assumes all reports have a power-of-two size and ~(size - 1) can
* be used as a mask to align the OA tail pointer.
*/
-static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+static const struct i915_oa_format oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A13] = { 0, 64 },
[I915_OA_FORMAT_A29] = { 1, 128 },
[I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
@@ -325,13 +325,17 @@ static const struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
[I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
[I915_OA_FORMAT_C4_B8] = { 7, 64 },
-};
-
-static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
[I915_OA_FORMAT_A12] = { 0, 64 },
[I915_OA_FORMAT_A12_B8_C8] = { 2, 128 },
[I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
- [I915_OA_FORMAT_C4_B8] = { 7, 64 },
+ [I915_OAR_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 },
+ [I915_OA_FORMAT_A24u40_A14u32_B8_C8] = { 5, 256 },
+ [I915_OAM_FORMAT_MPEC8u64_B8_C8] = { 1, 192, TYPE_OAM, HDR_64_BIT },
+ [I915_OAM_FORMAT_MPEC8u32_B8_C8] = { 2, 128, TYPE_OAM, HDR_64_BIT },
+};
+
+static const u32 mtl_oa_base[] = {
+ [PERF_GROUP_OAM_SAMEDIA_0] = 0x393000,
};
#define SAMPLE_OA_REPORT (1<<0)
@@ -340,11 +344,20 @@ static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = {
* struct perf_open_properties - for validated properties given to open a stream
* @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
* @single_context: Whether a single or all gpu contexts should be monitored
+ * @hold_preemption: Whether preemption is disabled for the filtered
+ * context
* @ctx_handle: A gem ctx handle for use with @single_context
* @metrics_set: An ID for an OA unit metric set advertised via sysfs
* @oa_format: An OA unit HW report format
* @oa_periodic: Whether to enable periodic OA unit sampling
* @oa_period_exponent: The OA unit sampling period is derived from this
+ * @engine: The engine (typically rcs0) being monitored by the OA unit
+ * @has_sseu: Whether @sseu was specified by userspace
+ * @sseu: internal SSEU configuration computed either from the configuration
+ * specified by userspace in the opening parameters or from a default value
+ * (see get_default_sseu_config())
+ * @poll_oa_period: The period in nanoseconds at which the CPU will check for OA
+ * data availability
*
* As read_properties_unlocked() enumerates and validates the properties given
* to open a stream of metrics the configuration is built up in the structure
@@ -354,6 +367,7 @@ struct perf_open_properties {
u32 sample_flags;
u64 single_context:1;
+ u64 hold_preemption:1;
u64 ctx_handle;
/* OA sampling state */
@@ -361,71 +375,151 @@ struct perf_open_properties {
int oa_format;
bool oa_periodic;
int oa_period_exponent;
+
+ struct intel_engine_cs *engine;
+
+ bool has_sseu;
+ struct intel_sseu sseu;
+
+ u64 poll_oa_period;
};
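
For reference, userspace supplies these properties as (key, value) u64 pairs to DRM_IOCTL_I915_PERF_OPEN. A minimal open sketch against the uAPI in include/uapi/drm/i915_drm.h (the metrics-set ID and exponent below are placeholder values):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>	/* uAPI header; include path may vary */

	static int open_oa_stream(int drm_fd)
	{
		uint64_t properties[] = {
			/* Include raw OA reports in samples */
			DRM_I915_PERF_PROP_SAMPLE_OA, 1,
			/* Metrics set ID advertised via sysfs (placeholder) */
			DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
			DRM_I915_PERF_PROP_OA_FORMAT,
				I915_OA_FORMAT_A32u40_A4u32_B8_C8,
			/* Sampling period exponent (placeholder) */
			DRM_I915_PERF_PROP_OA_EXPONENT, 16,
		};
		struct drm_i915_perf_open_param param = {
			.flags = I915_PERF_FLAG_FD_CLOEXEC |
				 I915_PERF_FLAG_FD_NONBLOCK,
			.num_properties = sizeof(properties) /
					  (2 * sizeof(uint64_t)),
			.properties_ptr = (uintptr_t)properties,
		};

		return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
	}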
-static void free_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
+struct i915_oa_config_bo {
+ struct llist_node node;
+
+ struct i915_oa_config *oa_config;
+ struct i915_vma *vma;
+};
+
+static struct ctl_table_header *sysctl_header;
+
+static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer);
+
+void i915_oa_config_release(struct kref *ref)
{
- if (!PTR_ERR(oa_config->flex_regs))
- kfree(oa_config->flex_regs);
- if (!PTR_ERR(oa_config->b_counter_regs))
- kfree(oa_config->b_counter_regs);
- if (!PTR_ERR(oa_config->mux_regs))
- kfree(oa_config->mux_regs);
- kfree(oa_config);
+ struct i915_oa_config *oa_config =
+ container_of(ref, typeof(*oa_config), ref);
+
+ kfree(oa_config->flex_regs);
+ kfree(oa_config->b_counter_regs);
+ kfree(oa_config->mux_regs);
+
+ kfree_rcu(oa_config, rcu);
}
-static void put_oa_config(struct drm_i915_private *dev_priv,
- struct i915_oa_config *oa_config)
+struct i915_oa_config *
+i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set)
{
- if (!atomic_dec_and_test(&oa_config->ref_count))
- return;
+ struct i915_oa_config *oa_config;
- free_oa_config(dev_priv, oa_config);
+ rcu_read_lock();
+ oa_config = idr_find(&perf->metrics_idr, metrics_set);
+ if (oa_config)
+ oa_config = i915_oa_config_get(oa_config);
+ rcu_read_unlock();
+
+ return oa_config;
}
-static int get_oa_config(struct drm_i915_private *dev_priv,
- int metrics_set,
- struct i915_oa_config **out_config)
+static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo)
{
- int ret;
+ i915_oa_config_put(oa_bo->oa_config);
+ i915_vma_put(oa_bo->vma);
+ kfree(oa_bo);
+}
- if (metrics_set == 1) {
- *out_config = &dev_priv->perf.oa.test_config;
- atomic_inc(&dev_priv->perf.oa.test_config.ref_count);
- return 0;
- }
+static inline const
+struct i915_perf_regs *__oa_regs(struct i915_perf_stream *stream)
+{
+ return &stream->engine->oa_group->regs;
+}
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
- if (ret)
- return ret;
+static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
- *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set);
- if (!*out_config)
- ret = -EINVAL;
+ return intel_uncore_read(uncore, __oa_regs(stream)->oa_tail_ptr) &
+ GEN12_OAG_OATAILPTR_MASK;
+}
+
+static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+
+ return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
+}
+
+static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
+
+ return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+}
+
+#define oa_report_header_64bit(__s) \
+ ((__s)->oa_buffer.format->header == HDR_64_BIT)
+
+static u64 oa_report_id(struct i915_perf_stream *stream, void *report)
+{
+ return oa_report_header_64bit(stream) ? *(u64 *)report : *(u32 *)report;
+}
+
+static u64 oa_report_reason(struct i915_perf_stream *stream, void *report)
+{
+ return (oa_report_id(stream, report) >> OAREPORT_REASON_SHIFT) &
+ (GRAPHICS_VER(stream->perf->i915) == 12 ?
+ OAREPORT_REASON_MASK_EXTENDED :
+ OAREPORT_REASON_MASK);
+}
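
A small usage sketch of the reason decoding (the helper is hypothetical, for illustration only):

	static bool report_is_timer_triggered(struct i915_perf_stream *stream,
					      void *report)
	{
		/* A zero reason would indicate an unlanded/invalid report */
		return oa_report_reason(stream, report) & OAREPORT_REASON_TIMER;
	}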
+
+static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report)
+{
+ if (oa_report_header_64bit(stream))
+ *(u64 *)report = 0;
else
- atomic_inc(&(*out_config)->ref_count);
+ *report = 0;
+}
- mutex_unlock(&dev_priv->perf.metrics_lock);
+static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report)
+{
+ return !(oa_report_id(stream, report) &
+ stream->perf->gen8_valid_ctx_bit);
+}
- return ret;
+static u64 oa_timestamp(struct i915_perf_stream *stream, void *report)
+{
+ return oa_report_header_64bit(stream) ?
+ *((u64 *)report + 1) :
+ *((u32 *)report + 1);
}
-static u32 gen8_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static void oa_timestamp_clear(struct i915_perf_stream *stream, u32 *report)
{
- return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK;
+ if (oa_report_header_64bit(stream))
+ *(u64 *)&report[2] = 0;
+ else
+ report[1] = 0;
}
-static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
+static u32 oa_context_id(struct i915_perf_stream *stream, u32 *report)
{
- u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
+ u32 ctx_id = oa_report_header_64bit(stream) ? report[4] : report[2];
- return oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+ return ctx_id & stream->specific_ctx_id_mask;
+}
+
+static void oa_context_id_squash(struct i915_perf_stream *stream, u32 *report)
+{
+ if (oa_report_header_64bit(stream))
+ report[4] = INVALID_CTX_ID;
+ else
+ report[2] = INVALID_CTX_ID;
}
/**
* oa_buffer_check_unlocked - check for data and update tail ptr state
- * @dev_priv: i915 device instance
+ * @stream: i915 stream instance
*
* This is either called via fops (for blocking reads in user ctx) or the poll
* check hrtimer (atomic ctx) to check the OA buffer tail pointer and check
@@ -438,8 +532,7 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
* (See description of OA_TAIL_MARGIN_NSEC above for further details.)
*
* Besides returning true when there is data available to read() this function
- * also has the side effect of updating the oa_buffer.tails[], .aging_timestamp
- * and .aged_tail_idx state used for reading.
+ * also updates the tail in the oa_buffer object.
*
* Note: It's safe to read OA config state here unlocked, assuming that this is
* only called while the stream is enabled, while the global OA configuration
@@ -447,96 +540,74 @@ static u32 gen7_oa_hw_tail_read(struct drm_i915_private *dev_priv)
*
* Returns: %true if the OA buffer contains data, else %false
*/
-static bool oa_buffer_check_unlocked(struct drm_i915_private *dev_priv)
+static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
{
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
+ int report_size = stream->oa_buffer.format->size;
+ u32 tail, hw_tail;
unsigned long flags;
- unsigned int aged_idx;
- u32 head, hw_tail, aged_tail, aging_tail;
- u64 now;
-
- /* We have to consider the (unlikely) possibility that read() errors
- * could result in an OA buffer reset which might reset the head,
- * tails[] and aged_tail state.
- */
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ bool pollin;
+ u32 partial_report_size;
- /* NB: The head we observe here might effectively be a little out of
- * date (between head and tails[aged_idx].offset if there is currently
- * a read() in progress.
+ /*
+ * We have to consider the (unlikely) possibility that read() errors
+ * could result in an OA buffer reset which might reset the head and
+ * tail state.
*/
- head = dev_priv->perf.oa.oa_buffer.head;
-
- aged_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- aged_tail = dev_priv->perf.oa.oa_buffer.tails[aged_idx].offset;
- aging_tail = dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset;
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- hw_tail = dev_priv->perf.oa.ops.oa_hw_tail_read(dev_priv);
+ hw_tail = stream->perf->ops.oa_hw_tail_read(stream);
+ hw_tail -= gtt_offset;
- /* The tail pointer increases in 64 byte increments,
- * not in report_size steps...
+ /*
+ * The tail pointer increases in 64 byte increments, not in report_size
+ * steps. Also the report size may not be a power of 2. Compute the size
+ * of any potentially partial report that has landed in the OA buffer.
*/
- hw_tail &= ~(report_size - 1);
+ partial_report_size = OA_TAKEN(hw_tail, stream->oa_buffer.tail);
+ partial_report_size %= report_size;
- now = ktime_get_mono_fast_ns();
+ /* Subtract partial amount off the tail */
+ hw_tail = OA_TAKEN(hw_tail, partial_report_size);
- /* Update the aged tail
- *
- * Flip the tail pointer available for read()s once the aging tail is
- * old enough to trust that the corresponding data will be visible to
- * the CPU...
+ tail = hw_tail;
+
+ /*
+ * Walk the stream backward until we find a report whose report
+ * id and timestamp are not 0. Since the circular buffer pointers
+ * progress by increments of 64 bytes and reports can be up
+ * to 256 bytes long, we can't tell whether a report has fully
+ * landed in memory before the report id and timestamp of the
+ * following report have effectively landed.
*
- * Do this before updating the aging pointer in case we may be able to
- * immediately start aging a new pointer too (if new data has become
- * available) without needing to wait for a later hrtimer callback.
+ * This assumes that the writes of the OA unit land in
+ * memory in the order they were issued.
+ * If not : (╯°□°)╯︵ ┻━┻
*/
- if (aging_tail != INVALID_TAIL_PTR &&
- ((now - dev_priv->perf.oa.oa_buffer.aging_timestamp) >
- OA_TAIL_MARGIN_NSEC)) {
+ while (OA_TAKEN(tail, stream->oa_buffer.tail) >= report_size) {
+ void *report = stream->oa_buffer.vaddr + tail;
- aged_idx ^= 1;
- dev_priv->perf.oa.oa_buffer.aged_tail_idx = aged_idx;
-
- aged_tail = aging_tail;
+ if (oa_report_id(stream, report) ||
+ oa_timestamp(stream, report))
+ break;
- /* Mark that we need a new pointer to start aging... */
- dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset = INVALID_TAIL_PTR;
- aging_tail = INVALID_TAIL_PTR;
+ tail = (tail - report_size) & (OA_BUFFER_SIZE - 1);
}
- /* Update the aging tail
- *
- * We throttle aging tail updates until we have a new tail that
- * represents >= one report more data than is already available for
- * reading. This ensures there will be enough data for a successful
- * read once this new pointer has aged and ensures we will give the new
- * pointer time to age.
- */
- if (aging_tail == INVALID_TAIL_PTR &&
- (aged_tail == INVALID_TAIL_PTR ||
- OA_TAKEN(hw_tail, aged_tail) >= report_size)) {
- struct i915_vma *vma = dev_priv->perf.oa.oa_buffer.vma;
- u32 gtt_offset = i915_ggtt_offset(vma);
-
- /* Be paranoid and do a bounds check on the pointer read back
- * from hardware, just in case some spurious hardware condition
- * could put the tail out of bounds...
- */
- if (hw_tail >= gtt_offset &&
- hw_tail < (gtt_offset + OA_BUFFER_SIZE)) {
- dev_priv->perf.oa.oa_buffer.tails[!aged_idx].offset =
- aging_tail = hw_tail;
- dev_priv->perf.oa.oa_buffer.aging_timestamp = now;
- } else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n",
- hw_tail);
- }
- }
+ if (OA_TAKEN(hw_tail, tail) > report_size &&
+ __ratelimit(&stream->perf->tail_pointer_race))
+ drm_notice(&stream->uncore->i915->drm,
+ "unlanded report(s) head=0x%x tail=0x%x hw_tail=0x%x\n",
+ stream->oa_buffer.head, tail, hw_tail);
+
+ stream->oa_buffer.tail = tail;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ pollin = OA_TAKEN(stream->oa_buffer.tail,
+ stream->oa_buffer.head) >= report_size;
- return aged_tail == INVALID_TAIL_PTR ?
- false : OA_TAKEN(aged_tail, head) >= report_size;
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
+
+ return pollin;
}
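
For reference, OA_TAKEN() (defined alongside OA_BUFFER_SIZE earlier in this file) is the circular distance between two buffer offsets; with the power-of-two OA_BUFFER_SIZE it reduces to a masked subtraction:

	#define OA_TAKEN(tail, head)	((tail - head) & (OA_BUFFER_SIZE - 1))

For example, with the 16M buffer, head = 0xffffc0 and tail = 0x40 give OA_TAKEN(tail, head) = 0x80, correctly accounting for the wrap.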
/**
@@ -596,10 +667,10 @@ static int append_oa_sample(struct i915_perf_stream *stream,
size_t *offset,
const u8 *report)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ int report_size = stream->oa_buffer.format->size;
struct drm_i915_perf_record_header header;
- u32 sample_flags = stream->sample_flags;
+ int report_size_partial;
+ u8 *oa_buf_end;
header.type = DRM_I915_PERF_RECORD_SAMPLE;
header.pad = 0;
@@ -613,9 +684,19 @@ static int append_oa_sample(struct i915_perf_stream *stream,
return -EFAULT;
buf += sizeof(header);
- if (sample_flags & SAMPLE_OA_REPORT) {
- if (copy_to_user(buf, report, report_size))
+ oa_buf_end = stream->oa_buffer.vaddr + OA_BUFFER_SIZE;
+ report_size_partial = oa_buf_end - report;
+
+ if (report_size_partial < report_size) {
+ if (copy_to_user(buf, report, report_size_partial))
+ return -EFAULT;
+ buf += report_size_partial;
+
+ if (copy_to_user(buf, stream->oa_buffer.vaddr,
+ report_size - report_size_partial))
return -EFAULT;
+ } else if (copy_to_user(buf, report, report_size)) {
+ return -EFAULT;
}
(*offset) += header.size;
@@ -624,7 +705,8 @@ static int append_oa_sample(struct i915_perf_stream *stream,
}
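
Concretely: with a 256-byte report that starts 64 bytes before the end of the 16M buffer, report_size_partial = 64, so the first copy_to_user() moves the 64 trailing bytes and the second moves the remaining 192 bytes from the start of the buffer; a report that fits entirely before oa_buf_end takes the single-copy branch instead.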
/**
- * Copies all buffered OA reports into userspace read() buffer.
+ * gen8_append_oa_reports - Copies all buffered OA reports into
+ * userspace read() buffer.
* @stream: An i915-perf stream opened for OA metrics
* @buf: destination buffer given by userspace
* @count: the number of bytes userspace wants to read
@@ -648,97 +730,55 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct intel_uncore *uncore = stream->uncore;
+ int report_size = stream->oa_buffer.format->size;
+ u8 *oa_buf_base = stream->oa_buffer.vaddr;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
- unsigned int aged_tail_idx;
u32 head, tail;
- u32 taken;
int ret = 0;
- if (WARN_ON(!stream->enabled))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
-
- head = dev_priv->perf.oa.oa_buffer.head;
- aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
-
- /*
- * An invalid tail pointer here means we're still waiting for the poll
- * hrtimer callback to give us a pointer
- */
- if (tail == INVALID_TAIL_PTR)
- return -EAGAIN;
+ head = stream->oa_buffer.head;
+ tail = stream->oa_buffer.tail;
- /*
- * NB: oa_buffer.head/tail include the gtt_offset which we don't want
- * while indexing relative to oa_buf_base.
- */
- head -= gtt_offset;
- tail -= gtt_offset;
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
* An out of bounds or misaligned head or tail pointer implies a driver
* bug since we validate + align the tail pointers we read from the
* hardware and we are in full control of the head pointer which should
- * only be incremented by multiples of the report size (notably also
- * all a power of two).
+ * only be incremented by multiples of the report size.
*/
- if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
- tail > OA_BUFFER_SIZE || tail % report_size,
- "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
- head, tail))
+ if (drm_WARN_ONCE(&uncore->i915->drm,
+ head > OA_BUFFER_SIZE ||
+ tail > OA_BUFFER_SIZE,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
return -EIO;
for (/* none */;
- (taken = OA_TAKEN(tail, head));
+ OA_TAKEN(tail, head);
head = (head + report_size) & mask) {
u8 *report = oa_buf_base + head;
u32 *report32 = (void *)report;
u32 ctx_id;
- u32 reason;
-
- /*
- * All the report sizes factor neatly into the buffer
- * size so we never expect to see a report split
- * between the beginning and end of the buffer.
- *
- * Given the initial alignment check a misalignment
- * here would imply a driver bug that would result
- * in an overrun.
- */
- if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
- break;
- }
+ u64 reason;
/*
* The reason field includes flags identifying what
* triggered this specific report (mostly timer
* triggered or e.g. due to a context switch).
- *
- * This field is never expected to be zero so we can
- * check that the report isn't invalid before copying
- * it to userspace...
*/
- reason = ((report32[0] >> OAREPORT_REASON_SHIFT) &
- OAREPORT_REASON_MASK);
- if (reason == 0) {
- if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
- DRM_NOTE("Skipping spurious, invalid OA report\n");
- continue;
- }
-
- ctx_id = report32[2] & dev_priv->perf.oa.specific_ctx_id_mask;
+ reason = oa_report_reason(stream, report);
+ ctx_id = oa_context_id(stream, report32);
/*
* Squash whatever is in the CTX_ID field if it's marked as
@@ -747,9 +787,44 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
*
* Note: that we don't clear the valid_ctx_bit so userspace can
* understand that the ID has been squashed by the kernel.
+ *
+ * Update:
+ *
+ * On XEHP platforms the behavior of context id valid bit has
+ * changed compared to prior platforms. To describe this, we
+ * define a few terms:
+ *
+ * context-switch-report: This is a report with the reason type
+ * being context-switch. It is generated when a context switches
+ * out.
+ *
+ * context-valid-bit: A bit that is set in the report ID field
+ * to indicate that a valid context has been loaded.
+ *
+ * gpu-idle: A condition characterized by a
+ * context-switch-report with context-valid-bit set to 0.
+ *
+ * On prior platforms, the context-valid-bit is set to 0 only
+ * when the GPU goes idle. In all other reports, it is set to 1.
+ *
+ * On XEHP platforms, context-valid-bit is set to 1 in a context
+ * switch report if a new context switched in. For all other
+ * reports it is set to 0.
+ *
+ * This change in behavior causes an issue with MMIO triggered
+ * reports. MMIO triggered reports have the markers in the
+ * context ID field and the context-valid-bit is 0. The logic
+ * below to squash the context ID would render the report
+ * useless since the user will not be able to find it in the OA
+ * buffer. Since MMIO triggered reports exist only on XEHP,
+ * we should avoid squashing these for XEHP platforms.
*/
- if (!(report32[0] & dev_priv->perf.oa.gen8_valid_ctx_bit))
- ctx_id = report32[2] = INVALID_CTX_ID;
+
+ if (oa_report_ctx_invalid(stream, report) &&
+ GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 55)) {
+ ctx_id = INVALID_CTX_ID;
+ oa_context_id_squash(stream, report32);
+ }
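
Summarizing the predicate above as a comment-style sketch:

	/*
	 * Squash the context ID only when the valid bit is clear AND the
	 * platform predates Xe_HP (graphics version 12.55): on Xe_HP+ a
	 * clear valid bit is the norm, and MMIO triggered reports carry
	 * their marker in the context ID field, so squashing would make
	 * them unfindable in the OA buffer.
	 */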
/*
* NB: For Gen 8 the OA unit no longer supports clock gating
@@ -782,19 +857,18 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* switches since it's not-uncommon for periodic samples to
* identify a switch before any 'context switch' report.
*/
- if (!dev_priv->perf.oa.exclusive_stream->ctx ||
- dev_priv->perf.oa.specific_ctx_id == ctx_id ||
- (dev_priv->perf.oa.oa_buffer.last_ctx_id ==
- dev_priv->perf.oa.specific_ctx_id) ||
+ if (!stream->ctx ||
+ stream->specific_ctx_id == ctx_id ||
+ stream->oa_buffer.last_ctx_id == stream->specific_ctx_id ||
reason & OAREPORT_REASON_CTX_SWITCH) {
/*
* While filtering for a single context we avoid
* leaking the IDs of other contexts.
*/
- if (dev_priv->perf.oa.exclusive_stream->ctx &&
- dev_priv->perf.oa.specific_ctx_id != ctx_id) {
- report32[2] = INVALID_CTX_ID;
+ if (stream->ctx &&
+ stream->specific_ctx_id != ctx_id) {
+ oa_context_id_squash(stream, report32);
}
ret = append_oa_sample(stream, buf, count, offset,
@@ -802,32 +876,49 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
if (ret)
break;
- dev_priv->perf.oa.oa_buffer.last_ctx_id = ctx_id;
+ stream->oa_buffer.last_ctx_id = ctx_id;
}
- /*
- * The above reason field sanity check is based on
- * the assumption that the OA buffer is initially
- * zeroed and we reset the field after copying so the
- * check is still meaningful once old reports start
- * being overwritten.
- */
- report32[0] = 0;
+ if (is_power_of_2(report_size)) {
+ /*
+ * Clear out the report id and timestamp as a means
+ * to detect unlanded reports.
+ */
+ oa_report_id_clear(stream, report32);
+ oa_timestamp_clear(stream, report32);
+ } else {
+ u8 *oa_buf_end = stream->oa_buffer.vaddr +
+ OA_BUFFER_SIZE;
+ u32 part = oa_buf_end - (u8 *)report32;
+
+ /* Zero out the entire report */
+ if (report_size <= part) {
+ memset(report32, 0, report_size);
+ } else {
+ memset(report32, 0, part);
+ memset(oa_buf_base, 0, report_size - part);
+ }
+ }
}
if (start_offset != *offset) {
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ i915_reg_t oaheadptr;
+
+ oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ?
+ __oa_regs(stream)->oa_head_ptr :
+ GEN8_OAHEADPTR;
+
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/*
* We removed the gtt_offset for the copy loop above, indexing
* relative to oa_buf_base so put back here...
*/
- head += gtt_offset;
-
- I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK);
- dev_priv->perf.oa.oa_buffer.head = head;
+ intel_uncore_write(uncore, oaheadptr,
+ (head + gtt_offset) & GEN12_OAG_OAHEADPTR_MASK);
+ stream->oa_buffer.head = head;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
}
return ret;
@@ -858,14 +949,19 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus;
+ i915_reg_t oastatus_reg;
int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ?
+ __oa_regs(stream)->oa_status :
+ GEN8_OASTATUS;
+
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
/*
* We treat OABUFFER_OVERFLOW as a significant error:
@@ -887,17 +983,18 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
if (ret)
return ret;
- DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- dev_priv->perf.oa.period_exponent);
+ drm_dbg(&stream->perf->i915->drm,
+ "OA buffer overflow (exponent = %d): force restart\n",
+ stream->period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
* reset GEN8_OASTATUS for us
*/
- oastatus = I915_READ(GEN8_OASTATUS);
+ oastatus = intel_uncore_read(uncore, oastatus_reg);
}
if (oastatus & GEN8_OASTATUS_REPORT_LOST) {
@@ -905,15 +1002,21 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- I915_WRITE(GEN8_OASTATUS,
- oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+
+ intel_uncore_rmw(uncore, oastatus_reg,
+ GEN8_OASTATUS_COUNTER_OVERFLOW |
+ GEN8_OASTATUS_REPORT_LOST,
+ IS_GRAPHICS_VER(uncore->i915, 8, 11) ?
+ (GEN8_OASTATUS_HEAD_POINTER_WRAP |
+ GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}
return gen8_append_oa_reports(stream, buf, count, offset);
}
/**
- * Copies all buffered OA reports into userspace read() buffer.
+ * gen7_append_oa_reports - Copies all buffered OA reports into
+ * userspace read() buffer.
* @stream: An i915-perf stream opened for OA metrics
* @buf: destination buffer given by userspace
* @count: the number of bytes userspace wants to read
@@ -937,40 +1040,25 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int report_size = dev_priv->perf.oa.oa_buffer.format_size;
- u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct intel_uncore *uncore = stream->uncore;
+ int report_size = stream->oa_buffer.format->size;
+ u8 *oa_buf_base = stream->oa_buffer.vaddr;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
u32 mask = (OA_BUFFER_SIZE - 1);
size_t start_offset = *offset;
unsigned long flags;
- unsigned int aged_tail_idx;
u32 head, tail;
- u32 taken;
int ret = 0;
- if (WARN_ON(!stream->enabled))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
return -EIO;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- head = dev_priv->perf.oa.oa_buffer.head;
- aged_tail_idx = dev_priv->perf.oa.oa_buffer.aged_tail_idx;
- tail = dev_priv->perf.oa.oa_buffer.tails[aged_tail_idx].offset;
+ head = stream->oa_buffer.head;
+ tail = stream->oa_buffer.tail;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
-
- /* An invalid tail pointer here means we're still waiting for the poll
- * hrtimer callback to give us a pointer
- */
- if (tail == INVALID_TAIL_PTR)
- return -EAGAIN;
-
- /* NB: oa_buffer.head/tail include the gtt_offset which we don't want
- * while indexing relative to oa_buf_base.
- */
- head -= gtt_offset;
- tail -= gtt_offset;
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* An out of bounds or misaligned head or tail pointer implies a driver
* bug since we validate + align the tail pointers we read from the
@@ -978,15 +1066,16 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* only be incremented by multiples of the report size (notably also
* all a power of two).
*/
- if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
- tail > OA_BUFFER_SIZE || tail % report_size,
- "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
- head, tail))
+ if (drm_WARN_ONCE(&uncore->i915->drm,
+ head > OA_BUFFER_SIZE || head % report_size ||
+ tail > OA_BUFFER_SIZE || tail % report_size,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
return -EIO;
for (/* none */;
- (taken = OA_TAKEN(tail, head));
+ OA_TAKEN(tail, head);
head = (head + report_size) & mask) {
u8 *report = oa_buf_base + head;
u32 *report32 = (void *)report;
@@ -999,8 +1088,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* here would imply a driver bug that would result
* in an overrun.
*/
- if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ if (drm_WARN_ON(&uncore->i915->drm,
+ (OA_BUFFER_SIZE - head) < report_size)) {
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -1011,8 +1102,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* copying it to userspace...
*/
if (report32[0] == 0) {
- if (__ratelimit(&dev_priv->perf.oa.spurious_report_rs))
- DRM_NOTE("Skipping spurious, invalid OA report\n");
+ if (__ratelimit(&stream->perf->spurious_report_rs))
+ drm_notice(&uncore->i915->drm,
+ "Skipping spurious, invalid OA report\n");
continue;
}
@@ -1020,29 +1112,22 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
if (ret)
break;
- /* The above report-id field sanity check is based on
- * the assumption that the OA buffer is initially
- * zeroed and we reset the field after copying so the
- * check is still meaningful once old reports start
- * being overwritten.
+ /* Clear out the first 2 dwords as a means to detect unlanded
+ * reports.
*/
report32[0] = 0;
+ report32[1] = 0;
}
if (start_offset != *offset) {
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- /* We removed the gtt_offset for the copy loop above, indexing
- * relative to oa_buf_base so put back here...
- */
- head += gtt_offset;
+ intel_uncore_write(uncore, GEN7_OASTATUS2,
+ ((head + gtt_offset) & GEN7_OASTATUS2_HEAD_MASK) |
+ GEN7_OASTATUS2_MEM_SELECT_GGTT);
+ stream->oa_buffer.head = head;
- I915_WRITE(GEN7_OASTATUS2,
- ((head & GEN7_OASTATUS2_HEAD_MASK) |
- GEN7_OASTATUS2_MEM_SELECT_GGTT));
- dev_priv->perf.oa.oa_buffer.head = head;
-
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
}
return ret;
@@ -1069,21 +1154,21 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
u32 oastatus1;
int ret;
- if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
/* XXX: On Haswell we don't have a safe way to clear oastatus1
* bits while the OA unit is enabled (while the tail pointer
* may be updated asynchronously) so we ignore status bits
* that have already been reported to userspace.
*/
- oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;
+ oastatus1 &= ~stream->perf->gen7_latched_oastatus1;
/* We treat OABUFFER_OVERFLOW as a significant error:
*
@@ -1111,13 +1196,14 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
if (ret)
return ret;
- DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
- dev_priv->perf.oa.period_exponent);
+ drm_dbg(&stream->perf->i915->drm,
+ "OA buffer overflow (exponent = %d): force restart\n",
+ stream->period_exponent);
- dev_priv->perf.oa.ops.oa_disable(stream);
- dev_priv->perf.oa.ops.oa_enable(stream);
+ stream->perf->ops.oa_disable(stream);
+ stream->perf->ops.oa_enable(stream);
- oastatus1 = I915_READ(GEN7_OASTATUS1);
+ oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
}
if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
@@ -1125,7 +1211,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_I915_PERF_RECORD_OA_REPORT_LOST);
if (ret)
return ret;
- dev_priv->perf.oa.gen7_latched_oastatus1 |=
+ stream->perf->gen7_latched_oastatus1 |=
GEN7_OASTATUS1_REPORT_LOST;
}
@@ -1148,14 +1234,12 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
*/
static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
/* We would wait indefinitely if periodic sampling is not enabled */
- if (!dev_priv->perf.oa.periodic)
+ if (!stream->periodic)
return -EIO;
- return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
- oa_buffer_check_unlocked(dev_priv));
+ return wait_event_interruptible(stream->poll_wq,
+ oa_buffer_check_unlocked(stream));
}
/**
@@ -1172,9 +1256,7 @@ static void i915_oa_poll_wait(struct i915_perf_stream *stream,
struct file *file,
poll_table *wait)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
+ poll_wait(file, &stream->poll_wq, wait);
}
/**
@@ -1194,36 +1276,241 @@ static int i915_oa_read(struct i915_perf_stream *stream,
size_t count,
size_t *offset)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
+ return stream->perf->ops.read(stream, buf, count, offset);
}
-static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
- struct i915_gem_context *ctx)
+static struct intel_context *oa_pin_context(struct i915_perf_stream *stream)
{
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct i915_gem_engines_iter it;
+ struct i915_gem_context *ctx = stream->ctx;
struct intel_context *ce;
- int ret;
+ struct i915_gem_ww_ctx ww;
+ int err = -ENODEV;
- ret = i915_mutex_lock_interruptible(&i915->drm);
- if (ret)
- return ERR_PTR(ret);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (ce->engine != stream->engine) /* first match! */
+ continue;
+
+ err = 0;
+ break;
+ }
+ i915_gem_context_unlock_engines(ctx);
+
+ if (err)
+ return ERR_PTR(err);
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
/*
* As the ID is the gtt offset of the context's vma we
* pin the vma to ensure the ID remains fixed.
- *
- * NB: implied RCS engine...
*/
- ce = intel_context_pin(ctx, engine);
- mutex_unlock(&i915->drm.struct_mutex);
- if (IS_ERR(ce))
- return ce;
+ err = intel_context_pin_ww(ce, &ww);
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
- i915->perf.oa.pinned_ctx = ce;
+ if (err)
+ return ERR_PTR(err);
- return ce;
+ stream->pinned_ctx = ce;
+ return stream->pinned_ctx;
+}
+
+static int
+__store_reg_to_mem(struct i915_request *rq, i915_reg_t reg, u32 ggtt_offset)
+{
+ u32 *cs, cmd;
+
+ cmd = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ if (GRAPHICS_VER(rq->i915) >= 8)
+ cmd++;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(reg);
+ *cs++ = ggtt_offset;
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
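
Note the cmd++ above: on graphics version 8+, MI_STORE_REGISTER_MEM takes a 64-bit GGTT address, so the command is one dword longer (the increment bumps the length field in the low bits of the header) and the trailing *cs++ = 0 supplies the upper 32 address bits.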
+
+static int
+__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+
+ err = __store_reg_to_mem(rq, reg, ggtt_offset);
+
+ i915_request_add(rq);
+ if (!err && i915_request_wait(rq, 0, HZ / 2) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int
+gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
+{
+ struct i915_vma *scratch;
+ u32 *val;
+ int err;
+
+ scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto err_scratch;
+
+ err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
+ i915_ggtt_offset(scratch));
+ if (err)
+ goto err_scratch;
+
+ val = i915_gem_object_pin_map_unlocked(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(val)) {
+ err = PTR_ERR(val);
+ goto err_scratch;
+ }
+
+ *ctx_id = *val;
+ i915_gem_object_unpin_map(scratch->obj);
+
+err_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+/*
+ * For execlist mode of submission, pick an unused context id:
+ * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts, and
+ * XXX_MAX_CONTEXT_HW_ID is used by the idle context.
+ *
+ * For GuC mode of submission read context id from the upper dword of the
+ * EXECLIST_STATUS register. Note that we read this value only once and expect
+ * that the value stays fixed for the entire OA use case. There are cases where
+ * GuC KMD implementation may deregister a context to reuse its context id, but
+ * we prevent that from happening to the OA context by pinning it.
+ */
+static int gen12_get_render_context_id(struct i915_perf_stream *stream)
+{
+ u32 ctx_id, mask;
+ int ret;
+
+ if (intel_engine_uses_guc(stream->engine)) {
+ ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id);
+ if (ret)
+ return ret;
+
+ mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
+ (GEN12_GUC_SW_CTX_ID_SHIFT - 32);
+ } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 55)) {
+ ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
+ (XEHP_SW_CTX_ID_SHIFT - 32);
+
+ mask = ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
+ (XEHP_SW_CTX_ID_SHIFT - 32);
+ } else {
+ ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) <<
+ (GEN11_SW_CTX_ID_SHIFT - 32);
+
+ mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) <<
+ (GEN11_SW_CTX_ID_SHIFT - 32);
+ }
+ stream->specific_ctx_id = ctx_id & mask;
+ stream->specific_ctx_id_mask = mask;
+
+ return 0;
+}
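
As a worked example of the mask construction: the OA report carries only the upper dword of the 64-bit context descriptor, hence the "- 32" in each shift. With hypothetical values WIDTH = 11 and SHIFT = 37 (an ID occupying descriptor bits 37..47), mask = ((1U << 11) - 1) << (37 - 32) = 0x7ff << 5 = 0xffe0.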
+
+static bool oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
+{
+ u32 idx = *offset;
+ u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
+ bool found = false;
+
+ idx++;
+ for (; idx < len; idx += 2) {
+ if (state[idx] == reg) {
+ found = true;
+ break;
+ }
+ }
+
+ *offset = idx;
+ return found;
+}
+
+static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
+{
+ u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
+ u32 *state = ce->lrc_reg_state;
+
+ if (drm_WARN_ON(&ce->engine->i915->drm, !state))
+ return U32_MAX;
+
+ for (offset = 0; offset < len; ) {
+ if (IS_MI_LRI_CMD(state[offset])) {
+ /*
+ * We expect reg-value pairs in MI_LRI command, so
+ * MI_LRI_LEN() should be even, if not, issue a warning.
+ */
+ drm_WARN_ON(&ce->engine->i915->drm,
+ MI_LRI_LEN(state[offset]) & 0x1);
+
+ if (oa_find_reg_in_lri(state, reg, &offset, len))
+ break;
+ } else {
+ offset++;
+ }
+ }
+
+ return offset < len ? offset : U32_MAX;
+}
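
For illustration, a toy context-image fragment (the register offsets are hypothetical; MI_LRI_LEN() yields the number of payload dwords following the header, two per register):

	u32 state[] = {
		MI_LOAD_REGISTER_IMM(2),	/* header: 2 reg/value pairs */
		0x2244, 0x0,			/* some other register */
		0x2360, 0x0,			/* the register we look for */
	};

Here oa_context_image_offset(ce, 0x2360) would return 3, the index of the matching register offset, so the paired value the driver later rewrites lives at state[4].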
+
+static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
+{
+ i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
+ struct i915_perf *perf = &ce->engine->i915->perf;
+ u32 offset = perf->ctx_oactxctrl_offset;
+
+ /* Do this only once. Failure is stored as an offset of U32_MAX */
+ if (offset)
+ goto exit;
+
+ offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
+ perf->ctx_oactxctrl_offset = offset;
+
+ drm_dbg(&ce->engine->i915->drm,
+ "%s oa ctx control at 0x%08x dword offset\n",
+ ce->engine->name, offset);
+
+exit:
+ return offset && offset != U32_MAX ? 0 : -ENODEV;
+}
+
+static bool engine_supports_mi_query(struct intel_engine_cs *engine)
+{
+ return engine->class == RENDER_CLASS;
}
/**
@@ -1238,28 +1525,43 @@ static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
*/
static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *i915 = stream->dev_priv;
struct intel_context *ce;
+ int ret = 0;
- ce = oa_pin_context(i915, stream->ctx);
+ ce = oa_pin_context(stream);
if (IS_ERR(ce))
return PTR_ERR(ce);
- switch (INTEL_GEN(i915)) {
+ if (engine_supports_mi_query(stream->engine) &&
+ HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) {
+ /*
+ * We are enabling perf query here. If we don't find the context
+ * offset, just return an error.
+ */
+ ret = set_oa_ctx_ctrl_offset(ce);
+ if (ret) {
+ intel_context_unpin(ce);
+ drm_err(&stream->perf->i915->drm,
+ "Enabling perf query failed for %s\n",
+ stream->engine->name);
+ return ret;
+ }
+ }
+
+ switch (GRAPHICS_VER(ce->engine->i915)) {
case 7: {
/*
* On Haswell we don't do any post processing of the reports
* and don't need to use the mask.
*/
- i915->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
- i915->perf.oa.specific_ctx_id_mask = 0;
+ stream->specific_ctx_id = i915_ggtt_offset(ce->state);
+ stream->specific_ctx_id_mask = 0;
break;
}
case 8:
case 9:
- case 10:
- if (USES_GUC_SUBMISSION(i915)) {
+ if (intel_engine_uses_guc(ce->engine)) {
/*
* When using GuC, the context descriptor we write in
* i915 is read by GuC and rewritten before it's
@@ -1270,45 +1572,38 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
* dropped by GuC. They won't be part of the context
* ID in the OA reports, so squash those lower bits.
*/
- i915->perf.oa.specific_ctx_id =
- lower_32_bits(ce->lrc_desc) >> 12;
+ stream->specific_ctx_id = ce->lrc.lrca >> 12;
/*
* GuC uses the top bit to signal proxy submission, so
* ignore that bit.
*/
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
(1U << (GEN8_CTX_ID_WIDTH - 1)) - 1;
} else {
- i915->perf.oa.specific_ctx_id_mask =
+ stream->specific_ctx_id_mask =
(1U << GEN8_CTX_ID_WIDTH) - 1;
- i915->perf.oa.specific_ctx_id =
- upper_32_bits(ce->lrc_desc);
- i915->perf.oa.specific_ctx_id &=
- i915->perf.oa.specific_ctx_id_mask;
+ stream->specific_ctx_id = stream->specific_ctx_id_mask;
}
break;
- case 11: {
- i915->perf.oa.specific_ctx_id_mask =
- ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) |
- ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) |
- ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32);
- i915->perf.oa.specific_ctx_id = upper_32_bits(ce->lrc_desc);
- i915->perf.oa.specific_ctx_id &=
- i915->perf.oa.specific_ctx_id_mask;
+ case 11:
+ case 12:
+ ret = gen12_get_render_context_id(stream);
break;
- }
default:
- MISSING_CASE(INTEL_GEN(i915));
+ MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
}
- DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
- i915->perf.oa.specific_ctx_id,
- i915->perf.oa.specific_ctx_id_mask);
+ ce->tag = stream->specific_ctx_id;
- return 0;
+ drm_dbg(&stream->perf->i915->drm,
+ "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ stream->specific_ctx_id,
+ stream->specific_ctx_id_mask);
+
+ return ret;
}
/**
@@ -1320,93 +1615,118 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*/
static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_context *ce;
- dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
- dev_priv->perf.oa.specific_ctx_id_mask = 0;
-
- ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+ ce = fetch_and_zero(&stream->pinned_ctx);
if (ce) {
- mutex_lock(&dev_priv->drm.struct_mutex);
+ ce->tag = 0; /* recomputed on next submission after parking */
intel_context_unpin(ce);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
+
+ stream->specific_ctx_id = INVALID_CTX_ID;
+ stream->specific_ctx_id_mask = 0;
}
static void
-free_oa_buffer(struct drm_i915_private *i915)
+free_oa_buffer(struct i915_perf_stream *stream)
{
- mutex_lock(&i915->drm.struct_mutex);
-
- i915_vma_unpin_and_release(&i915->perf.oa.oa_buffer.vma,
+ i915_vma_unpin_and_release(&stream->oa_buffer.vma,
I915_VMA_RELEASE_MAP);
- mutex_unlock(&i915->drm.struct_mutex);
+ stream->oa_buffer.vaddr = NULL;
+}
+
+static void
+free_oa_configs(struct i915_perf_stream *stream)
+{
+ struct i915_oa_config_bo *oa_bo, *tmp;
+
+ i915_oa_config_put(stream->oa_config);
+ llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
+ free_oa_config_bo(oa_bo);
+}
- i915->perf.oa.oa_buffer.vaddr = NULL;
+static void
+free_noa_wait(struct i915_perf_stream *stream)
+{
+ i915_vma_unpin_and_release(&stream->noa_wait, 0);
+}
+
+static bool engine_supports_oa(const struct intel_engine_cs *engine)
+{
+ return engine->oa_group;
+}
+
+static bool engine_supports_oa_format(struct intel_engine_cs *engine, int type)
+{
+ return engine->oa_group && engine->oa_group->type == type;
}
static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
+ struct intel_gt *gt = stream->engine->gt;
+ struct i915_perf_group *g = stream->engine->oa_group;
+ int m;
- BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
+ if (WARN_ON(stream != g->exclusive_stream))
+ return;
/*
* Unset exclusive_stream first, it will be checked while disabling
* the metric set on gen8+.
+ *
+ * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
*/
- mutex_lock(&dev_priv->drm.struct_mutex);
- dev_priv->perf.oa.exclusive_stream = NULL;
- dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ WRITE_ONCE(g->exclusive_stream, NULL);
+ perf->ops.disable_metric_set(stream);
- free_oa_buffer(dev_priv);
+ free_oa_buffer(stream);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
if (stream->ctx)
oa_put_render_ctx_id(stream);
- put_oa_config(dev_priv, stream->oa_config);
+ free_oa_configs(stream);
+ free_noa_wait(stream);
- if (dev_priv->perf.oa.spurious_report_rs.missed) {
- DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n",
- dev_priv->perf.oa.spurious_report_rs.missed);
- }
+ m = ratelimit_state_get_miss(&perf->spurious_report_rs);
+ if (m)
+ gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n", m);
}
-static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
{
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
/* Pre-DevBDW: OABUFFER must be set with counters off,
* before OASTATUS1, but after OASTATUS2
*/
- I915_WRITE(GEN7_OASTATUS2,
- gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */
- dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+ intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */
+ gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT);
+ stream->oa_buffer.head = 0;
- I915_WRITE(GEN7_OABUFFER, gtt_offset);
+ intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset);
- I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
+ intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */
+ gtt_offset | OABUFFER_SIZE_16M);
/* Mark that we need updated tail pointers to read from... */
- dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = 0;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/* On Haswell we have to track which OASTATUS1 flags we've
* already seen since they can't be cleared while periodic
* sampling is enabled.
*/
- dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
+ stream->perf->gen7_latched_oastatus1 = 0;
/* NB: although the OA buffer will initially be allocated
* zeroed via shmfs (and so this memset is redundant when
@@ -1419,26 +1739,22 @@ static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
* the assumption that new reports are being written to zeroed
* memory...
*/
- memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
-
- /* Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
- */
- dev_priv->perf.oa.pollin = false;
+ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
}
-static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
+static void gen8_init_oa_buffer(struct i915_perf_stream *stream)
{
- u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
unsigned long flags;
- spin_lock_irqsave(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- I915_WRITE(GEN8_OASTATUS, 0);
- I915_WRITE(GEN8_OAHEADPTR, gtt_offset);
- dev_priv->perf.oa.oa_buffer.head = gtt_offset;
+ intel_uncore_write(uncore, GEN8_OASTATUS, 0);
+ intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset);
+ stream->oa_buffer.head = 0;
- I915_WRITE(GEN8_OABUFFER_UDW, 0);
+ intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0);
/*
* PRM says:
@@ -1448,22 +1764,21 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
* to enable proper functionality of the overflow
* bit."
*/
- I915_WRITE(GEN8_OABUFFER, gtt_offset |
+ intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset |
OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
- I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
+ intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK);
/* Mark that we need updated tail pointers to read from... */
- dev_priv->perf.oa.oa_buffer.tails[0].offset = INVALID_TAIL_PTR;
- dev_priv->perf.oa.oa_buffer.tails[1].offset = INVALID_TAIL_PTR;
+ stream->oa_buffer.tail = 0;
/*
* Reset state used to recognise context switches, affecting which
* reports we will forward to userspace while filtering for a single
* context.
*/
- dev_priv->perf.oa.oa_buffer.last_ctx_id = INVALID_CTX_ID;
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
- spin_unlock_irqrestore(&dev_priv->perf.oa.oa_buffer.ptr_lock, flags);
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
/*
* NB: although the OA buffer will initially be allocated
@@ -1477,62 +1792,112 @@ static void gen8_init_oa_buffer(struct drm_i915_private *dev_priv)
* the assumption that new reports are being written to zeroed
* memory...
*/
- memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+}
+
+static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
+
+ intel_uncore_write(uncore, __oa_regs(stream)->oa_status, 0);
+ intel_uncore_write(uncore, __oa_regs(stream)->oa_head_ptr,
+ gtt_offset & GEN12_OAG_OAHEADPTR_MASK);
+ stream->oa_buffer.head = 0;
/*
- * Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
+ * PRM says:
+ *
+ * "This MMIO must be set before the OATAILPTR
+ * register and after the OAHEADPTR register. This is
+ * to enable proper functionality of the overflow
+ * bit."
+ */
+ intel_uncore_write(uncore, __oa_regs(stream)->oa_buffer, gtt_offset |
+ OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT);
+ intel_uncore_write(uncore, __oa_regs(stream)->oa_tail_ptr,
+ gtt_offset & GEN12_OAG_OATAILPTR_MASK);
+
+ /* Mark that we need updated tail pointers to read from... */
+ stream->oa_buffer.tail = 0;
+
+ /*
+ * Reset state used to recognise context switches, affecting which
+ * reports we will forward to userspace while filtering for a single
+ * context.
+ */
+ stream->oa_buffer.last_ctx_id = INVALID_CTX_ID;
+
+ spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
+
+ /*
+ * NB: although the OA buffer will initially be allocated
+ * zeroed via shmfs (and so this memset is redundant when
+ * first allocating), we may re-init the OA buffer, either
+ * when re-enabling a stream or in error/reset paths.
+ *
+ * The reason we clear the buffer for each re-init is for the
+ * sanity check in gen8_append_oa_reports() that looks at the
+ * reason field to make sure it's non-zero which relies on
+ * the assumption that new reports are being written to zeroed
+ * memory...
*/
- dev_priv->perf.oa.pollin = false;
+ memset(stream->oa_buffer.vaddr, 0,
+ stream->oa_buffer.vma->size);
}
-static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
+static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_gt *gt = stream->engine->gt;
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
int ret;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
return -ENODEV;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- return ret;
-
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
- bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
+ bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
- DRM_ERROR("Failed to allocate OA buffer\n");
- ret = PTR_ERR(bo);
- goto unlock;
+ drm_err(&i915->drm, "Failed to allocate OA buffer\n");
+ return PTR_ERR(bo);
}
- ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
- if (ret)
- goto err_unref;
+ i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
/* PreHSW required 512K alignment, HSW requires 16M */
- vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
+ vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
}
- dev_priv->perf.oa.oa_buffer.vma = vma;
- dev_priv->perf.oa.oa_buffer.vaddr =
- i915_gem_object_pin_map(bo, I915_MAP_WB);
- if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
- ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
- goto err_unpin;
+ /*
+ * PreHSW required 512K alignment.
+ * HSW and onwards, align to requested size of OA buffer.
+ */
+ ret = i915_vma_pin(vma, 0, SZ_16M, PIN_GLOBAL | PIN_HIGH);
+ if (ret) {
+ gt_err(gt, "Failed to pin OA buffer %d\n", ret);
+ goto err_unref;
}
- DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
- i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
- dev_priv->perf.oa.oa_buffer.vaddr);
+ stream->oa_buffer.vma = vma;
- goto unlock;
+ stream->oa_buffer.vaddr =
+ i915_gem_object_pin_map_unlocked(bo, I915_MAP_WB);
+ if (IS_ERR(stream->oa_buffer.vaddr)) {
+ ret = PTR_ERR(stream->oa_buffer.vaddr);
+ goto err_unpin;
+ }
+
+ return 0;
err_unpin:
__i915_vma_unpin(vma);
@@ -1540,33 +1905,487 @@ err_unpin:
err_unref:
i915_gem_object_put(bo);
- dev_priv->perf.oa.oa_buffer.vaddr = NULL;
- dev_priv->perf.oa.oa_buffer.vma = NULL;
+ stream->oa_buffer.vaddr = NULL;
+ stream->oa_buffer.vma = NULL;
-unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
-static void config_oa_regs(struct drm_i915_private *dev_priv,
- const struct i915_oa_reg *regs,
- u32 n_regs)
+static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
+ bool save, i915_reg_t reg, u32 offset,
+ u32 dword_count)
+{
+ u32 cmd;
+ u32 d;
+
+ cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
+ cmd |= MI_SRM_LRM_GLOBAL_GTT;
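+	/*
+	 * On Gen8+ SRM/LRM take a 64-bit address, which adds one dword to
+	 * the packet; the low bits of the opcode encode the dword length,
+	 * hence the increment below.
+	 */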
+ if (GRAPHICS_VER(stream->perf->i915) >= 8)
+ cmd++;
+
+ for (d = 0; d < dword_count; d++) {
+ *cs++ = cmd;
+ *cs++ = i915_mmio_reg_offset(reg) + 4 * d;
+ *cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d;
+ *cs++ = 0;
+ }
+
+ return cs;
+}
+
+static int alloc_noa_wait(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_gt *gt = stream->engine->gt;
+ struct drm_i915_gem_object *bo;
+ struct i915_vma *vma;
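+	/*
+	 * The wait loop below keeps adding the elapsed tick count to
+	 * delay_ticks and exits once the 64-bit addition carries:
+	 * programming ~0ull - delay makes the carry flag fire exactly
+	 * when the requested delay has passed.
+	 */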
+ const u64 delay_ticks = 0xffffffffffffffff -
+ intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915),
+ atomic64_read(&stream->perf->noa_programming_delay));
+ const u32 base = stream->engine->mmio_base;
+#define CS_GPR(x) GEN8_RING_CS_GPR(base, x)
+ u32 *batch, *ts0, *cs, *jump;
+ struct i915_gem_ww_ctx ww;
+ int ret, i;
+ enum {
+ START_TS,
+ NOW_TS,
+ DELTA_TS,
+ JUMP_PREDICATE,
+ DELTA_TARGET,
+ N_CS_GPR
+ };
+ i915_reg_t mi_predicate_result = HAS_MI_SET_PREDICATE(i915) ?
+ MI_PREDICATE_RESULT_2_ENGINE(base) :
+ MI_PREDICATE_RESULT_1(RENDER_RING_BASE);
+
+ /*
+ * gt->scratch was being used to save/restore the GPR registers, but on
+ * MTL the scratch uses stolen lmem. An MI_SRM to this memory region
+ * causes an engine hang. Instead allocate an additional page here to
+	 * save/restore GPR registers.
+ */
+ bo = i915_gem_object_create_internal(i915, 8192);
+ if (IS_ERR(bo)) {
+ drm_err(&i915->drm,
+ "Failed to allocate NOA wait batchbuffer\n");
+ return PTR_ERR(bo);
+ }
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(bo, &ww);
+ if (ret)
+ goto out_ww;
+
+ /*
+	 * We pin in GGTT because multiple OA config BOs will jump to this
+	 * address, so it needs to stay fixed for the lifetime of the
+	 * i915/perf stream.
+ */
+ vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out_ww;
+ }
+
+ ret = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto out_ww;
+
+ batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB);
+ if (IS_ERR(batch)) {
+ ret = PTR_ERR(batch);
+ goto err_unpin;
+ }
+
+ stream->noa_wait = vma;
+
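+	/* Both save areas land in the second page of the 8K object above. */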
+#define GPR_SAVE_OFFSET 4096
+#define PREDICATE_SAVE_OFFSET 4160
+
+ /* Save registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, true /* save */, CS_GPR(i),
+ GPR_SAVE_OFFSET + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, true /* save */, mi_predicate_result,
+ PREDICATE_SAVE_OFFSET, 1);
+
+ /* First timestamp snapshot location. */
+ ts0 = cs;
+
+ /*
+ * Initial snapshot of the timestamp register to implement the wait.
+	 * We work with 32-bit values, so clear out the top 32 bits of the
+	 * register because the ALU works on 64 bits.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS));
+
+ /*
+ * This is the location we're going to jump back into until the
+ * required amount of time has passed.
+ */
+ jump = cs;
+
+ /*
+	 * Take another snapshot of the timestamp register. Take care to
+	 * clear out the top 32 bits of CS_GPR(NOW_TS) as we're using it for
+	 * other operations below.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4;
+ *cs++ = 0;
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base));
+ *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS));
+
+ /*
+	 * Take the diff between the 2 timestamps and store the result back
+	 * into CS_GPR(DELTA_TS).
+ */
+ *cs++ = MI_MATH(5);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS));
+ *cs++ = MI_MATH_SUB;
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU);
+ *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ /*
+ * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the
+	 * timestamp has rolled over its 32 bits) into the predicate register
+ * to be used for the predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(mi_predicate_result);
+
+ if (HAS_MI_SET_PREDICATE(i915))
+ *cs++ = MI_SET_PREDICATE | 1;
+
+ /* Restart from the beginning if we had timestamps roll over. */
+ *cs++ = (GRAPHICS_VER(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4;
+ *cs++ = 0;
+
+ if (HAS_MI_SET_PREDICATE(i915))
+ *cs++ = MI_SET_PREDICATE;
+
+ /*
+	 * Now take the diff between the two previous timestamps and add it
+	 * to:
+	 *   ((1 << 64) - 1) - delay (expressed in clock ticks)
+ *
+ * When the Carry Flag contains 1 this means the elapsed time is
+ * longer than the expected delay, and we can exit the wait loop.
+ */
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET));
+ *cs++ = lower_32_bits(delay_ticks);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4;
+ *cs++ = upper_32_bits(delay_ticks);
+
+ *cs++ = MI_MATH(4);
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS));
+ *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET));
+ *cs++ = MI_MATH_ADD;
+ *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF);
+
+ *cs++ = MI_ARB_CHECK;
+
+ /*
+ * Transfer the result into the predicate register to be used for the
+ * predicated jump.
+ */
+ *cs++ = MI_LOAD_REGISTER_REG | (3 - 2);
+ *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE));
+ *cs++ = i915_mmio_reg_offset(mi_predicate_result);
+
+ if (HAS_MI_SET_PREDICATE(i915))
+ *cs++ = MI_SET_PREDICATE | 1;
+
+ /* Predicate the jump. */
+ *cs++ = (GRAPHICS_VER(i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8) |
+ MI_BATCH_PREDICATE;
+ *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4;
+ *cs++ = 0;
+
+ if (HAS_MI_SET_PREDICATE(i915))
+ *cs++ = MI_SET_PREDICATE;
+
+ /* Restore registers. */
+ for (i = 0; i < N_CS_GPR; i++)
+ cs = save_restore_register(
+ stream, cs, false /* restore */, CS_GPR(i),
+ GPR_SAVE_OFFSET + 8 * i, 2);
+ cs = save_restore_register(
+ stream, cs, false /* restore */, mi_predicate_result,
+ PREDICATE_SAVE_OFFSET, 1);
+
+ /* And return to the ring. */
+ *cs++ = MI_BATCH_BUFFER_END;
+
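+	/* The batch itself must fit in the first page; page two holds the save areas. */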
+ GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch));
+
+ i915_gem_object_flush_map(bo);
+ __i915_gem_object_release_map(bo);
+
+ goto out_ww;
+
+err_unpin:
+ i915_vma_unpin_and_release(&vma, 0);
+out_ww:
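+	/*
+	 * -EDEADLK from the ww machinery means another locker won the
+	 * ordering; back off (dropping our locks) and retry the sequence.
+	 */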
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ i915_gem_object_put(bo);
+ return ret;
+}
+
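+/*
+ * Emit MI_LOAD_REGISTER_IMM packets covering @n_regs (register, value)
+ * pairs, starting a fresh packet every MI_LOAD_REGISTER_IMM_MAX_REGS
+ * registers since a single MI_LRI can only program that many.
+ */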
+static u32 *write_cs_mi_lri(u32 *cs,
+ const struct i915_oa_reg *reg_data,
+ u32 n_regs)
{
u32 i;
for (i = 0; i < n_regs; i++) {
- const struct i915_oa_reg *reg = regs + i;
+ if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) {
+ u32 n_lri = min_t(u32,
+ n_regs - i,
+ MI_LOAD_REGISTER_IMM_MAX_REGS);
- I915_WRITE(reg->addr, reg->value);
+ *cs++ = MI_LOAD_REGISTER_IMM(n_lri);
+ }
+ *cs++ = i915_mmio_reg_offset(reg_data[i].addr);
+ *cs++ = reg_data[i].value;
}
+
+ return cs;
}
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
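+/*
+ * Dword cost of the MI_LRI stream emitted by write_cs_mi_lri(): one
+ * header dword per packet plus an (offset, value) pair per register.
+ */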
+static int num_lri_dwords(int num_regs)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ int count = 0;
+
+ if (num_regs > 0) {
+ count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS);
+ count += num_regs * 2;
+ }
+
+ return count;
+}
+
+static struct i915_oa_config_bo *
+alloc_oa_config_buffer(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_oa_config_bo *oa_bo;
+ struct i915_gem_ww_ctx ww;
+ size_t config_length = 0;
+ u32 *cs;
+ int err;
+
+ oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL);
+ if (!oa_bo)
+ return ERR_PTR(-ENOMEM);
+
+ config_length += num_lri_dwords(oa_config->mux_regs_len);
+ config_length += num_lri_dwords(oa_config->b_counter_regs_len);
+ config_length += num_lri_dwords(oa_config->flex_regs_len);
+ config_length += 3; /* MI_BATCH_BUFFER_START */
+ config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_shmem(stream->perf->i915, config_length);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_free;
+ }
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(obj, &ww);
+ if (err)
+ goto out_ww;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_ww;
+ }
+
+ cs = write_cs_mi_lri(cs,
+ oa_config->mux_regs,
+ oa_config->mux_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->b_counter_regs,
+ oa_config->b_counter_regs_len);
+ cs = write_cs_mi_lri(cs,
+ oa_config->flex_regs,
+ oa_config->flex_regs_len);
+
+ /* Jump into the active wait. */
+ *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ?
+ MI_BATCH_BUFFER_START :
+ MI_BATCH_BUFFER_START_GEN8);
+ *cs++ = i915_ggtt_offset(stream->noa_wait);
+ *cs++ = 0;
+
+ i915_gem_object_flush_map(obj);
+ __i915_gem_object_release_map(obj);
+
+ oa_bo->vma = i915_vma_instance(obj,
+ &stream->engine->gt->ggtt->vm,
+ NULL);
+ if (IS_ERR(oa_bo->vma)) {
+ err = PTR_ERR(oa_bo->vma);
+ goto out_ww;
+ }
- /* PRM:
+ oa_bo->oa_config = i915_oa_config_get(oa_config);
+ llist_add(&oa_bo->node, &stream->oa_config_bos);
+
+out_ww:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+
+ if (err)
+ i915_gem_object_put(obj);
+err_free:
+ if (err) {
+ kfree(oa_bo);
+ return ERR_PTR(err);
+ }
+ return oa_bo;
+}
+
+static struct i915_vma *
+get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
+{
+ struct i915_oa_config_bo *oa_bo;
+
+ /*
+ * Look for the buffer in the already allocated BOs attached
+ * to the stream.
+ */
+ llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) {
+ if (oa_bo->oa_config == oa_config &&
+ memcmp(oa_bo->oa_config->uuid,
+ oa_config->uuid,
+ sizeof(oa_config->uuid)) == 0)
+ goto out;
+ }
+
+ oa_bo = alloc_oa_config_buffer(stream, oa_config);
+ if (IS_ERR(oa_bo))
+ return ERR_CAST(oa_bo);
+
+out:
+ return i915_vma_get(oa_bo->vma);
+}
+
+static int
+emit_oa_config(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config,
+ struct intel_context *ce,
+ struct i915_active *active)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ vma = get_oa_vma(stream, oa_config);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ err = i915_gem_object_lock(vma->obj, &ww);
+ if (err)
+ goto err;
+
+ err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto err;
+
+ intel_engine_pm_get(ce->engine);
+ rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma_unpin;
+ }
+
+ if (!IS_ERR_OR_NULL(active)) {
+ /* After all individual context modifications */
+ err = i915_request_await_active(rq, active,
+ I915_ACTIVE_AWAIT_ACTIVE);
+ if (err)
+ goto err_add_request;
+
+ err = i915_active_add_request(active, rq);
+ if (err)
+ goto err_add_request;
+ }
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto err_add_request;
+
+ err = rq->engine->emit_bb_start(rq,
+ i915_vma_offset(vma), 0,
+ I915_DISPATCH_SECURE);
+ if (err)
+ goto err_add_request;
+
+err_add_request:
+ i915_request_add(rq);
+err_vma_unpin:
+ i915_vma_unpin(vma);
+err:
+ if (err == -EDEADLK) {
+ err = i915_gem_ww_ctx_backoff(&ww);
+ if (!err)
+ goto retry;
+ }
+
+ i915_gem_ww_ctx_fini(&ww);
+ i915_vma_put(vma);
+ return err;
+}
+
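+/*
+ * Requests reconfiguring the OA unit are emitted on the stream's pinned
+ * context when filtering a single context, and on the engine's kernel
+ * context otherwise.
+ */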
+static struct intel_context *oa_context(struct i915_perf_stream *stream)
+{
+ return stream->pinned_ctx ?: stream->engine->kernel_context;
+}
+
+static int
+hsw_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
+{
+ struct intel_uncore *uncore = stream->uncore;
+
+ /*
+ * PRM:
*
* OA unit is using “crclk” for its functionality. When trunk
* level clock gating takes place, OA clock would be gated,
@@ -1575,53 +2394,49 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
* count the events from non-render domain. Unit level clock
* gating for RCS should also be disabled.
*/
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN7_DOP_CLOCK_GATE_ENABLE));
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ GEN7_DOP_CLOCK_GATE_ENABLE, 0);
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
+
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
+}
- /* It apparently takes a fairly long time for a new MUX
- * configuration to be be applied after these register writes.
- * This delay duration was derived empirically based on the
- * render_basic config but hopefully it covers the maximum
- * configuration latency.
- *
- * As a fallback, the checks in _append_oa_reports() to skip
- * invalid OA reports do also seem to work to discard reports
- * generated before this config has completed - albeit not
- * silently.
- *
- * Unfortunately this is essentially a magic number, since we
- * don't currently know of a reliable mechanism for predicting
- * how long the MUX config will take to apply and besides
- * seeing invalid reports we don't know of a reliable way to
- * explicitly check that the MUX config has landed.
- *
- * It's even possible we've miss characterized the underlying
- * problem - it just seems like the simplest explanation why
- * a delay at this location would mitigate any invalid reports.
- */
- usleep_range(15000, 20000);
+static void hsw_disable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+ intel_uncore_rmw(uncore, GEN6_UCGCTL1,
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0);
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN7_DOP_CLOCK_GATE_ENABLE);
- return 0;
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
-static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
+static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
+ i915_reg_t reg)
{
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
- ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
- GEN7_DOP_CLOCK_GATE_ENABLE));
+ u32 mmio = i915_mmio_reg_offset(reg);
+ int i;
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
-}
+ /*
+ * This arbitrary default will select the 'EU FPU0 Pipeline
+ * Active' event. In the future it's anticipated that there
+ * will be an explicit 'No Event' we can select, but not yet...
+ */
+ if (!oa_config)
+ return 0;
+
+ for (i = 0; i < oa_config->flex_regs_len; i++) {
+ if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
+ return oa_config->flex_regs[i].value;
+ }
+ return 0;
+}
/*
* NB: It must always remain pointer safe to run this even if the OA unit
* has been disabled.
@@ -1629,15 +2444,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* It's fine to put out-of-date values into these per-context registers
* in the case that the OA unit has been disabled.
*/
-static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
+static void
+gen8_update_reg_state_unlocked(const struct intel_context *ce,
+ const struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = ctx->i915;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
- i915_reg_t flex_regs[] = {
+ static const i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
EU_PERF_CNTL1,
EU_PERF_CNTL2,
@@ -1646,37 +2460,201 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
EU_PERF_CNTL5,
EU_PERF_CNTL6,
};
+ u32 *reg_state = ce->lrc_reg_state;
int i;
- CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
- GEN8_OA_COUNTER_RESUME);
+ reg_state[ctx_oactxctrl + 1] =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
- for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
- u32 state_offset = ctx_flexeu0 + i * 2;
- u32 mmio = i915_mmio_reg_offset(flex_regs[i]);
+ for (i = 0; i < ARRAY_SIZE(flex_regs); i++)
+ reg_state[ctx_flexeu0 + i * 2 + 1] =
+ oa_config_flex_reg(stream->oa_config, flex_regs[i]);
+}
- /*
- * This arbitrary default will select the 'EU FPU0 Pipeline
- * Active' event. In the future it's anticipated that there
- * will be an explicit 'No Event' we can select, but not yet...
- */
- u32 value = 0;
+struct flex {
+ i915_reg_t reg;
+ u32 offset;
+ u32 value;
+};
+
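+/*
+ * A flex register update can be applied in two ways: gen8_store_flex()
+ * pokes the saved context image in memory from a kernel request
+ * serialised against the target context, while gen8_load_flex() emits
+ * MI_LRI from the target context itself.
+ */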
+static int
+gen8_store_flex(struct i915_request *rq,
+ struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ u32 offset;
+ u32 *cs;
- if (oa_config) {
- u32 j;
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- for (j = 0; j < oa_config->flex_regs_len; j++) {
- if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
- value = oa_config->flex_regs[j].value;
- break;
- }
- }
- }
+ offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
+ do {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset + flex->offset * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = flex->value;
+ } while (flex++, --count);
+
+ intel_ring_advance(rq, cs);
- CTX_REG(reg_state, state_offset, flex_regs[i], value);
+ return 0;
+}
+
+static int
+gen8_load_flex(struct i915_request *rq,
+ struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(!count || count > 63);
+
+ cs = intel_ring_begin(rq, 2 * count + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(count);
+ do {
+ *cs++ = i915_mmio_reg_offset(flex->reg);
+ *cs++ = flex->value;
+ } while (flex++, --count);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen8_modify_context(struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ /* Serialise with the remote context */
+ err = intel_context_prepare_remote_request(ce, rq);
+ if (err == 0)
+ err = gen8_store_flex(rq, ce, flex, count);
+
+ i915_request_add(rq);
+ return err;
+}
+
+static int
+gen8_modify_self(struct intel_context *ce,
+ const struct flex *flex, unsigned int count,
+ struct i915_active *active)
+{
+ struct i915_request *rq;
+ int err;
+
+ intel_engine_pm_get(ce->engine);
+ rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (!IS_ERR_OR_NULL(active)) {
+ err = i915_active_add_request(active, rq);
+ if (err)
+ goto err_add_request;
}
+
+ err = gen8_load_flex(rq, ce, flex, count);
+ if (err)
+ goto err_add_request;
+
+err_add_request:
+ i915_request_add(rq);
+ return err;
+}
+
+static int gen8_configure_context(struct i915_perf_stream *stream,
+ struct i915_gem_context *ctx,
+ struct flex *flex, unsigned int count)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ GEM_BUG_ON(ce == ce->engine->kernel_context);
+
+ if (ce->engine->class != RENDER_CLASS)
+ continue;
+
+ /* Otherwise OA settings will be set upon first use */
+ if (!intel_context_pin_if_active(ce))
+ continue;
+
+ flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
+ err = gen8_modify_context(ce, flex, count);
+
+ intel_context_unpin(ce);
+ if (err)
+ break;
+ }
+ i915_gem_context_unlock_engines(ctx);
+
+ return err;
+}
+
+static int gen12_configure_oar_context(struct i915_perf_stream *stream,
+ struct i915_active *active)
+{
+ int err;
+ struct intel_context *ce = stream->pinned_ctx;
+ u32 format = stream->oa_buffer.format->format;
+ u32 offset = stream->perf->ctx_oactxctrl_offset;
+ struct flex regs_context[] = {
+ {
+ GEN8_OACTXCONTROL,
+ offset + 1,
+ active ? GEN8_OA_COUNTER_RESUME : 0,
+ },
+ };
+	/*
+	 * Offsets in regs_lri are not used since this configuration is only
+ * applied using LRI. Initialize the correct offsets for posterity.
+ */
+#define GEN12_OAR_OACONTROL_OFFSET 0x5B0
+ struct flex regs_lri[] = {
+ {
+ GEN12_OAR_OACONTROL,
+ GEN12_OAR_OACONTROL_OFFSET + 1,
+ (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
+ (active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+ },
+ {
+ RING_CONTEXT_CONTROL(ce->engine->mmio_base),
+ CTX_CONTEXT_CONTROL,
+ _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
+ active ?
+ GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
+ 0)
+ },
+ };
+
+ /* Modify the context image of pinned context with regs_context */
+ err = intel_context_lock_pinned(ce);
+ if (err)
+ return err;
+
+ err = gen8_modify_context(ce, regs_context,
+ ARRAY_SIZE(regs_context));
+ intel_context_unlock_pinned(ce);
+ if (err)
+ return err;
+
+ /* Apply regs_lri using LRI with pinned context */
+ return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
}
/*
@@ -1702,17 +2680,21 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
* per-context OA state.
*
* Note: it's only the RCS/Render context that has any OA state.
+ * Note: the first flex register passed must always be R_PWR_CLK_STATE
*/
-static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
+static int
+oa_configure_all_contexts(struct i915_perf_stream *stream,
+ struct flex *regs,
+ size_t num_regs,
+ struct i915_active *active)
{
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
- unsigned int map_type = i915_coherent_map_type(dev_priv);
- struct i915_gem_context *ctx;
- struct i915_request *rq;
- int ret;
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_engine_cs *engine;
+ struct intel_gt *gt = stream->engine->gt;
+ struct i915_gem_context *ctx, *cn;
+ int err;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&gt->perf.lock);
/*
* The OA register config is setup through the context image. This image
@@ -1724,53 +2706,100 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* this might leave small interval of time where the OA unit is
* configured at an invalid sampling period.
*
- * So far the best way to work around this issue seems to be draining
- * the GPU from any submitted work.
+ * Note that since we emit all requests from a single ring, there
+ * is still an implicit global barrier here that may cause a high
+ * priority context to wait for an otherwise independent low priority
+ * context. Contexts idle at the time of reconfiguration are not
+ * trapped behind the barrier.
*/
- ret = i915_gem_wait_for_idle(dev_priv,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- /* Update all contexts now that we've stalled the submission. */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = to_intel_context(ctx, engine);
- u32 *regs;
-
- /* OA settings will be set upon first use */
- if (!ce->state)
+ spin_lock(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+ if (!kref_get_unless_zero(&ctx->ref))
continue;
- regs = i915_gem_object_pin_map(ce->state->obj, map_type);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
-
- ce->state->obj->mm.dirty = true;
- regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
+ spin_unlock(&i915->gem.contexts.lock);
- gen8_update_reg_state_unlocked(ctx, regs, oa_config);
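+		/*
+		 * The reference taken above keeps ctx alive while the lock
+		 * is dropped to sleep in gen8_configure_context();
+		 * list_safe_reset_next() below revalidates the iterator
+		 * once the lock is retaken.
+		 */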
+ err = gen8_configure_context(stream, ctx, regs, num_regs);
+ if (err) {
+ i915_gem_context_put(ctx);
+ return err;
+ }
- i915_gem_object_unpin_map(ce->state->obj);
+ spin_lock(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
}
+ spin_unlock(&i915->gem.contexts.lock);
/*
- * Apply the configuration by doing one context restore of the edited
- * context image.
+ * After updating all other contexts, we need to modify ourselves.
+ * If we don't modify the kernel_context, we do not get events while
+ * idle.
*/
- rq = i915_request_alloc(engine, dev_priv->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ for_each_uabi_engine(engine, i915) {
+ struct intel_context *ce = engine->kernel_context;
- i915_request_add(rq);
+ if (engine->class != RENDER_CLASS)
+ continue;
+
+ regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
+
+ err = gen8_modify_self(ce, regs, num_regs, active);
+ if (err)
+ return err;
+ }
return 0;
}
-static int gen8_enable_metric_set(struct i915_perf_stream *stream)
+static int
+lrc_configure_all_contexts(struct i915_perf_stream *stream,
+ const struct i915_oa_config *oa_config,
+ struct i915_active *active)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
+ u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset;
+ /* The MMIO offsets for Flex EU registers aren't contiguous */
+ const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1)
+ struct flex regs[] = {
+ {
+ GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
+ CTX_R_PWR_CLK_STATE,
+ },
+ {
+ GEN8_OACTXCONTROL,
+ ctx_oactxctrl + 1,
+ },
+ { EU_PERF_CNTL0, ctx_flexeuN(0) },
+ { EU_PERF_CNTL1, ctx_flexeuN(1) },
+ { EU_PERF_CNTL2, ctx_flexeuN(2) },
+ { EU_PERF_CNTL3, ctx_flexeuN(3) },
+ { EU_PERF_CNTL4, ctx_flexeuN(4) },
+ { EU_PERF_CNTL5, ctx_flexeuN(5) },
+ { EU_PERF_CNTL6, ctx_flexeuN(6) },
+ };
+#undef ctx_flexeuN
+ int i;
+
+ regs[1].value =
+ (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME;
+
+ for (i = 2; i < ARRAY_SIZE(regs); i++)
+ regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
+
+ return oa_configure_all_contexts(stream,
+ regs, ARRAY_SIZE(regs),
+ active);
+}
+
+static int
+gen8_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@@ -1796,10 +2825,10 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN(dev_priv, 9, 11)) {
- I915_WRITE(GEN8_OA_DEBUG,
- _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
+ if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
+ intel_uncore_write(uncore, GEN8_OA_DEBUG,
+ _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
}
/*
@@ -1807,45 +2836,146 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* to make sure all slices/subslices are ON before writing to NOA
* registers.
*/
- ret = gen8_configure_all_contexts(dev_priv, oa_config);
+ ret = lrc_configure_all_contexts(stream, oa_config, active);
if (ret)
return ret;
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
+}
- config_oa_regs(dev_priv, oa_config->b_counter_regs,
- oa_config->b_counter_regs_len);
+static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
+{
+ return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
+ (stream->sample_flags & SAMPLE_OA_REPORT) ?
+ 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+}
- return 0;
+static int
+gen12_enable_metric_set(struct i915_perf_stream *stream,
+ struct i915_active *active)
+{
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct intel_uncore *uncore = stream->uncore;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ u32 sqcnt1;
+ int ret;
+
+ /*
+ * Wa_1508761755
+ * EU NOA signals behave incorrectly if EU clock gating is enabled.
+ * Disable thread stall DOP gating and EU DOP gating.
+ */
+ if (IS_DG2(i915)) {
+ intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
+ _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
+ }
+
+ intel_uncore_write(uncore, __oa_regs(stream)->oa_debug,
+ /* Disable clk ratio reports, like previous Gens. */
+ _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
+ /*
+ * If the user didn't require OA reports, instruct
+ * the hardware not to emit ctx switch reports.
+ */
+ oag_report_ctx_switches(stream));
+
+ intel_uncore_write(uncore, __oa_regs(stream)->oa_ctx_ctrl, periodic ?
+ (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME |
+ GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE |
+ (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT))
+ : 0);
+
+ /*
+ * Initialize Super Queue Internal Cnt Register
+ * Set PMON Enable in order to collect valid metrics.
+ * Enable bytes per clock reporting in OA.
+ */
+ sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
+ (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
+
+ intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
+
+ /*
+	 * For Gen12, performance counters are context saved/restored.
+	 * Only enable the OAR counters for the context that requested
+	 * this.
+ */
+ if (stream->ctx) {
+ ret = gen12_configure_oar_context(stream, active);
+ if (ret)
+ return ret;
+ }
+
+ return emit_oa_config(stream,
+ stream->oa_config, oa_context(stream),
+ active);
}
-static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen8_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct intel_uncore *uncore = stream->uncore;
+
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL);
+ lrc_configure_all_contexts(stream, NULL, NULL);
- I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
- ~GT_NOA_ENABLE));
+ intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
}
-static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
+static void gen11_disable_metric_set(struct i915_perf_stream *stream)
{
+ struct intel_uncore *uncore = stream->uncore;
+
/* Reset all contexts' slices/subslices configurations. */
- gen8_configure_all_contexts(dev_priv, NULL);
+ lrc_configure_all_contexts(stream, NULL, NULL);
+
+ /* Make sure we disable noa to save power. */
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
+}
+
+static void gen12_disable_metric_set(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+ struct drm_i915_private *i915 = stream->perf->i915;
+ u32 sqcnt1;
+
+ /*
+ * Wa_1508761755: Enable thread stall DOP gating and EU DOP gating.
+ */
+ if (IS_DG2(i915)) {
+ intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
+ _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
+ intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
+ _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
+ }
+
+	/* disable the context save/restore of the OAR counters */
+ if (stream->ctx)
+ gen12_configure_oar_context(stream, NULL);
/* Make sure we disable noa to save power. */
- I915_WRITE(RPM_CONFIG1,
- I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
+ intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
+
+ sqcnt1 = GEN12_SQCNT1_PMON_ENABLE |
+ (HAS_OA_BPC_REPORTING(i915) ? GEN12_SQCNT1_OABPC : 0);
+
+ /* Reset PMON Enable to save power. */
+ intel_uncore_rmw(uncore, GEN12_SQCNT1, sqcnt1, 0);
}
static void gen7_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
struct i915_gem_context *ctx = stream->ctx;
- u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
- bool periodic = dev_priv->perf.oa.periodic;
- u32 period_exponent = dev_priv->perf.oa.period_exponent;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+ u32 ctx_id = stream->specific_ctx_id;
+ bool periodic = stream->periodic;
+ u32 period_exponent = stream->period_exponent;
+ u32 report_format = stream->oa_buffer.format->format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -1856,22 +2986,22 @@ static void gen7_oa_enable(struct i915_perf_stream *stream)
* on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
*/
- gen7_init_oa_buffer(dev_priv);
-
- I915_WRITE(GEN7_OACONTROL,
- (ctx_id & GEN7_OACONTROL_CTX_MASK) |
- (period_exponent <<
- GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
- (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
- (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
- (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
- GEN7_OACONTROL_ENABLE);
+ gen7_init_oa_buffer(stream);
+
+ intel_uncore_write(uncore, GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
}
static void gen8_oa_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+ struct intel_uncore *uncore = stream->uncore;
+ u32 report_format = stream->oa_buffer.format->format;
/*
* Reset buf pointers so we don't forward reports from before now.
@@ -1882,16 +3012,37 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
* on the assumption that certain fields are written to zeroed
	 * memory, which this helps maintain.
*/
- gen8_init_oa_buffer(dev_priv);
+ gen8_init_oa_buffer(stream);
/*
* Note: we don't rely on the hardware to perform single context
* filtering and instead filter on the cpu based on the context-id
* field of reports
*/
- I915_WRITE(GEN8_OACONTROL, (report_format <<
- GEN8_OA_REPORT_FORMAT_SHIFT) |
- GEN8_OA_COUNTER_ENABLE);
+ intel_uncore_write(uncore, GEN8_OACONTROL,
+ (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) |
+ GEN8_OA_COUNTER_ENABLE);
+}
+
+static void gen12_oa_enable(struct i915_perf_stream *stream)
+{
+ const struct i915_perf_regs *regs;
+ u32 val;
+
+ /*
+ * If we don't want OA reports from the OA buffer, then we don't even
+ * need to program the OAG unit.
+ */
+ if (!(stream->sample_flags & SAMPLE_OA_REPORT))
+ return;
+
+ gen12_init_oa_buffer(stream);
+
+ regs = __oa_regs(stream);
+ val = (stream->oa_buffer.format->format << regs->oa_ctrl_counter_format_shift) |
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE;
+
+ intel_uncore_write(stream->uncore, regs->oa_ctrl, val);
}
/**
@@ -1905,36 +3056,59 @@ static void gen8_oa_enable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ stream->pollin = false;
- dev_priv->perf.oa.ops.oa_enable(stream);
+ stream->perf->ops.oa_enable(stream);
- if (dev_priv->perf.oa.periodic)
- hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
- ns_to_ktime(POLL_PERIOD),
+ if (stream->sample_flags & SAMPLE_OA_REPORT)
+ hrtimer_start(&stream->poll_check_timer,
+ ns_to_ktime(stream->poll_oa_period),
HRTIMER_MODE_REL_PINNED);
}
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
- I915_WRITE(GEN7_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN7_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = stream->uncore;
- I915_WRITE(GEN8_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN8_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
+}
+
+static void gen12_oa_disable(struct i915_perf_stream *stream)
+{
+ struct intel_uncore *uncore = stream->uncore;
+
+ intel_uncore_write(uncore, __oa_regs(stream)->oa_ctrl, 0);
+ if (intel_wait_for_register(uncore,
+ __oa_regs(stream)->oa_ctrl,
+ GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
+ 50))
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
+
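+	/* Flush the OA unit's TLB so stale GGTT translations are not reused. */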
+ intel_uncore_write(uncore, GEN12_OA_TLB_INV_CR, 1);
+ if (intel_wait_for_register(uncore,
+ GEN12_OA_TLB_INV_CR,
+ 1, 0,
+ 50))
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA tlb invalidate timed out\n");
}
/**
@@ -1947,12 +3121,10 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
*/
static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
-
- dev_priv->perf.oa.ops.oa_disable(stream);
+ stream->perf->ops.oa_disable(stream);
- if (dev_priv->perf.oa.periodic)
- hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
+ if (stream->sample_flags & SAMPLE_OA_REPORT)
+ hrtimer_cancel(&stream->poll_check_timer);
}
static const struct i915_perf_stream_ops i915_oa_stream_ops = {
@@ -1964,6 +3136,81 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
.read = i915_oa_read,
};
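+/*
+ * Enable the metric set and then synchronously wait for the
+ * configuration requests to execute, so that the stream only starts
+ * producing reports once the requested OA config has been applied.
+ */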
+static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
+{
+ struct i915_active *active;
+ int err;
+
+ active = i915_active_create();
+ if (!active)
+ return -ENOMEM;
+
+ err = stream->perf->ops.enable_metric_set(stream, active);
+ if (err == 0)
+ __i915_active_wait(active, TASK_UNINTERRUPTIBLE);
+
+ i915_active_put(active);
+ return err;
+}
+
+static void
+get_default_sseu_config(struct intel_sseu *out_sseu,
+ struct intel_engine_cs *engine)
+{
+ const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu;
+
+ *out_sseu = intel_sseu_from_device_info(devinfo_sseu);
+
+ if (GRAPHICS_VER(engine->i915) == 11) {
+ /*
+ * We only need subslice count so it doesn't matter which ones
+	 * we select - just keep the lower half of all available
+	 * subslices per slice.
+ */
+ out_sseu->subslice_mask =
+ ~(~0 << (hweight8(out_sseu->subslice_mask) / 2));
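+		/* e.g. 8 available subslices yields subslice_mask = 0xf */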
+ out_sseu->slice_mask = 0x1;
+ }
+}
+
+static int
+get_sseu_config(struct intel_sseu *out_sseu,
+ struct intel_engine_cs *engine,
+ const struct drm_i915_gem_context_param_sseu *drm_sseu)
+{
+ if (drm_sseu->engine.engine_class != engine->uabi_class ||
+ drm_sseu->engine.engine_instance != engine->uabi_instance)
+ return -EINVAL;
+
+ return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu);
+}
+
+/*
+ * The OA timestamp frequency equals the CS timestamp frequency on most
+ * platforms. On some platforms the OA unit ignores the CTC_SHIFT and the two
+ * frequencies differ. In such cases, return the adjusted CS timestamp
+ * frequency to the user.
+ */
+u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt = to_gt(i915);
+
+ /* Wa_18013179988 */
+ if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+ with_intel_runtime_pm(to_gt(i915)->uncore->rpm, wakeref)
+ reg = intel_uncore_read(to_gt(i915)->uncore, RPM_CONFIG0);
+
+ shift = REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK,
+ reg);
+
+ return to_gt(i915)->clock_frequency << (3 - shift);
+ }
+
+ return to_gt(i915)->clock_frequency;
+}
+
/**
* i915_oa_stream_init - validate combined props for OA stream and init
* @stream: An i915 perf stream
@@ -1986,92 +3233,98 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- int format_size;
+ struct drm_i915_private *i915 = stream->perf->i915;
+ struct i915_perf *perf = stream->perf;
+ struct i915_perf_group *g;
int ret;
- /* If the sysfs metrics/ directory wasn't registered for some
+ if (!props->engine) {
+ drm_dbg(&stream->perf->i915->drm,
+ "OA engine not specified\n");
+ return -EINVAL;
+ }
+ g = props->engine->oa_group;
+
+ /*
+ * If the sysfs metrics/ directory wasn't registered for some
* reason then don't let userspace try their luck with config
* IDs
*/
- if (!dev_priv->perf.metrics_kobj) {
- DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
+ if (!perf->metrics_kobj) {
+ drm_dbg(&stream->perf->i915->drm,
+ "OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
- if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
- DRM_DEBUG("Only OA report sampling supported\n");
+ if (!(props->sample_flags & SAMPLE_OA_REPORT) &&
+ (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) {
+ drm_dbg(&stream->perf->i915->drm,
+ "Only OA report sampling supported\n");
return -EINVAL;
}
- if (!dev_priv->perf.oa.ops.enable_metric_set) {
- DRM_DEBUG("OA unit not supported\n");
+ if (!perf->ops.enable_metric_set) {
+ drm_dbg(&stream->perf->i915->drm,
+ "OA unit not supported\n");
return -ENODEV;
}
- /* To avoid the complexity of having to accurately filter
+ /*
+ * To avoid the complexity of having to accurately filter
* counter reports and marshal to the appropriate client
* we currently only allow exclusive access
*/
- if (dev_priv->perf.oa.exclusive_stream) {
- DRM_DEBUG("OA unit already in use\n");
+ if (g->exclusive_stream) {
+ drm_dbg(&stream->perf->i915->drm,
+ "OA unit already in use\n");
return -EBUSY;
}
if (!props->oa_format) {
- DRM_DEBUG("OA report format not specified\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "OA report format not specified\n");
return -EINVAL;
}
- /* We set up some ratelimit state to potentially throttle any _NOTES
- * about spurious, invalid OA reports which we don't forward to
- * userspace.
- *
- * The initialization is associated with opening the stream (not driver
- * init) considering we print a _NOTE about any throttling when closing
- * the stream instead of waiting until driver _fini which no one would
- * ever see.
- *
- * Using the same limiting factors as printk_ratelimit()
- */
- ratelimit_state_init(&dev_priv->perf.oa.spurious_report_rs,
- 5 * HZ, 10);
- /* Since we use a DRM_NOTE for spurious reports it would be
- * inconsistent to let __ratelimit() automatically print a warning for
- * throttling.
- */
- ratelimit_set_flags(&dev_priv->perf.oa.spurious_report_rs,
- RATELIMIT_MSG_ON_RELEASE);
+ stream->engine = props->engine;
+ stream->uncore = stream->engine->gt->uncore;
stream->sample_size = sizeof(struct drm_i915_perf_record_header);
- format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
-
- stream->sample_flags |= SAMPLE_OA_REPORT;
- stream->sample_size += format_size;
-
- dev_priv->perf.oa.oa_buffer.format_size = format_size;
- if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
+ stream->oa_buffer.format = &perf->oa_formats[props->oa_format];
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0))
return -EINVAL;
- dev_priv->perf.oa.oa_buffer.format =
- dev_priv->perf.oa.oa_formats[props->oa_format].format;
+ stream->sample_flags = props->sample_flags;
+ stream->sample_size += stream->oa_buffer.format->size;
- dev_priv->perf.oa.periodic = props->oa_periodic;
- if (dev_priv->perf.oa.periodic)
- dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
+ stream->hold_preemption = props->hold_preemption;
+
+ stream->periodic = props->oa_periodic;
+ if (stream->periodic)
+ stream->period_exponent = props->oa_period_exponent;
if (stream->ctx) {
ret = oa_get_render_ctx_id(stream);
if (ret) {
- DRM_DEBUG("Invalid context id to filter with\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "Invalid context id to filter with\n");
return ret;
}
}
- ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config);
+ ret = alloc_noa_wait(stream);
if (ret) {
- DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set);
+ drm_dbg(&stream->perf->i915->drm,
+ "Unable to allocate NOA wait batch buffer\n");
+ goto err_noa_wait_alloc;
+ }
+
+ stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set);
+ if (!stream->oa_config) {
+ drm_dbg(&stream->perf->i915->drm,
+ "Invalid OA config id=%i\n", props->metrics_set);
+ ret = -EINVAL;
goto err_config;
}
@@ -2087,106 +3340,71 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- intel_runtime_pm_get(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_engine_pm_get(stream->engine);
+ intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL);
- ret = alloc_oa_buffer(dev_priv);
+ ret = alloc_oa_buffer(stream);
if (ret)
goto err_oa_buf_alloc;
- ret = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (ret)
- goto err_lock;
+ stream->ops = &i915_oa_stream_ops;
- ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
+ stream->engine->gt->perf.sseu = props->sseu;
+ WRITE_ONCE(g->exclusive_stream, stream);
+
+ ret = i915_perf_stream_enable_sync(stream);
if (ret) {
- DRM_DEBUG("Unable to enable metric set\n");
+ drm_dbg(&stream->perf->i915->drm,
+ "Unable to enable metric set\n");
goto err_enable;
}
- stream->ops = &i915_oa_stream_ops;
-
- dev_priv->perf.oa.exclusive_stream = stream;
+ drm_dbg(&stream->perf->i915->drm,
+ "opening stream oa config uuid=%s\n",
+ stream->oa_config->uuid);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ hrtimer_setup(&stream->poll_check_timer, oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ init_waitqueue_head(&stream->poll_wq);
+ spin_lock_init(&stream->oa_buffer.ptr_lock);
+ mutex_init(&stream->lock);
return 0;
err_enable:
- dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ WRITE_ONCE(g->exclusive_stream, NULL);
+ perf->ops.disable_metric_set(stream);
-err_lock:
- free_oa_buffer(dev_priv);
+ free_oa_buffer(stream);
err_oa_buf_alloc:
- put_oa_config(dev_priv, stream->oa_config);
+ intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL);
+ intel_engine_pm_put(stream->engine);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ free_oa_configs(stream);
err_config:
+ free_noa_wait(stream);
+
+err_noa_wait_alloc:
if (stream->ctx)
oa_put_render_ctx_id(stream);
return ret;
}
-void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- u32 *reg_state)
+void i915_oa_init_reg_state(const struct intel_context *ce,
+ const struct intel_engine_cs *engine)
{
struct i915_perf_stream *stream;
- if (engine->id != RCS)
+ if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.oa.exclusive_stream;
- if (stream)
- gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
-}
-
-/**
- * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
- * @stream: An i915 perf stream
- * @file: An i915 perf stream file
- * @buf: destination buffer given by userspace
- * @count: the number of bytes userspace wants to read
- * @ppos: (inout) file seek position (unused)
- *
- * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
- * ensure that if we've successfully copied any data then reporting that takes
- * precedence over any internal error status, so the data isn't lost.
- *
- * For example ret will be -ENOSPC whenever there is more buffered data than
- * can be copied to userspace, but that's only interesting if we weren't able
- * to copy some data because it implies the userspace buffer is too small to
- * receive a single record (and we never split records).
- *
- * Another case with ret == -EFAULT is more of a grey area since it would seem
- * like bad form for userspace to ask us to overrun its buffer, but the user
- * knows best:
- *
- * http://yarchive.net/comp/linux/partial_reads_writes.html
- *
- * Returns: The number of bytes copied or a negative error code on failure.
- */
-static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
- struct file *file,
- char __user *buf,
- size_t count,
- loff_t *ppos)
-{
- /* Note we keep the offset (aka bytes read) separate from any
- * error status so that the final check for whether we return
- * the bytes read with a higher precedence than any error (see
- * comment below) doesn't need to be handled/duplicated in
- * stream->ops->read() implementations.
- */
- size_t offset = 0;
- int ret = stream->ops->read(stream, buf, count, &offset);
-
- return offset ?: (ret ?: -EAGAIN);
+	/* The group's exclusive_stream is serialised by lrc_configure_all_contexts() */
+ stream = READ_ONCE(engine->oa_group->exclusive_stream);
+ if (stream && GRAPHICS_VER(stream->perf->i915) < 12)
+ gen8_update_reg_state_unlocked(ce, stream);
}
/**
@@ -2213,14 +3431,14 @@ static ssize_t i915_perf_read(struct file *file,
loff_t *ppos)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
- ssize_t ret;
+ size_t offset = 0;
+ int ret;
/* To ensure it's handled consistently we simply treat all reads of a
* disabled stream as an error. In particular it might otherwise lead
* to a deadlock for blocking file descriptors...
*/
- if (!stream->enabled)
+ if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT))
return -EIO;
if (!(file->f_flags & O_NONBLOCK)) {
@@ -2236,15 +3454,14 @@ static ssize_t i915_perf_read(struct file *file,
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_read_locked(stream, file,
- buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
- } while (ret == -EAGAIN);
+ mutex_lock(&stream->lock);
+ ret = stream->ops->read(stream, buf, count, &offset);
+ mutex_unlock(&stream->lock);
+ } while (!offset && !ret);
} else {
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_read_locked(stream, file, buf, count, ppos);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&stream->lock);
+ ret = stream->ops->read(stream, buf, count, &offset);
+ mutex_unlock(&stream->lock);
}
/* We allow the poll checking to sometimes report false positive EPOLLIN
@@ -2254,36 +3471,35 @@ static ssize_t i915_perf_read(struct file *file,
* and read() returning -EAGAIN. Clearing the oa.pollin state here
* effectively ensures we back off until the next hrtimer callback
* before reporting another EPOLLIN event.
+ * The exception to this is if ops->read() returned -ENOSPC which means
+ * that more OA data is available than could fit in the user provided
+ * buffer. In this case we want the next poll() call to not block.
*/
- if (ret >= 0 || ret == -EAGAIN) {
- /* Maybe make ->pollin per-stream state if we support multiple
- * concurrent streams in the future.
- */
- dev_priv->perf.oa.pollin = false;
- }
+ if (ret != -ENOSPC)
+ stream->pollin = false;
- return ret;
+ /* Possible values for ret are 0, -EFAULT, -ENOSPC, -EIO, ... */
+ return offset ?: (ret ?: -EAGAIN);
}
static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
{
- struct drm_i915_private *dev_priv =
- container_of(hrtimer, typeof(*dev_priv),
- perf.oa.poll_check_timer);
+ struct i915_perf_stream *stream =
+ container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (oa_buffer_check_unlocked(dev_priv)) {
- dev_priv->perf.oa.pollin = true;
- wake_up(&dev_priv->perf.oa.poll_wq);
+ if (oa_buffer_check_unlocked(stream)) {
+ stream->pollin = true;
+ wake_up(&stream->poll_wq);
}
- hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
+ hrtimer_forward_now(hrtimer,
+ ns_to_ktime(stream->poll_oa_period));
return HRTIMER_RESTART;
}
/**
* i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
- * @dev_priv: i915 device instance
* @stream: An i915 perf stream
* @file: An i915 perf stream file
* @wait: poll() state table
@@ -2292,15 +3508,11 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
* &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
* will be woken for new stream data.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
- * with any non-file-operation driver hooks.
- *
* Returns: any poll events that are ready without sleeping
*/
-static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
- struct i915_perf_stream *stream,
- struct file *file,
- poll_table *wait)
+static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
{
__poll_t events = 0;
@@ -2312,7 +3524,7 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
* the hrtimer/oa_poll_check_timer_cb to notify us when there are
* samples to read.
*/
- if (dev_priv->perf.oa.pollin)
+ if (stream->pollin)
events |= EPOLLIN;
return events;
@@ -2334,12 +3546,11 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv,
static __poll_t i915_perf_poll(struct file *file, poll_table *wait)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
__poll_t ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_lock(&stream->lock);
+ ret = i915_perf_poll_locked(stream, file, wait);
+ mutex_unlock(&stream->lock);
return ret;
}
@@ -2364,6 +3575,9 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
if (stream->ops->enable)
stream->ops->enable(stream);
+
+ if (stream->hold_preemption)
+ intel_context_set_nopreempt(stream->pinned_ctx);
}
/**
@@ -2388,19 +3602,53 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
/* Allow stream->ops->disable() to refer to this */
stream->enabled = false;
+ if (stream->hold_preemption)
+ intel_context_clear_nopreempt(stream->pinned_ctx);
+
if (stream->ops->disable)
stream->ops->disable(stream);
}
+static long i915_perf_config_locked(struct i915_perf_stream *stream,
+ unsigned long metrics_set)
+{
+ struct i915_oa_config *config;
+ long ret = stream->oa_config->id;
+
+ config = i915_perf_get_oa_config(stream->perf, metrics_set);
+ if (!config)
+ return -EINVAL;
+
+ if (config != stream->oa_config) {
+ int err;
+
+ /*
+ * If OA is bound to a specific context, emit the
+ * reconfiguration inline from that context. The update
+ * will then be ordered with respect to submission on that
+ * context.
+ *
+ * When set globally, we use a low priority kernel context,
+ * so it will effectively take effect when idle.
+ */
+ err = emit_oa_config(stream, config, oa_context(stream), NULL);
+ if (!err)
+ config = xchg(&stream->oa_config, config);
+ else
+ ret = err;
+ }
+
+ i915_oa_config_put(config);
+
+ return ret;
+}
+
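
Userspace reaches i915_perf_config_locked() through I915_PERF_IOCTL_CONFIG on the stream fd, passing the metrics set id directly as the ioctl argument. A hypothetical wrapper (the helper name is ours; the uapi definitions come from the kernel's i915_drm.h):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* I915_PERF_IOCTL_CONFIG */

/* Switch a stream to another metrics set registered earlier via
 * DRM_IOCTL_I915_PERF_ADD_CONFIG. Returns a config id (>= 1) on
 * success or -1 with errno set.
 */
static long stream_set_metrics_set(int stream_fd, unsigned long metrics_set)
{
	return ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, metrics_set);
}
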
/**
- * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
+ * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
* @stream: An i915 perf stream
* @cmd: the ioctl request
* @arg: the ioctl data
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
- * with any non-file-operation driver hooks.
- *
* Returns: zero on success or a negative error code. Returns -EINVAL for
* an unknown ioctl request.
*/
@@ -2415,6 +3663,8 @@ static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
case I915_PERF_IOCTL_DISABLE:
i915_perf_disable_locked(stream);
return 0;
+ case I915_PERF_IOCTL_CONFIG:
+ return i915_perf_config_locked(stream, arg);
}
return -EINVAL;
@@ -2436,12 +3686,11 @@ static long i915_perf_ioctl(struct file *file,
unsigned long arg)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
long ret;
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&stream->lock);
ret = i915_perf_ioctl_locked(stream, cmd, arg);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&stream->lock);
return ret;
}
@@ -2453,7 +3702,7 @@ static long i915_perf_ioctl(struct file *file,
* Frees all resources associated with the given i915 perf @stream, disabling
* any associated data capture in the process.
*
- * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * Note: The &gt->perf.lock mutex has been taken to serialize
* with any non-file-operation driver hooks.
*/
static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
@@ -2464,8 +3713,6 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
if (stream->ops->destroy)
stream->ops->destroy(stream);
- list_del(&stream->link);
-
if (stream->ctx)
i915_gem_context_put(stream->ctx);
@@ -2486,11 +3733,20 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
static int i915_perf_release(struct inode *inode, struct file *file)
{
struct i915_perf_stream *stream = file->private_data;
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_perf *perf = stream->perf;
+ struct intel_gt *gt = stream->engine->gt;
- mutex_lock(&dev_priv->perf.lock);
+ /*
+ * Within this call, we know that the fd is being closed and we have no
+ * other user of stream->lock. Use the perf lock to destroy the stream
+ * here.
+ */
+ mutex_lock(&gt->perf.lock);
i915_perf_destroy_locked(stream);
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&gt->perf.lock);
+
+ /* Release the reference the perf stream kept on the driver. */
+ drm_dev_put(&perf->i915->drm);
return 0;
}
@@ -2498,7 +3754,6 @@ static int i915_perf_release(struct inode *inode, struct file *file)
static const struct file_operations fops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
.release = i915_perf_release,
.poll = i915_perf_poll,
.read = i915_perf_read,
@@ -2512,7 +3767,7 @@ static const struct file_operations fops = {
/**
* i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @param: The open parameters passed to 'DRM_I915_PERF_OPEN`
* @props: individually validated u64 property value pairs
* @file: drm file
@@ -2520,7 +3775,7 @@ static const struct file_operations fops = {
* See i915_perf_ioctl_open() for interface details.
*
* Implements further stream config validation and stream initialization on
- * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
+ * behalf of i915_perf_open_ioctl() with the &gt->perf.lock mutex
* taken to serialize with any non-file-operation driver hooks.
*
* Note: at this point the @props have only been validated in isolation and
@@ -2535,7 +3790,7 @@ static const struct file_operations fops = {
* Returns: zero on success or a negative error code.
*/
static int
-i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
+i915_perf_open_ioctl_locked(struct i915_perf *perf,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props,
struct drm_file *file)
@@ -2552,10 +3807,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv = file->driver_priv;
specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
- if (!specific_ctx) {
- DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
+ if (IS_ERR(specific_ctx)) {
+ drm_dbg(&perf->i915->drm,
+ "Failed to look up context with ID %u for opening perf stream\n",
ctx_handle);
- ret = -ENOENT;
+ ret = PTR_ERR(specific_ctx);
goto err;
}
}
@@ -2566,25 +3822,52 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
* rest of the system, which we consider acceptable for a
* non-privileged client.
*
- * For Gen8+ the OA unit no longer supports clock gating off for a
+ * For Gen8->11 the OA unit no longer supports clock gating off for a
* specific context and the kernel can't securely stop the counters
* from updating as system-wide / global values. Even though we can
* filter reports based on the included context ID we can't block
* clients from seeing the raw / global counter values via
* MI_REPORT_PERF_COUNT commands and so consider it a privileged op to
* enable the OA unit by default.
+ *
+ * For Gen12+ we gain a new OAR unit that only monitors the RCS on a
+ * per context basis. So we can relax requirements there if the user
+ * doesn't request global stream access (i.e. query based sampling
+	 * using MI_REPORT_PERF_COUNT).
*/
- if (IS_HASWELL(dev_priv) && specific_ctx)
+ if (IS_HASWELL(perf->i915) && specific_ctx)
privileged_op = false;
+ else if (GRAPHICS_VER(perf->i915) == 12 && specific_ctx &&
+ (props->sample_flags & SAMPLE_OA_REPORT) == 0)
+ privileged_op = false;
+
+ if (props->hold_preemption) {
+ if (!props->single_context) {
+ drm_dbg(&perf->i915->drm,
+ "preemption disable with no context\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ privileged_op = true;
+ }
+
+ /*
+ * Asking for SSEU configuration is a privileged operation.
+ */
+ if (props->has_sseu)
+ privileged_op = true;
+ else
+ get_default_sseu_config(&props->sseu, props->engine);
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
- * without CAP_SYS_ADMIN privileges.
+ * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
*/
if (privileged_op &&
- i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
+ i915_perf_stream_paranoid && !perfmon_capable()) {
+ drm_dbg(&perf->i915->drm,
+ "Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
}
@@ -2595,8 +3878,9 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_ctx;
}
- stream->dev_priv = dev_priv;
+ stream->perf = perf;
stream->ctx = specific_ctx;
+ stream->poll_oa_period = props->poll_oa_period;
ret = i915_oa_stream_init(stream, param, props);
if (ret)
@@ -2611,8 +3895,6 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
goto err_flags;
}
- list_add(&stream->link, &dev_priv->perf.streams);
-
if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
f_flags |= O_CLOEXEC;
if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
@@ -2621,16 +3903,19 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
if (stream_fd < 0) {
ret = stream_fd;
- goto err_open;
+ goto err_flags;
}
if (!(param->flags & I915_PERF_FLAG_DISABLED))
i915_perf_enable_locked(stream);
+ /* Take a reference on the driver that will be kept with stream_fd
+ * until its release.
+ */
+ drm_dev_get(&perf->i915->drm);
+
return stream_fd;
-err_open:
- list_del(&stream->link);
err_flags:
if (stream->ops->destroy)
stream->ops->destroy(stream);
@@ -2643,15 +3928,29 @@ err:
return ret;
}
-static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
+static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent)
+{
+ u64 nom = (2ULL << exponent) * NSEC_PER_SEC;
+ u32 den = i915_perf_oa_timestamp_frequency(perf->i915);
+
+ return div_u64(nom + den - 1, den);
+}
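
For a feel of the numbers: the OA unit emits a report every (2 << exponent) timestamp ticks, and the helper above rounds that tick count up to nanoseconds. A standalone sketch; the 19.2 MHz frequency is an illustrative value only, the driver queries the real one via i915_perf_oa_timestamp_frequency():

#include <stdint.h>

/* Same computation as oa_exponent_to_ns(), in plain C. */
static uint64_t oa_period_ns(int exponent, uint64_t freq_hz)
{
	uint64_t nom = (2ULL << exponent) * 1000000000ULL;

	return (nom + freq_hz - 1) / freq_hz;	/* round up */
}

/* e.g. oa_period_ns(5, 19200000) == 3334: 64 ticks at 19.2 MHz. */
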
+
+static __always_inline bool
+oa_format_valid(struct i915_perf *perf, enum drm_i915_oa_format format)
{
- return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ return test_bit(format, perf->format_mask);
+}
+
+static __always_inline void
+oa_format_add(struct i915_perf *perf, enum drm_i915_oa_format format)
+{
+ __set_bit(format, perf->format_mask);
}
/**
* read_properties_unlocked - validate + copy userspace stream open properties
- * @dev_priv: i915 device instance
+ * @perf: i915 perf instance
* @uprops: The array of u64 key value pairs given by userspace
* @n_props: The number of key value pairs expected in @uprops
* @props: The stream configuration built up while validating properties
@@ -2664,20 +3963,23 @@ static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
* we shouldn't validate or assume anything about ordering here. This doesn't
* rule out defining new properties with ordering requirements in the future.
*/
-static int read_properties_unlocked(struct drm_i915_private *dev_priv,
+static int read_properties_unlocked(struct i915_perf *perf,
u64 __user *uprops,
u32 n_props,
struct perf_open_properties *props)
{
+ struct drm_i915_gem_context_param_sseu user_sseu;
+ const struct i915_oa_format *f;
u64 __user *uprop = uprops;
+ bool config_instance = false;
+ bool config_class = false;
+ bool config_sseu = false;
+ u8 class, instance;
u32 i;
+ int ret;
memset(props, 0, sizeof(struct perf_open_properties));
-
- if (!n_props) {
- DRM_DEBUG("No i915 perf properties given\n");
- return -EINVAL;
- }
+ props->poll_oa_period = DEFAULT_POLL_PERIOD_NS;
/* Considering that ID = 0 is reserved and assuming that we don't
* (currently) expect any configurations to ever specify duplicate
@@ -2685,15 +3987,19 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
* one greater than the maximum number of properties we expect to get
* from userspace.
*/
- if (n_props >= DRM_I915_PERF_PROP_MAX) {
- DRM_DEBUG("More i915 perf properties specified than exist\n");
+ if (!n_props || n_props >= DRM_I915_PERF_PROP_MAX) {
+ drm_dbg(&perf->i915->drm,
+ "Invalid number of i915 perf properties given\n");
return -EINVAL;
}
+ /* Defaults when class:instance is not passed */
+ class = I915_ENGINE_CLASS_RENDER;
+ instance = 0;
+
for (i = 0; i < n_props; i++) {
u64 oa_period, oa_freq_hz;
u64 id, value;
- int ret;
ret = get_user(id, uprop);
if (ret)
@@ -2704,7 +4010,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
return ret;
if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
- DRM_DEBUG("Unknown i915 perf property ID\n");
+ drm_dbg(&perf->i915->drm,
+ "Unknown i915 perf property ID\n");
return -EINVAL;
}
@@ -2719,19 +4026,22 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
break;
case DRM_I915_PERF_PROP_OA_METRICS_SET:
if (value == 0) {
- DRM_DEBUG("Unknown OA metric set ID\n");
+ drm_dbg(&perf->i915->drm,
+ "Unknown OA metric set ID\n");
return -EINVAL;
}
props->metrics_set = value;
break;
case DRM_I915_PERF_PROP_OA_FORMAT:
if (value == 0 || value >= I915_OA_FORMAT_MAX) {
- DRM_DEBUG("Out-of-range OA report format %llu\n",
+ drm_dbg(&perf->i915->drm,
+ "Out-of-range OA report format %llu\n",
value);
return -EINVAL;
}
- if (!dev_priv->perf.oa.oa_formats[value].size) {
- DRM_DEBUG("Unsupported OA report format %llu\n",
+ if (!oa_format_valid(perf, value)) {
+ drm_dbg(&perf->i915->drm,
+ "Unsupported OA report format %llu\n",
value);
return -EINVAL;
}
@@ -2739,7 +4049,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
break;
case DRM_I915_PERF_PROP_OA_EXPONENT:
if (value > OA_EXPONENT_MAX) {
- DRM_DEBUG("OA timer exponent too high (> %u)\n",
+ drm_dbg(&perf->i915->drm,
+ "OA timer exponent too high (> %u)\n",
OA_EXPONENT_MAX);
return -EINVAL;
}
@@ -2751,7 +4062,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
*/
BUILD_BUG_ON(sizeof(oa_period) != 8);
- oa_period = oa_exponent_to_ns(dev_priv, value);
+ oa_period = oa_exponent_to_ns(perf, value);
/* This check is primarily to ensure that oa_period <=
* UINT32_MAX (before passing to do_div which only
@@ -2766,9 +4077,9 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
} else
oa_freq_hz = 0;
- if (oa_freq_hz > i915_oa_max_sample_rate &&
- !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
+ if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
+ drm_dbg(&perf->i915->drm,
+ "OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
i915_oa_max_sample_rate);
return -EACCES;
}
@@ -2776,7 +4087,45 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->oa_periodic = true;
props->oa_period_exponent = value;
break;
- case DRM_I915_PERF_PROP_MAX:
+ case DRM_I915_PERF_PROP_HOLD_PREEMPTION:
+ props->hold_preemption = !!value;
+ break;
+ case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
+ if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 55)) {
+ drm_dbg(&perf->i915->drm,
+ "SSEU config not supported on gfx %x\n",
+ GRAPHICS_VER_FULL(perf->i915));
+ return -ENODEV;
+ }
+
+ if (copy_from_user(&user_sseu,
+ u64_to_user_ptr(value),
+ sizeof(user_sseu))) {
+ drm_dbg(&perf->i915->drm,
+ "Unable to copy global sseu parameter\n");
+ return -EFAULT;
+ }
+ config_sseu = true;
+ break;
+ }
+ case DRM_I915_PERF_PROP_POLL_OA_PERIOD:
+ if (value < 100000 /* 100us */) {
+ drm_dbg(&perf->i915->drm,
+ "OA availability timer too small (%lluns < 100us)\n",
+ value);
+ return -EINVAL;
+ }
+ props->poll_oa_period = value;
+ break;
+ case DRM_I915_PERF_PROP_OA_ENGINE_CLASS:
+ class = (u8)value;
+ config_class = true;
+ break;
+ case DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE:
+ instance = (u8)value;
+ config_instance = true;
+ break;
+ default:
MISSING_CASE(id);
return -EINVAL;
}
@@ -2784,6 +4133,60 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
uprop += 2;
}
+ if ((config_class && !config_instance) ||
+ (config_instance && !config_class)) {
+ drm_dbg(&perf->i915->drm,
+ "OA engine-class and engine-instance parameters must be passed together\n");
+ return -EINVAL;
+ }
+
+ props->engine = intel_engine_lookup_user(perf->i915, class, instance);
+ if (!props->engine) {
+ drm_dbg(&perf->i915->drm,
+ "OA engine class and instance invalid %d:%d\n",
+ class, instance);
+ return -EINVAL;
+ }
+
+ if (!engine_supports_oa(props->engine)) {
+ drm_dbg(&perf->i915->drm,
+ "Engine not supported by OA %d:%d\n",
+ class, instance);
+ return -EINVAL;
+ }
+
+ /*
+ * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded with Media
+ * C6 disable in BIOS. Fail if Media C6 is enabled on steppings where OAM
+ * does not work as expected.
+ */
+ if (IS_MEDIA_GT_IP_STEP(props->engine->gt, IP_VER(13, 0), STEP_A0, STEP_C0) &&
+ props->engine->oa_group->type == TYPE_OAM &&
+ intel_check_bios_c6_setup(&props->engine->gt->rc6)) {
+ drm_dbg(&perf->i915->drm,
+ "OAM requires media C6 to be disabled in BIOS\n");
+ return -EINVAL;
+ }
+
+ i = array_index_nospec(props->oa_format, I915_OA_FORMAT_MAX);
+ f = &perf->oa_formats[i];
+ if (!engine_supports_oa_format(props->engine, f->type)) {
+ drm_dbg(&perf->i915->drm,
+ "Invalid OA format %d for class %d\n",
+ f->type, props->engine->class);
+ return -EINVAL;
+ }
+
+ if (config_sseu) {
+ ret = get_sseu_config(&props->sseu, props->engine, &user_sseu);
+ if (ret) {
+ drm_dbg(&perf->i915->drm,
+ "Invalid SSEU configuration\n");
+ return ret;
+ }
+ props->has_sseu = true;
+ }
+
return 0;
}
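
Seen from userspace, the (id, value) pairs validated above are laid out as a flat u64 array. A hypothetical open helper (values are examples only; the format must be one the platform supports, cf. oa_init_supported_formats() later in this patch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Open a periodic OA sampling stream: ~3.3us period (exponent 5).
 * Returns the stream fd, or -1 with errno set.
 */
static int open_oa_stream(int drm_fd, uint64_t metrics_set)
{
	uint64_t props[] = {
		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set,
		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
		DRM_I915_PERF_PROP_OA_EXPONENT, 5,
	};
	struct drm_i915_perf_open_param param = {
		.flags = I915_PERF_FLAG_FD_CLOEXEC |
			 I915_PERF_FLAG_FD_NONBLOCK,
		.num_properties = sizeof(props) / (2 * sizeof(uint64_t)),
		.properties_ptr = (uintptr_t)props,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
}
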
@@ -2802,10 +4205,10 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
* buffered data written by the GPU besides periodic OA metrics.
*
* Note we copy the properties from userspace outside of the i915 perf
- * mutex to avoid an awkward lockdep with mmap_sem.
+ * mutex to avoid an awkward lockdep with mmap_lock.
*
* Most of the implementation details are handled by
- * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
+ * i915_perf_open_ioctl_locked() after taking the &gt->perf.lock
* mutex for serializing with any non-file-operation driver hooks.
*
* Return: A newly opened i915 Perf stream file descriptor or negative
@@ -2814,143 +4217,91 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
int i915_perf_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_open_param *param = data;
+ struct intel_gt *gt;
struct perf_open_properties props;
u32 known_open_flags;
int ret;
- if (!dev_priv->perf.initialized) {
- DRM_DEBUG("i915 perf interface not available for this system\n");
+ if (!perf->i915)
return -ENOTSUPP;
- }
known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
I915_PERF_FLAG_FD_NONBLOCK |
I915_PERF_FLAG_DISABLED;
if (param->flags & ~known_open_flags) {
- DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
+ drm_dbg(&perf->i915->drm,
+ "Unknown drm_i915_perf_open_param flag\n");
return -EINVAL;
}
- ret = read_properties_unlocked(dev_priv,
+ ret = read_properties_unlocked(perf,
u64_to_user_ptr(param->properties_ptr),
param->num_properties,
&props);
if (ret)
return ret;
- mutex_lock(&dev_priv->perf.lock);
- ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
- mutex_unlock(&dev_priv->perf.lock);
+ gt = props.engine->gt;
+
+ mutex_lock(&gt->perf.lock);
+ ret = i915_perf_open_ioctl_locked(perf, param, &props, file);
+ mutex_unlock(&gt->perf.lock);
return ret;
}
/**
* i915_perf_register - exposes i915-perf to userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* In particular OA metric sets are advertised under a sysfs metrics/
* directory allowing userspace to enumerate valid IDs that can be
* used to open an i915-perf stream.
*/
-void i915_perf_register(struct drm_i915_private *dev_priv)
+void i915_perf_register(struct drm_i915_private *i915)
{
- int ret;
+ struct i915_perf *perf = &i915->perf;
+ struct intel_gt *gt = to_gt(i915);
- if (!dev_priv->perf.initialized)
+ if (!perf->i915)
return;
/* To be sure we're synchronized with an attempted
* i915_perf_open_ioctl(); considering that we register after
* being exposed to userspace.
*/
- mutex_lock(&dev_priv->perf.lock);
+ mutex_lock(&gt->perf.lock);
- dev_priv->perf.metrics_kobj =
+ perf->metrics_kobj =
kobject_create_and_add("metrics",
- &dev_priv->drm.primary->kdev->kobj);
- if (!dev_priv->perf.metrics_kobj)
- goto exit;
-
- sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
-
- if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
- } else if (IS_SKYLAKE(dev_priv)) {
- if (IS_SKL_GT2(dev_priv))
- i915_perf_load_test_config_sklgt2(dev_priv);
- else if (IS_SKL_GT3(dev_priv))
- i915_perf_load_test_config_sklgt3(dev_priv);
- else if (IS_SKL_GT4(dev_priv))
- i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_ICELAKE(dev_priv)) {
- i915_perf_load_test_config_icl(dev_priv);
- }
-
- if (dev_priv->perf.oa.test_config.id == 0)
- goto sysfs_error;
-
- ret = sysfs_create_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.oa.test_config.sysfs_metric);
- if (ret)
- goto sysfs_error;
+ &i915->drm.primary->kdev->kobj);
- atomic_set(&dev_priv->perf.oa.test_config.ref_count, 1);
-
- goto exit;
-
-sysfs_error:
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
-
-exit:
- mutex_unlock(&dev_priv->perf.lock);
+ mutex_unlock(&gt->perf.lock);
}
/**
* i915_perf_unregister - hide i915-perf from userspace
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* i915-perf state cleanup is split up into an 'unregister' and
* 'deinit' phase where the interface is first hidden from
* userspace by i915_perf_unregister() before cleaning up
* remaining state in i915_perf_fini().
*/
-void i915_perf_unregister(struct drm_i915_private *dev_priv)
+void i915_perf_unregister(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.metrics_kobj)
- return;
+ struct i915_perf *perf = &i915->perf;
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &dev_priv->perf.oa.test_config.sysfs_metric);
+ if (!perf->metrics_kobj)
+ return;
- kobject_put(dev_priv->perf.metrics_kobj);
- dev_priv->perf.metrics_kobj = NULL;
+ kobject_put(perf->metrics_kobj);
+ perf->metrics_kobj = NULL;
}
-static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
{
static const i915_reg_t flex_eu_regs[] = {
EU_PERF_CNTL0,
@@ -2970,78 +4321,175 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr)
return false;
}
-static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr)
+#define REG_EQUAL(addr, mmio) \
+ ((addr) == i915_mmio_reg_offset(mmio))
+
+static const struct i915_mmio_range gen7_oa_b_counters[] = {
+ { .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */
+ { .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */
+ { .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */
+ {}
+};
+
+static const struct i915_mmio_range gen12_oa_b_counters[] = {
+ { .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */
+ { .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */
+ { .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */
+ { .start = 0xd940, .end = 0xd97c }, /* GEN12_OAG_CEC[0-7][0-1] */
+ { .start = 0xdc00, .end = 0xdc3c }, /* GEN12_OAG_SCEC[0-7][0-1] */
+ { .start = 0xdc40, .end = 0xdc40 }, /* GEN12_OAG_SPCTR_CNF */
+ { .start = 0xdc44, .end = 0xdc44 }, /* GEN12_OAA_DBG_REG */
+ {}
+};
+
+static const struct i915_mmio_range mtl_oam_b_counters[] = {
+ { .start = 0x393000, .end = 0x39301c }, /* GEN12_OAM_STARTTRIG1[1-8] */
+ { .start = 0x393020, .end = 0x39303c }, /* GEN12_OAM_REPORTTRIG1[1-8] */
+ { .start = 0x393040, .end = 0x39307c }, /* GEN12_OAM_CEC[0-7][0-1] */
+	{ .start = 0x393200, .end = 0x39323c }, /* MPES[0-7] */
+ {}
+};
+
+static const struct i915_mmio_range xehp_oa_b_counters[] = {
+ { .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
+ { .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
+ {}
+};
+
+static const struct i915_mmio_range gen7_oa_mux_regs[] = {
+ { .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */
+ { .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */
+ { .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */
+ {}
+};
+
+static const struct i915_mmio_range hsw_oa_mux_regs[] = {
+ { .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
+ { .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
+ { .start = 0x25100, .end = 0x2ff90 },
+ {}
+};
+
+static const struct i915_mmio_range chv_oa_mux_regs[] = {
+ { .start = 0x182300, .end = 0x1823a4 },
+ {}
+};
+
+static const struct i915_mmio_range gen8_oa_mux_regs[] = {
+ { .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
+ { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
+ {}
+};
+
+static const struct i915_mmio_range gen11_oa_mux_regs[] = {
+ { .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */
+ {}
+};
+
+static const struct i915_mmio_range gen12_oa_mux_regs[] = {
+ { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
+ { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
+ { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
+ { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */
+ { .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
+ {}
+};
+
+/*
+ * Ref: 14010536224:
+ * 0x20cc is repurposed on MTL, so use a separate array for MTL.
+ */
+static const struct i915_mmio_range mtl_oa_mux_regs[] = {
+ { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
+ { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
+ { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
+ { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */
+	{ .start = 0x38d100, .end = 0x38d114 }, /* VISACTL */
+ {}
+};
+
+static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
+{
+ return i915_mmio_range_table_contains(addr, gen7_oa_b_counters);
+}
+
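
The lookup helper itself is declared in i915_mmio_range.h and is not part of this diff; a plausible standalone sketch of what it does, given the {}-terminated tables above (the function name here is ours):

#include <stdbool.h>
#include <stdint.h>

struct i915_mmio_range {
	uint32_t start;
	uint32_t end;
};

/* Linear scan for a range containing addr; the all-zero sentinel
 * entry terminates the table.
 */
static bool
mmio_range_table_contains(uint32_t addr, const struct i915_mmio_range *table)
{
	for (; table->start || table->end; table++) {
		if (addr >= table->start && addr <= table->end)
			return true;
	}
	return false;
}
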
+static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
+{
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen8_oa_mux_regs);
+}
+
+static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) &&
- addr <= i915_mmio_reg_offset(OASTARTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) &&
- addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) ||
- (addr >= i915_mmio_reg_offset(OACEC0_0) &&
- addr <= i915_mmio_reg_offset(OACEC7_1));
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen8_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen11_oa_mux_regs);
}
-static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) ||
- (addr >= i915_mmio_reg_offset(MICRO_BP0_0) &&
- addr <= i915_mmio_reg_offset(NOA_WRITE)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) ||
- (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI));
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, hsw_oa_mux_regs);
}
-static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- addr == i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) ||
- (addr >= i915_mmio_reg_offset(RPM_CONFIG0) &&
- addr <= i915_mmio_reg_offset(NOA_CONFIG(8)));
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, chv_oa_mux_regs);
}
-static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return gen8_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
- addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
+ return i915_mmio_range_table_contains(addr, gen12_oa_b_counters);
+}
+
+static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr)
+{
+ if (HAS_OAM(perf->i915) &&
+ GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
+ return i915_mmio_range_table_contains(addr, mtl_oam_b_counters);
+
+ return false;
}
-static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x25100 && addr <= 0x2FF90) ||
- (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) &&
- addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) ||
- addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0);
+ return i915_mmio_range_table_contains(addr, xehp_oa_b_counters) ||
+ i915_mmio_range_table_contains(addr, gen12_oa_b_counters) ||
+ mtl_is_valid_oam_b_counter_addr(perf, addr);
}
-static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
+static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return gen7_is_valid_mux_addr(dev_priv, addr) ||
- (addr >= 0x182300 && addr <= 0x1823A4);
+ if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
+ return i915_mmio_range_table_contains(addr, mtl_oa_mux_regs);
+ else
+ return i915_mmio_range_table_contains(addr, gen12_oa_mux_regs);
}
-static uint32_t mask_reg_value(u32 reg, u32 val)
+static u32 mask_reg_value(u32 reg, u32 val)
{
- /* HALF_SLICE_CHICKEN2 is programmed with a the
+ /*
+	 * HALF_SLICE_CHICKEN2 is programmed with the
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
* programmed by userspace doesn't change this.
*/
- if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg)
+ if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
- /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function
+ /*
+ * WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
* indicated by its name and a bunch of selection fields used by OA
* configs.
*/
- if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg)
+ if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
}
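
The masked bits stripped above follow the usual i915 convention for "masked" registers: the upper 16 bits of a write select which of the lower 16 bits actually change. A minimal sketch of that encoding, mirroring _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() from the driver headers:

/* A masked-register write only affects bits whose mask (bit << 16) is
 * set, which is why clearing the _MASKED_BIT_ENABLE() value here is
 * enough to keep a user-supplied OA config from flipping that bit.
 */
#define MASKED_BIT_ENABLE(b)	((((b)) << 16) | (b))	/* set bit b */
#define MASKED_BIT_DISABLE(b)	((b) << 16)		/* clear bit b */
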
-static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
- bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr),
+static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
+ bool (*is_valid)(struct i915_perf *perf, u32 addr),
u32 __user *regs,
u32 n_regs)
{
@@ -3052,9 +4500,6 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
if (!n_regs)
return NULL;
- if (!access_ok(regs, n_regs * sizeof(u32) * 2))
- return ERR_PTR(-EFAULT);
-
/* No is_valid function means we're not allowing any register to be programmed. */
GEM_BUG_ON(!is_valid);
if (!is_valid)
@@ -3071,8 +4516,9 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
if (err)
goto addr_err;
- if (!is_valid(dev_priv, addr)) {
- DRM_DEBUG("Invalid oa_reg address: %X\n", addr);
+ if (!is_valid(perf, addr)) {
+ drm_dbg(&perf->i915->drm,
+ "Invalid oa_reg address: %X\n", addr);
err = -EINVAL;
goto addr_err;
}
@@ -3094,8 +4540,8 @@ addr_err:
return ERR_PTR(err);
}
-static ssize_t show_dynamic_id(struct device *dev,
- struct device_attribute *attr,
+static ssize_t show_dynamic_id(struct kobject *kobj,
+ struct kobj_attribute *attr,
char *buf)
{
struct i915_oa_config *oa_config =
@@ -3104,7 +4550,7 @@ static ssize_t show_dynamic_id(struct device *dev,
return sprintf(buf, "%d\n", oa_config->id);
}
-static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
+static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf,
struct i915_oa_config *oa_config)
{
sysfs_attr_init(&oa_config->sysfs_metric_id.attr);
@@ -3119,7 +4565,7 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
oa_config->sysfs_metric.name = oa_config->uuid;
oa_config->sysfs_metric.attrs = oa_config->attrs;
- return sysfs_create_group(dev_priv->perf.metrics_kobj,
+ return sysfs_create_group(perf->metrics_kobj,
&oa_config->sysfs_metric);
}
@@ -3139,43 +4585,48 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv,
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
struct drm_i915_perf_oa_config *args = data;
struct i915_oa_config *oa_config, *tmp;
+ struct i915_oa_reg *regs;
int err, id;
- if (!dev_priv->perf.initialized) {
- DRM_DEBUG("i915 perf interface not available for this system\n");
+ if (!perf->i915)
return -ENOTSUPP;
- }
- if (!dev_priv->perf.metrics_kobj) {
- DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
+ if (!perf->metrics_kobj) {
+ drm_dbg(&perf->i915->drm,
+ "OA metrics weren't advertised via sysfs\n");
return -EINVAL;
}
- if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
+ drm_dbg(&perf->i915->drm,
+ "Insufficient privileges to add i915 OA config\n");
return -EACCES;
}
if ((!args->mux_regs_ptr || !args->n_mux_regs) &&
(!args->boolean_regs_ptr || !args->n_boolean_regs) &&
(!args->flex_regs_ptr || !args->n_flex_regs)) {
- DRM_DEBUG("No OA registers given\n");
+ drm_dbg(&perf->i915->drm,
+ "No OA registers given\n");
return -EINVAL;
}
oa_config = kzalloc(sizeof(*oa_config), GFP_KERNEL);
if (!oa_config) {
- DRM_DEBUG("Failed to allocate memory for the OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to allocate memory for the OA config\n");
return -ENOMEM;
}
- atomic_set(&oa_config->ref_count, 1);
+ oa_config->perf = perf;
+ kref_init(&oa_config->ref);
if (!uuid_is_valid(args->uuid)) {
- DRM_DEBUG("Invalid uuid format for OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Invalid uuid format for OA config\n");
err = -EINVAL;
goto reg_err;
}
@@ -3186,93 +4637,101 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid));
oa_config->mux_regs_len = args->n_mux_regs;
- oa_config->mux_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_mux_reg,
- u64_to_user_ptr(args->mux_regs_ptr),
- args->n_mux_regs);
-
- if (IS_ERR(oa_config->mux_regs)) {
- DRM_DEBUG("Failed to create OA config for mux_regs\n");
- err = PTR_ERR(oa_config->mux_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_mux_reg,
+ u64_to_user_ptr(args->mux_regs_ptr),
+ args->n_mux_regs);
+
+ if (IS_ERR(regs)) {
+ drm_dbg(&perf->i915->drm,
+ "Failed to create OA config for mux_regs\n");
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->mux_regs = regs;
oa_config->b_counter_regs_len = args->n_boolean_regs;
- oa_config->b_counter_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_b_counter_reg,
- u64_to_user_ptr(args->boolean_regs_ptr),
- args->n_boolean_regs);
-
- if (IS_ERR(oa_config->b_counter_regs)) {
- DRM_DEBUG("Failed to create OA config for b_counter_regs\n");
- err = PTR_ERR(oa_config->b_counter_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_b_counter_reg,
+ u64_to_user_ptr(args->boolean_regs_ptr),
+ args->n_boolean_regs);
+
+ if (IS_ERR(regs)) {
+ drm_dbg(&perf->i915->drm,
+ "Failed to create OA config for b_counter_regs\n");
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->b_counter_regs = regs;
- if (INTEL_GEN(dev_priv) < 8) {
+ if (GRAPHICS_VER(perf->i915) < 8) {
if (args->n_flex_regs != 0) {
err = -EINVAL;
goto reg_err;
}
} else {
oa_config->flex_regs_len = args->n_flex_regs;
- oa_config->flex_regs =
- alloc_oa_regs(dev_priv,
- dev_priv->perf.oa.ops.is_valid_flex_reg,
- u64_to_user_ptr(args->flex_regs_ptr),
- args->n_flex_regs);
-
- if (IS_ERR(oa_config->flex_regs)) {
- DRM_DEBUG("Failed to create OA config for flex_regs\n");
- err = PTR_ERR(oa_config->flex_regs);
+ regs = alloc_oa_regs(perf,
+ perf->ops.is_valid_flex_reg,
+ u64_to_user_ptr(args->flex_regs_ptr),
+ args->n_flex_regs);
+
+ if (IS_ERR(regs)) {
+ drm_dbg(&perf->i915->drm,
+ "Failed to create OA config for flex_regs\n");
+ err = PTR_ERR(regs);
goto reg_err;
}
+ oa_config->flex_regs = regs;
}
- err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ err = mutex_lock_interruptible(&perf->metrics_lock);
if (err)
goto reg_err;
/* We shouldn't have too many configs, so this iteration shouldn't be
* too costly.
*/
- idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) {
+ idr_for_each_entry(&perf->metrics_idr, tmp, id) {
if (!strcmp(tmp->uuid, oa_config->uuid)) {
- DRM_DEBUG("OA config already exists with this uuid\n");
+ drm_dbg(&perf->i915->drm,
+ "OA config already exists with this uuid\n");
err = -EADDRINUSE;
goto sysfs_err;
}
}
- err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config);
+ err = create_dynamic_oa_sysfs_entry(perf, oa_config);
if (err) {
- DRM_DEBUG("Failed to create sysfs entry for OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to create sysfs entry for OA config\n");
goto sysfs_err;
}
/* Config id 0 is invalid, id 1 for kernel stored test config. */
- oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr,
+ oa_config->id = idr_alloc(&perf->metrics_idr,
oa_config, 2,
0, GFP_KERNEL);
if (oa_config->id < 0) {
- DRM_DEBUG("Failed to create sysfs entry for OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to create sysfs entry for OA config\n");
err = oa_config->id;
goto sysfs_err;
}
+ id = oa_config->id;
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ drm_dbg(&perf->i915->drm,
+ "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
+ mutex_unlock(&perf->metrics_lock);
- DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id);
-
- return oa_config->id;
+ return id;
sysfs_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
+ mutex_unlock(&perf->metrics_lock);
reg_err:
- put_oa_config(dev_priv, oa_config);
- DRM_DEBUG("Failed to add new OA config\n");
+ i915_oa_config_put(oa_config);
+ drm_dbg(&perf->i915->drm,
+ "Failed to add new OA config\n");
return err;
}
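
From userspace, the same ioctl looks like this hypothetical wrapper (field names per the kernel's i915_drm.h; before Gen8, n_flex_regs must be 0 as enforced above, so flex registers are omitted here):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Register a dynamic OA config of (address, value) u32 pairs; uuid
 * must be a 36-character UUID string. Returns the new metrics set id
 * (>= 2) or -1 with errno set (EACCES without CAP_PERFMON when
 * dev.i915.perf_stream_paranoid is set).
 */
static int add_oa_config(int drm_fd, const char uuid[36],
			 const uint32_t *mux, uint32_t n_mux,
			 const uint32_t *boolean, uint32_t n_boolean)
{
	struct drm_i915_perf_oa_config cfg = {
		.n_mux_regs = n_mux,
		.mux_regs_ptr = (uintptr_t)mux,
		.n_boolean_regs = n_boolean,
		.boolean_regs_ptr = (uintptr_t)boolean,
	};

	memcpy(cfg.uuid, uuid, sizeof(cfg.uuid));
	return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
}
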
@@ -3290,58 +4749,61 @@ reg_err:
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_perf *perf = &to_i915(dev)->perf;
u64 *arg = data;
struct i915_oa_config *oa_config;
int ret;
- if (!dev_priv->perf.initialized) {
- DRM_DEBUG("i915 perf interface not available for this system\n");
+ if (!perf->i915)
return -ENOTSUPP;
- }
- if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
+ drm_dbg(&perf->i915->drm,
+ "Insufficient privileges to remove i915 OA config\n");
return -EACCES;
}
- ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock);
+ ret = mutex_lock_interruptible(&perf->metrics_lock);
if (ret)
- goto lock_err;
+ return ret;
- oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg);
+ oa_config = idr_find(&perf->metrics_idr, *arg);
if (!oa_config) {
- DRM_DEBUG("Failed to remove unknown OA config\n");
+ drm_dbg(&perf->i915->drm,
+ "Failed to remove unknown OA config\n");
ret = -ENOENT;
- goto config_err;
+ goto err_unlock;
}
GEM_BUG_ON(*arg != oa_config->id);
- sysfs_remove_group(dev_priv->perf.metrics_kobj,
- &oa_config->sysfs_metric);
+ sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric);
+
+ idr_remove(&perf->metrics_idr, *arg);
- idr_remove(&dev_priv->perf.metrics_idr, *arg);
+ mutex_unlock(&perf->metrics_lock);
- DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
+ drm_dbg(&perf->i915->drm,
+ "Removed config %s id=%i\n", oa_config->uuid, oa_config->id);
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(oa_config);
+
+ return 0;
-config_err:
- mutex_unlock(&dev_priv->perf.metrics_lock);
-lock_err:
+err_unlock:
+ mutex_unlock(&perf->metrics_lock);
return ret;
}
-static struct ctl_table oa_table[] = {
+static const struct ctl_table oa_table[] = {
{
.procname = "perf_stream_paranoid",
.data = &i915_perf_stream_paranoid,
.maxlen = sizeof(i915_perf_stream_paranoid),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{
.procname = "oa_max_sample_rate",
@@ -3349,163 +4811,452 @@ static struct ctl_table oa_table[] = {
.maxlen = sizeof(i915_oa_max_sample_rate),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .extra1 = SYSCTL_ZERO,
.extra2 = &oa_sample_rate_hard_limit,
},
- {}
};
-static struct ctl_table i915_root[] = {
- {
- .procname = "i915",
- .maxlen = 0,
- .mode = 0555,
- .child = oa_table,
- },
- {}
-};
+static u32 num_perf_groups_per_gt(struct intel_gt *gt)
+{
+ return 1;
+}
-static struct ctl_table dev_root[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = i915_root,
- },
- {}
-};
+static u32 __oam_engine_group(struct intel_engine_cs *engine)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 70)) {
+ /*
+ * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
+ * within the gt use the same OAM. All MTL SKUs list 1 SA MEDIA.
+ */
+ drm_WARN_ON(&engine->i915->drm,
+ engine->gt->type != GT_MEDIA);
+
+ return PERF_GROUP_OAM_SAMEDIA_0;
+ }
+
+ return PERF_GROUP_INVALID;
+}
+
+static u32 __oa_engine_group(struct intel_engine_cs *engine)
+{
+ switch (engine->class) {
+ case RENDER_CLASS:
+ return PERF_GROUP_OAG;
+
+ case VIDEO_DECODE_CLASS:
+ case VIDEO_ENHANCEMENT_CLASS:
+ return __oam_engine_group(engine);
+
+ default:
+ return PERF_GROUP_INVALID;
+ }
+}
+
+static struct i915_perf_regs __oam_regs(u32 base)
+{
+ return (struct i915_perf_regs) {
+ base,
+ GEN12_OAM_HEAD_POINTER(base),
+ GEN12_OAM_TAIL_POINTER(base),
+ GEN12_OAM_BUFFER(base),
+ GEN12_OAM_CONTEXT_CONTROL(base),
+ GEN12_OAM_CONTROL(base),
+ GEN12_OAM_DEBUG(base),
+ GEN12_OAM_STATUS(base),
+ GEN12_OAM_CONTROL_COUNTER_FORMAT_SHIFT,
+ };
+}
+
+static struct i915_perf_regs __oag_regs(void)
+{
+ return (struct i915_perf_regs) {
+ 0,
+ GEN12_OAG_OAHEADPTR,
+ GEN12_OAG_OATAILPTR,
+ GEN12_OAG_OABUFFER,
+ GEN12_OAG_OAGLBCTXCTRL,
+ GEN12_OAG_OACONTROL,
+ GEN12_OAG_OA_DEBUG,
+ GEN12_OAG_OASTATUS,
+ GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT,
+ };
+}
+
+static void oa_init_groups(struct intel_gt *gt)
+{
+ int i, num_groups = gt->perf.num_perf_groups;
+
+ for (i = 0; i < num_groups; i++) {
+ struct i915_perf_group *g = &gt->perf.group[i];
+
+ /* Fused off engines can result in a group with num_engines == 0 */
+ if (g->num_engines == 0)
+ continue;
+
+ if (i == PERF_GROUP_OAG && gt->type != GT_MEDIA) {
+ g->regs = __oag_regs();
+ g->type = TYPE_OAG;
+ } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
+ g->regs = __oam_regs(mtl_oa_base[i]);
+ g->type = TYPE_OAM;
+ }
+ }
+}
+
+static int oa_init_gt(struct intel_gt *gt)
+{
+ u32 num_groups = num_perf_groups_per_gt(gt);
+ struct intel_engine_cs *engine;
+ struct i915_perf_group *g;
+ intel_engine_mask_t tmp;
+
+ g = kcalloc(num_groups, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
+ for_each_engine_masked(engine, gt, ALL_ENGINES, tmp) {
+ u32 index = __oa_engine_group(engine);
+
+ engine->oa_group = NULL;
+ if (index < num_groups) {
+ g[index].num_engines++;
+ engine->oa_group = &g[index];
+ }
+ }
+
+ gt->perf.num_perf_groups = num_groups;
+ gt->perf.group = g;
+
+ oa_init_groups(gt);
+
+ return 0;
+}
+
+static int oa_init_engine_groups(struct i915_perf *perf)
+{
+ struct intel_gt *gt;
+ int i, ret;
+
+ for_each_gt(gt, perf->i915, i) {
+ ret = oa_init_gt(gt);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void oa_init_supported_formats(struct i915_perf *perf)
+{
+ struct drm_i915_private *i915 = perf->i915;
+ enum intel_platform platform = INTEL_INFO(i915)->platform;
+
+ switch (platform) {
+ case INTEL_HASWELL:
+		oa_format_add(perf, I915_OA_FORMAT_A13);
+ oa_format_add(perf, I915_OA_FORMAT_A29);
+ oa_format_add(perf, I915_OA_FORMAT_A13_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_B4_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A45_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_B4_C8_A16);
+ oa_format_add(perf, I915_OA_FORMAT_C4_B8);
+ break;
+
+ case INTEL_BROADWELL:
+ case INTEL_CHERRYVIEW:
+ case INTEL_SKYLAKE:
+ case INTEL_BROXTON:
+ case INTEL_KABYLAKE:
+ case INTEL_GEMINILAKE:
+ case INTEL_COFFEELAKE:
+ case INTEL_COMETLAKE:
+ case INTEL_ICELAKE:
+ case INTEL_ELKHARTLAKE:
+ case INTEL_JASPERLAKE:
+ case INTEL_TIGERLAKE:
+ case INTEL_ROCKETLAKE:
+ case INTEL_DG1:
+ case INTEL_ALDERLAKE_S:
+ case INTEL_ALDERLAKE_P:
+ oa_format_add(perf, I915_OA_FORMAT_A12);
+ oa_format_add(perf, I915_OA_FORMAT_A12_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A32u40_A4u32_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_C4_B8);
+ break;
+
+ case INTEL_DG2:
+ oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
+ break;
+
+ case INTEL_METEORLAKE:
+ oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
+ oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
+ oa_format_add(perf, I915_OAM_FORMAT_MPEC8u64_B8_C8);
+ oa_format_add(perf, I915_OAM_FORMAT_MPEC8u32_B8_C8);
+ break;
+
+ default:
+ MISSING_CASE(platform);
+ }
+}
+
+static void i915_perf_init_info(struct drm_i915_private *i915)
+{
+ struct i915_perf *perf = &i915->perf;
+
+ switch (GRAPHICS_VER(i915)) {
+ case 8:
+ perf->ctx_oactxctrl_offset = 0x120;
+ perf->ctx_flexeu0_offset = 0x2ce;
+ perf->gen8_valid_ctx_bit = BIT(25);
+ break;
+ case 9:
+ perf->ctx_oactxctrl_offset = 0x128;
+ perf->ctx_flexeu0_offset = 0x3de;
+ perf->gen8_valid_ctx_bit = BIT(16);
+ break;
+ case 11:
+ perf->ctx_oactxctrl_offset = 0x124;
+ perf->ctx_flexeu0_offset = 0x78e;
+ perf->gen8_valid_ctx_bit = BIT(16);
+ break;
+ case 12:
+ perf->gen8_valid_ctx_bit = BIT(16);
+ /*
+ * Calculate offset at runtime in oa_pin_context for gen12 and
+ * cache the value in perf->ctx_oactxctrl_offset.
+ */
+ break;
+ default:
+ MISSING_CASE(GRAPHICS_VER(i915));
+ }
+}
/**
- * i915_perf_init - initialize i915-perf state on module load
- * @dev_priv: i915 device instance
+ * i915_perf_init - initialize i915-perf state on module bind
+ * @i915: i915 device instance
*
* Initializes i915-perf state without exposing anything to userspace.
*
* Note: i915-perf initialization is split into an 'init' and 'register'
* phase with the i915_perf_register() exposing state to userspace.
*/
-void i915_perf_init(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
- gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
- hsw_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
- dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
- dev_priv->perf.oa.ops.read = gen7_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read =
- gen7_oa_hw_tail_read;
-
- dev_priv->perf.oa.oa_formats = hsw_oa_formats;
- } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
+int i915_perf_init(struct drm_i915_private *i915)
+{
+ struct i915_perf *perf = &i915->perf;
+
+ perf->oa_formats = oa_formats;
+ if (IS_HASWELL(i915)) {
+ perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg = NULL;
+ perf->ops.enable_metric_set = hsw_enable_metric_set;
+ perf->ops.disable_metric_set = hsw_disable_metric_set;
+ perf->ops.oa_enable = gen7_oa_enable;
+ perf->ops.oa_disable = gen7_oa_disable;
+ perf->ops.read = gen7_oa_read;
+ perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read;
+ } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) {
/* Note: that although we could theoretically also support the
* legacy ringbuffer mode on BDW (and earlier iterations of
* this driver, before upstreaming did this) it didn't seem
* worth the complexity to maintain now that BDW+ enable
* execlist mode by default.
*/
- dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
-
- dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
- dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
- dev_priv->perf.oa.ops.read = gen8_oa_read;
- dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ perf->ops.read = gen8_oa_read;
+ i915_perf_init_info(i915);
- if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ if (IS_GRAPHICS_VER(i915, 8, 9)) {
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ perf->ops.is_valid_mux_reg =
gen8_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->perf.oa.ops.is_valid_mux_reg =
+ if (IS_CHERRYVIEW(i915)) {
+ perf->ops.is_valid_mux_reg =
chv_is_valid_mux_addr;
}
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
-
- if (IS_GEN8(dev_priv)) {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
-
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<25);
- } else {
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
-
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
- }
- } else if (IS_GEN(dev_priv, 10, 11)) {
- dev_priv->perf.oa.ops.is_valid_b_counter_reg =
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen8_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ } else if (GRAPHICS_VER(i915) == 11) {
+ perf->ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
- dev_priv->perf.oa.ops.is_valid_mux_reg =
- gen10_is_valid_mux_addr;
- dev_priv->perf.oa.ops.is_valid_flex_reg =
+ perf->ops.is_valid_mux_reg =
+ gen11_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg =
gen8_is_valid_flex_addr;
- dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
- dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
-
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ perf->ops.oa_enable = gen8_oa_enable;
+ perf->ops.oa_disable = gen8_oa_disable;
+ perf->ops.enable_metric_set = gen8_enable_metric_set;
+ perf->ops.disable_metric_set = gen11_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
+ } else if (GRAPHICS_VER(i915) == 12) {
+ perf->ops.is_valid_b_counter_reg =
+ HAS_OA_SLICE_CONTRIB_LIMITS(i915) ?
+ xehp_is_valid_b_counter_addr :
+ gen12_is_valid_b_counter_addr;
+ perf->ops.is_valid_mux_reg =
+ gen12_is_valid_mux_addr;
+ perf->ops.is_valid_flex_reg =
+ gen8_is_valid_flex_addr;
- dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
+ perf->ops.oa_enable = gen12_oa_enable;
+ perf->ops.oa_disable = gen12_oa_disable;
+ perf->ops.enable_metric_set = gen12_enable_metric_set;
+ perf->ops.disable_metric_set = gen12_disable_metric_set;
+ perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read;
}
}
- if (dev_priv->perf.oa.ops.enable_metric_set) {
- hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
- init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
+ if (perf->ops.enable_metric_set) {
+ struct intel_gt *gt;
+ int i, ret;
+
+ for_each_gt(gt, i915, i)
+ mutex_init(&gt->perf.lock);
+
+ /* Choose a representative limit */
+ oa_sample_rate_hard_limit = to_gt(i915)->clock_frequency / 2;
+
+ mutex_init(&perf->metrics_lock);
+ idr_init_base(&perf->metrics_idr, 1);
+
+ /* We set up some ratelimit state to potentially throttle any
+ * _NOTES about spurious, invalid OA reports which we don't
+ * forward to userspace.
+ *
+ * We print a _NOTE about any throttling when closing the
+ * stream instead of waiting until driver _fini which no one
+ * would ever see.
+ *
+ * Using the same limiting factors as printk_ratelimit()
+ */
+ ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10);
+ /* Since we use a DRM_NOTE for spurious reports it would be
+ * inconsistent to let __ratelimit() automatically print a
+ * warning for throttling.
+ */
+ ratelimit_set_flags(&perf->spurious_report_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
+ ratelimit_state_init(&perf->tail_pointer_race,
+ 5 * HZ, 10);
+ ratelimit_set_flags(&perf->tail_pointer_race,
+ RATELIMIT_MSG_ON_RELEASE);
- INIT_LIST_HEAD(&dev_priv->perf.streams);
- mutex_init(&dev_priv->perf.lock);
- spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
+ atomic64_set(&perf->noa_programming_delay,
+ 500 * 1000 /* 500us */);
- oa_sample_rate_hard_limit = 1000 *
- (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
- dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+ perf->i915 = i915;
- mutex_init(&dev_priv->perf.metrics_lock);
- idr_init(&dev_priv->perf.metrics_idr);
+ ret = oa_init_engine_groups(perf);
+ if (ret) {
+ drm_err(&i915->drm,
+ "OA initialization failed %d\n", ret);
+ return ret;
+ }
- dev_priv->perf.initialized = true;
+ oa_init_supported_formats(perf);
}
+
+ return 0;
}
static int destroy_config(int id, void *p, void *data)
{
- struct drm_i915_private *dev_priv = data;
- struct i915_oa_config *oa_config = p;
-
- put_oa_config(dev_priv, oa_config);
+ i915_oa_config_put(p);
+ return 0;
+}
+
+int i915_perf_sysctl_register(void)
+{
+ sysctl_header = register_sysctl("dev/i915", oa_table);
return 0;
}
+void i915_perf_sysctl_unregister(void)
+{
+ unregister_sysctl_table(sysctl_header);
+}
+
/**
* i915_perf_fini - Counter part to i915_perf_init()
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*/
-void i915_perf_fini(struct drm_i915_private *dev_priv)
+void i915_perf_fini(struct drm_i915_private *i915)
{
- if (!dev_priv->perf.initialized)
+ struct i915_perf *perf = &i915->perf;
+ struct intel_gt *gt;
+ int i;
+
+ if (!perf->i915)
return;
- idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv);
- idr_destroy(&dev_priv->perf.metrics_idr);
+ for_each_gt(gt, perf->i915, i)
+ kfree(gt->perf.group);
+
+ idr_for_each(&perf->metrics_idr, destroy_config, perf);
+ idr_destroy(&perf->metrics_idr);
+
+ memset(&perf->ops, 0, sizeof(perf->ops));
+ perf->i915 = NULL;
+}
- unregister_sysctl_table(dev_priv->perf.sysctl_header);
+/**
+ * i915_perf_ioctl_version - Version of the i915-perf subsystem
+ * @i915: The i915 device
+ *
+ * This version number is used by userspace to detect available features.
+ */
+int i915_perf_ioctl_version(struct drm_i915_private *i915)
+{
+ /*
+ * 1: Initial version
+ * I915_PERF_IOCTL_ENABLE
+ * I915_PERF_IOCTL_DISABLE
+ *
+ * 2: Added runtime modification of OA config.
+ * I915_PERF_IOCTL_CONFIG
+ *
+ * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold
+ * preemption on a particular context so that performance data is
+ * accessible from a delta of MI_RPC reports without looking at the
+ * OA buffer.
+ *
+ * 4: Add DRM_I915_PERF_PROP_ALLOWED_SSEU to limit what contexts can
+ * be run for the duration of the performance recording based on
+ * their SSEU configuration.
+ *
+ * 5: Add DRM_I915_PERF_PROP_POLL_OA_PERIOD parameter that controls the
+ * interval for the hrtimer used to check for OA data.
+ *
+ * 6: Add DRM_I915_PERF_PROP_OA_ENGINE_CLASS and
+ * DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE
+ *
+ * 7: Add support for video decode and enhancement classes.
+ */
- memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
+ /*
+ * Wa_14017512683: mtl[a0..c0): Use of OAM must be preceded with Media
+ * C6 disable in BIOS. If Media C6 is enabled in BIOS, return version 6
+ * to indicate that OA media is not supported.
+ */
+ if (IS_MEDIA_GT_IP_STEP(i915->media_gt, IP_VER(13, 0), STEP_A0, STEP_C0) &&
+ intel_check_bios_c6_setup(&i915->media_gt->rc6))
+ return 6;
- dev_priv->perf.initialized = false;
+ return 7;
}
+
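
Userspace discovers this revision (and hence, for example, whether OAM sampling is usable on MTL) with the regular getparam ioctl; a minimal sketch using the i915 uapi:

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns the i915-perf interface revision, or -1 on error. On MTL
 * with Media C6 enabled in BIOS this reports 6, signalling that OA
 * media sampling is unavailable (see the workaround above).
 */
static int i915_perf_revision(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_PERF_REVISION,
		.value = &value,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return value;
}
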
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_perf.c"
+#endif