Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc')
 drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h |  26
 drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h       |   1
 drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h         |  15
 drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h         |   4
 drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c             |  49
 drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c          |   7
 drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c             |   2
 drivers/gpu/drm/i915/gt/uc/intel_guc.c                |  32
 drivers/gpu/drm/i915/gt/uc/intel_guc.h                |   4
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c            | 103
 drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c        |  12
 drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c             |  21
 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h           |  13
 drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c       |   5
 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c            |   4
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c           | 121
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h           |   2
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h     |   3
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c     | 235
 drivers/gpu/drm/i915/gt/uc/intel_huc.c                |  46
 drivers/gpu/drm/i915/gt/uc/intel_huc.h                |   2
 drivers/gpu/drm/i915/gt/uc/intel_uc.c                 |  13
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c              |  14
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h              |   5
 drivers/gpu/drm/i915/gt/uc/selftest_guc.c             |   2
 drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c   |   2
 26 files changed, 572 insertions(+), 171 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index 811add10c30d..6de87ae5669e 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -207,6 +207,32 @@ struct slpc_shared_data {
u8 reserved_mode_definition[4096];
} __packed;
+struct slpc_context_frequency_request {
+ u32 frequency_request:16;
+ u32 reserved:12;
+ u32 is_compute:1;
+ u32 ignore_busyness:1;
+ u32 is_minimum:1;
+ u32 is_predefined:1;
+} __packed;
+
+#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28)
+
+struct slpc_optimized_strategies {
+ u32 compute:1;
+ u32 async_flip:1;
+ u32 media:1;
+ u32 vsync_flip:1;
+ u32 reserved:28;
+} __packed;
+
+#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+
+enum slpc_power_profiles {
+ SLPC_POWER_PROFILES_BASE = 0x0,
+ SLPC_POWER_PROFILES_POWER_SAVING = 0x1
+};
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
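
Note: the bitfield struct and the mask constant encode the same dword. A minimal cross-check sketch, not part of the patch and assuming little-endian bitfield layout: is_compute follows 16 frequency bits and 12 reserved bits, i.e. bit 28, which is exactly what SLPC_CTX_FREQ_REQ_IS_COMPUTE (REG_BIT(28)) selects for callers that build the dword without the struct.

    /* Hypothetical cross-check, little-endian bitfield layout assumed. */
    struct slpc_context_frequency_request req = { .is_compute = 1 };
    u32 dword;

    memcpy(&dword, &req, sizeof(dword));
    /* dword == BIT(28) == SLPC_CTX_FREQ_REQ_IS_COMPUTE */
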
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
index dabeaf4f245f..00d6402333f8 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_errors_abi.h
@@ -36,6 +36,7 @@ enum intel_guc_load_status {
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75,
INTEL_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
INTEL_GUC_LOAD_STATUS_READY = 0xF0,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 58012edd4eb0..0c709e6c15be 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -29,9 +29,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
@@ -101,4 +101,13 @@ enum {
GUC_CONTEXT_POLICIES_KLV_NUM_IDS = 5,
};
+/*
+ * Workaround keys:
+ */
+enum {
+ GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE = 0x9001,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002,
+ GUC_WORKAROUND_KLV_AVOID_GFX_CLEAR_WHILE_ACTIVE = 0x9006,
+};
+
#endif /* _ABI_GUC_KLVS_ABI_H */
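
Note: a zero-length KLV is just its 16:16 key/length header dword. A minimal sketch of the packing under the masks above (illustrative; it mirrors what guc_waklv_enable_simple() does later in this series):

    #include <linux/bitfield.h>

    /* key 0x9001 in bits 31:16, zero value dwords in bits 15:0 */
    u32 hdr = FIELD_PREP(GUC_KLV_0_KEY, GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE) |
              FIELD_PREP(GUC_KLV_0_LEN, 0);   /* == 0x90010000 */
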
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
index 1fc0c17b1230..803c0379d97d 100644
--- a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -81,7 +81,7 @@ struct guc_debug_capture_list {
*
* intel_guc_capture module uses these structures to maintain static
* tables (per unique platform) that consists of lists of registers
- * (offsets, names, flags,...) that are used at the ADS regisration
+ * (offsets, names, flags,...) that are used at the ADS registration
* time as well as during runtime processing and reporting of error-
* capture states generated by GuC just prior to engine reset events.
*/
@@ -200,7 +200,7 @@ struct intel_guc_state_capture {
* dynamically allocate new nodes when receiving the G2H notification
* because the event handlers for all G2H event-processing is called
* by the ct processing worker queue and when that queue is being
- * processed, there is no absoluate guarantee that we are not in the
+ * processed, there is no absolute guarantee that we are not in the
* midst of a GT reset operation (which doesn't allow allocations).
*/
struct list_head cachelist;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
index e2e42b3e0d5d..d550eb6edfb8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -80,6 +80,7 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
const struct intel_gsc_cpd_header_v2 *cpd_header = NULL;
const struct intel_gsc_cpd_entry *cpd_entry = NULL;
const struct intel_gsc_manifest_header *manifest;
+ struct intel_uc_fw_ver min_ver = { 0 };
size_t min_size = sizeof(*layout);
int i;
@@ -212,6 +213,50 @@ int intel_gsc_fw_get_binary_info(struct intel_uc_fw *gsc_fw, const void *data, s
}
}
+ /*
+ * ARL SKUs require newer firmwares, but the blob is actually common
+ * across all MTL and ARL SKUs, so we need to do an explicit version check
+ * here rather than using a separate table entry. If a too old version
+ * is found, then just don't use GSC rather than aborting the driver load.
+ * Note that the major number in the GSC FW version is used to indicate
+ * the platform, so we expect it to always be 102 for MTL/ARL binaries.
+ */
+ if (IS_ARROWLAKE_S(gt->i915))
+ min_ver = (struct intel_uc_fw_ver){ 102, 0, 10, 1878 };
+ else if (IS_ARROWLAKE_H(gt->i915) || IS_ARROWLAKE_U(gt->i915))
+ min_ver = (struct intel_uc_fw_ver){ 102, 1, 15, 1926 };
+
+ if (IS_METEORLAKE(gt->i915) && gsc->release.major != 102) {
+ gt_info(gt, "Invalid GSC firmware for MTL/ARL, got %d.%d.%d.%d but need 102.x.x.x",
+ gsc->release.major, gsc->release.minor,
+ gsc->release.patch, gsc->release.build);
+ return -EINVAL;
+ }
+
+ if (min_ver.major) {
+ bool too_old = false;
+
+ if (gsc->release.minor < min_ver.minor) {
+ too_old = true;
+ } else if (gsc->release.minor == min_ver.minor) {
+ if (gsc->release.patch < min_ver.patch) {
+ too_old = true;
+ } else if (gsc->release.patch == min_ver.patch) {
+ if (gsc->release.build < min_ver.build)
+ too_old = true;
+ }
+ }
+
+ if (too_old) {
+ gt_info(gt, "GSC firmware too old for ARL, got %d.%d.%d.%d but need at least %d.%d.%d.%d",
+ gsc->release.major, gsc->release.minor,
+ gsc->release.patch, gsc->release.build,
+ min_ver.major, min_ver.minor,
+ min_ver.patch, min_ver.build);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -298,7 +343,7 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
memcpy_toio(gsc->local_vaddr, src, gsc->fw.size);
memset_io(gsc->local_vaddr + gsc->fw.size, 0, gsc->local->size - gsc->fw.size);
- intel_guc_write_barrier(&gt->uc.guc);
+ intel_guc_write_barrier(gt_to_guc(gt));
i915_gem_object_unpin_map(gsc->fw.obj);
@@ -351,7 +396,7 @@ static int gsc_fw_query_compatibility_version(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_VER_PKT_SZ * 2,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt), GSC_VER_PKT_SZ * 2,
&vma, &vaddr);
if (err) {
gt_err(gt, "failed to allocate vma for GSC version query\n");
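
Note: the nested minor/patch/build checks in intel_gsc_fw_get_binary_info() above amount to a lexicographic compare on (minor, patch, build); an equivalent standalone sketch (hypothetical helper — major is already pinned to 102 for MTL/ARL, so it is excluded):

    static bool gsc_release_too_old(const struct intel_uc_fw_ver *rel,
                                    const struct intel_uc_fw_ver *min)
    {
            if (rel->minor != min->minor)
                    return rel->minor < min->minor;
            if (rel->patch != min->patch)
                    return rel->patch < min->patch;
            return rel->build < min->build;
    }
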
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 40817ebcca71..d8edd7c054c8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -5,8 +5,8 @@
#include <linux/component.h>
-#include <drm/i915_component.h>
-#include <drm/i915_gsc_proxy_mei_interface.h>
+#include <drm/intel/i915_component.h>
+#include <drm/intel/i915_gsc_proxy_mei_interface.h>
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
@@ -358,7 +358,8 @@ static int proxy_channel_alloc(struct intel_gsc_uc *gsc)
void *vaddr;
int err;
- err = intel_guc_allocate_and_map_vma(&gt->uc.guc, GSC_PROXY_CHANNEL_SIZE,
+ err = intel_guc_allocate_and_map_vma(gt_to_guc(gt),
+ GSC_PROXY_CHANNEL_SIZE,
&vma, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index 453d855dd1de..3d3191deb0ab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -302,7 +302,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- if (!intel_uc_fw_is_loadable(&gsc->fw))
+ if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw))
return;
if (intel_gsc_uc_fw_init_done(gsc))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 2b450c43bbd7..9df80c325fc1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -239,8 +239,16 @@ static u32 guc_ctl_debug_flags(struct intel_guc *guc)
static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
u32 flags = 0;
+ /*
+ * Enable PXP GuC autoteardown flow.
+ * NB: MTL does things differently.
+ */
+ if (HAS_PXP(gt->i915) && !IS_METEORLAKE(gt->i915))
+ flags |= GUC_CTL_ENABLE_GUC_PXP_CTL;
+
if (!intel_guc_submission_is_used(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
@@ -286,7 +294,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
/* Wa_22012773006:gen11,gen12 < XeHP */
if (GRAPHICS_VER(gt->i915) >= 11 &&
- GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50))
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
flags |= GUC_WA_POLLCS;
/* Wa_14014475959 */
@@ -294,6 +302,11 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
IS_DG2(gt->i915))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
+ flags |= GUC_WA_RCS_CCS_SWITCHOUT;
+
/*
* Wa_14012197797
* Wa_22011391025
@@ -315,15 +328,12 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2_G11(gt->i915))
flags |= GUC_WA_CONTEXT_ISOLATION;
- /* Wa_16015675438 */
- if (!RCS_MASK(gt))
- flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
-
- /* Wa_14018913170 */
- if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) {
- if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915))
- flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
- }
+ /*
+ * Wa_14018913170: Applicable to all platforms supported by i915 so
+ * don't bother testing for all X/Y/Z platforms explicitly.
+ */
+ if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
return flags;
}
@@ -680,7 +690,7 @@ int intel_guc_suspend(struct intel_guc *guc)
* H2G MMIO command completes.
*
* Don't abort on a failure code from the GuC. Keep going and do the
- * clean up in santize() and re-initialisation on resume and hopefully
+ * clean up in sanitize() and re-initialisation on resume and hopefully
* the error here won't be problematic.
*/
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
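
Note: the Wa_16019325821/Wa_14019159160 check relies on IP_VER() packing a (version, release) pair into one integer so IP ranges compare numerically; a sketch, assuming the usual ((ver) << 8 | (rel)) packing:

    #define IP_VER(ver, rel) ((ver) << 8 | (rel))

    /* Xe_LPG window used above: 12.70..12.74 => 0x0c46..0x0c4a */
    bool in_range = ver >= IP_VER(12, 70) && ver <= IP_VER(12, 74);
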
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index be70c46604b4..053780f562c1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -204,6 +204,8 @@ struct intel_guc {
struct guc_mmio_reg *ads_regset;
/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @ads_waklv_size: size of workaround KLVs */
+ u32 ads_waklv_size;
/** @ads_capture_size: size of register lists in the ADS used for error capture */
u32 ads_capture_size;
@@ -293,7 +295,7 @@ struct intel_guc {
*/
struct work_struct dead_guc_worker;
/**
- * @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrance
+ * @last_dead_guc_jiffies: timestamp of previous 'dead guc' occurrence
* used to prevent a fundamentally broken system from continuously
* reloading the GuC.
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index f7372f736a77..46fabbfc775e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -46,6 +46,10 @@
* +---------------------------------------+
* | padding |
* +---------------------------------------+ <== 4K aligned
+ * | w/a KLVs |
+ * +---------------------------------------+
+ * | padding |
+ * +---------------------------------------+ <== 4K aligned
* | capture lists |
* +---------------------------------------+
* | padding |
@@ -88,6 +92,11 @@ static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
return PAGE_ALIGN(guc->ads_golden_ctxt_size);
}
+static u32 guc_ads_waklv_size(struct intel_guc *guc)
+{
+ return PAGE_ALIGN(guc->ads_waklv_size);
+}
+
static u32 guc_ads_capture_size(struct intel_guc *guc)
{
return PAGE_ALIGN(guc->ads_capture_size);
@@ -113,7 +122,7 @@ static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
-static u32 guc_ads_capture_offset(struct intel_guc *guc)
+static u32 guc_ads_waklv_offset(struct intel_guc *guc)
{
u32 offset;
@@ -123,6 +132,16 @@ static u32 guc_ads_capture_offset(struct intel_guc *guc)
return PAGE_ALIGN(offset);
}
+static u32 guc_ads_capture_offset(struct intel_guc *guc)
+{
+ u32 offset;
+
+ offset = guc_ads_waklv_offset(guc) +
+ guc_ads_waklv_size(guc);
+
+ return PAGE_ALIGN(offset);
+}
+
static u32 guc_ads_private_data_offset(struct intel_guc *guc)
{
u32 offset;
@@ -393,7 +412,7 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
/* add in local MOCS registers */
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
else
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
@@ -503,7 +522,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
-#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \
+#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55) ? \
XEHP_LR_HW_CONTEXT_SIZE : \
LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
@@ -796,6 +815,73 @@ engine_instance_list:
return PAGE_ALIGN(total_size);
}
+static void guc_waklv_enable_simple(struct intel_guc *guc, u32 *offset, u32 *remain, u32 klv_id)
+{
+ u32 size;
+ u32 klv_entry[] = {
+ /* 16:16 key/length */
+ FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
+ FIELD_PREP(GUC_KLV_0_LEN, 0),
+ /* 0 dwords data */
+ };
+
+ size = sizeof(klv_entry);
+ GEM_BUG_ON(*remain < size);
+
+ iosys_map_memcpy_to(&guc->ads_map, *offset, klv_entry, size);
+ *offset += size;
+ *remain -= size;
+}
+
+static void guc_waklv_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 offset, addr_ggtt, remain, size;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return;
+
+ if (GUC_FIRMWARE_VER(guc) < MAKE_GUC_VER(70, 10, 0))
+ return;
+
+ GEM_BUG_ON(iosys_map_is_null(&guc->ads_map));
+ offset = guc_ads_waklv_offset(guc);
+ remain = guc_ads_waklv_size(guc);
+
+ /* Wa_14019159160 */
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_SERIALIZED_RA_MODE);
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_AVOID_GFX_CLEAR_WHILE_ACTIVE);
+ }
+
+ /* Wa_16021333562 */
+ if ((GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 21, 1)) &&
+ (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) ||
+ IS_MEDIA_GT_IP_RANGE(gt, IP_VER(13, 0), IP_VER(13, 0)) ||
+ IS_DG2(gt->i915)))
+ guc_waklv_enable_simple(guc, &offset, &remain,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED);
+
+ size = guc_ads_waklv_size(guc) - remain;
+ if (!size)
+ return;
+
+ offset = guc_ads_waklv_offset(guc);
+ addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+
+ ads_blob_write(guc, ads.wa_klv_addr_lo, addr_ggtt);
+ ads_blob_write(guc, ads.wa_klv_addr_hi, 0);
+ ads_blob_write(guc, ads.wa_klv_size, size);
+}
+
+static int guc_prep_waklv(struct intel_guc *guc)
+{
+ /* Fudge something chunky for now: */
+ return PAGE_SIZE;
+}
+
static void __guc_ads_init(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -843,6 +929,9 @@ static void __guc_ads_init(struct intel_guc *guc)
/* MMIO save/restore list */
guc_mmio_reg_state_init(guc);
+ /* Workaround KLV list */
+ guc_waklv_init(guc);
+
/* Private Data */
ads_blob_write(guc, ads.private_data, base +
guc_ads_private_data_offset(guc));
@@ -886,6 +975,12 @@ int intel_guc_ads_create(struct intel_guc *guc)
return ret;
guc->ads_capture_size = ret;
+ /* And don't forget the workaround KLVs: */
+ ret = guc_prep_waklv(guc);
+ if (ret < 0)
+ return ret;
+ guc->ads_waklv_size = ret;
+
/* Now the total size can be determined: */
size = guc_ads_blob_size(guc);
@@ -961,7 +1056,7 @@ u32 intel_guc_engine_usage_offset(struct intel_guc *guc)
struct iosys_map intel_guc_engine_usage_record_map(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u8 guc_class = engine_class_to_guc_class(engine->class);
size_t offset = offsetof(struct __guc_ads_blob,
engine_usage.engines[guc_class][ilog2(engine->logical_mask)]);
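
Note: each ADS region starts where the previous one ends, rounded up to a 4K page, so inserting the w/a KLV region between the golden contexts and the capture lists only required adding one link to the offset chain. A toy sketch of the pattern (hypothetical helper, not part of the patch):

    static u32 next_region_offset(u32 prev_offset, u32 prev_size)
    {
            return PAGE_ALIGN(prev_offset + prev_size);
    }

    /* waklv   = next_region_offset(golden_ctxt_offset, golden_ctxt_size); */
    /* capture = next_region_offset(waklv_offset, waklv_size);             */
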
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a1cd40d80517..9547fff672bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -51,6 +51,7 @@
{ RING_ESR(0), 0, 0, "ESR" }, \
{ RING_DMA_FADD(0), 0, 0, "RING_DMA_FADD_LDW" }, \
{ RING_DMA_FADD_UDW(0), 0, 0, "RING_DMA_FADD_UDW" }, \
+ { RING_EIR(0), 0, 0, "EIR" }, \
{ RING_IPEIR(0), 0, 0, "IPEIR" }, \
{ RING_IPEHR(0), 0, 0, "IPEHR" }, \
{ RING_INSTPS(0), 0, 0, "INSTPS" }, \
@@ -80,9 +81,6 @@
{ GEN8_RING_PDP_LDW(0, 3), 0, 0, "PDP3_LDW" }, \
{ GEN8_RING_PDP_UDW(0, 3), 0, 0, "PDP3_UDW" }
-#define COMMON_BASE_HAS_EU \
- { EIR, 0, 0, "EIR" }
-
#define COMMON_BASE_RENDER \
{ GEN7_SC_INSTDONE, 0, 0, "GEN7_SC_INSTDONE" }
@@ -105,7 +103,6 @@ static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
/* XE_LP Render / Compute Per-Class */
static const struct __guc_mmio_reg_descr xe_lp_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
COMMON_GEN12BASE_RENDER,
};
@@ -148,7 +145,6 @@ static const struct __guc_mmio_reg_descr gen8_global_regs[] = {
};
static const struct __guc_mmio_reg_descr gen8_rc_class_regs[] = {
- COMMON_BASE_HAS_EU,
COMMON_BASE_RENDER,
};
@@ -1441,7 +1437,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
if (!cap || !ee->engine)
return -ENODEV;
- guc = &ee->engine->gt->uc.guc;
+ guc = gt_to_guc(ee->engine->gt);
i915_error_printf(ebuf, "global --- GuC Error Capture on %s command stream:\n",
ee->engine->name);
@@ -1543,7 +1539,7 @@ bool intel_guc_capture_is_matching_engine(struct intel_gt *gt,
if (!gt || !ce || !engine)
return false;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return false;
@@ -1573,7 +1569,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
if (!gt || !ee || !ce)
return;
- guc = &gt->uc.guc;
+ guc = gt_to_guc(gt);
if (!guc->capture)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 52332bb14339..e7ccfa520df3 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -26,7 +26,7 @@ static void guc_prepare_xfer(struct intel_gt *gt)
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING;
- if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 55))
shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
GUC_ENABLE_MIA_CACHING;
@@ -115,6 +115,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
case INTEL_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case INTEL_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
*success = false;
return true;
}
@@ -144,7 +145,7 @@ static inline bool guc_load_done(struct intel_uncore *uncore, u32 *status, bool
* an end user should hit the timeout is in case of extreme thermal throttling.
* And a system that is that hot during boot is probably dead anyway!
*/
-#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOAD_RETRY_LIMIT 20
#else
#define GUC_LOAD_RETRY_LIMIT 3
@@ -241,6 +242,11 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -EPERM;
break;
+ case INTEL_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ guc_info(guc, "invalid w/a KLV entry\n");
+ ret = -EINVAL;
+ break;
+
case INTEL_GUC_LOAD_STATUS_HWCONFIG_START:
guc_info(guc, "still extracting hwconfig table.\n");
ret = -ETIMEDOUT;
@@ -253,13 +259,14 @@ static int guc_wait_ucode(struct intel_guc *guc)
} else if (delta_ms > 200) {
guc_warn(guc, "excessive init time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, status, count, ret);
- guc_warn(guc, "excessive init time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- intel_rps_read_actual_frequency(&gt->rps), before_freq,
+ guc_warn(guc, "excessive init time: [freq = %dMHz -> %dMHz vs %dMHz, perf_limit_reasons = 0x%08X]\n",
+ before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
- guc_dbg(guc, "init took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
- delta_ms, intel_rps_read_actual_frequency(&gt->rps),
- before_freq, status, count, ret);
+ guc_dbg(guc, "init took %lldms, freq = %dMHz -> %dMHz vs %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps), status, count, ret);
}
return ret;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 8ae1846431da..eded00f0c7e1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -96,14 +96,16 @@
#define GUC_WA_GAM_CREDITS BIT(10)
#define GUC_WA_DUAL_QUEUE BIT(11)
#define GUC_WA_RCS_RESET_BEFORE_RC6 BIT(13)
-#define GUC_WA_CONTEXT_ISOLATION BIT(15)
#define GUC_WA_PRE_PARSER BIT(14)
+#define GUC_WA_CONTEXT_ISOLATION BIT(15)
+#define GUC_WA_RCS_CCS_SWITCHOUT BIT(16)
#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
#define GUC_WA_POLLCS BIT(18)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22)
#define GUC_CTL_FEATURE 2
+#define GUC_CTL_ENABLE_GUC_PXP_CTL BIT(1)
#define GUC_CTL_ENABLE_SLPC BIT(2)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
@@ -294,7 +296,7 @@ struct guc_update_scheduling_policy_header {
} __packed;
/*
- * Can't dynmically allocate memory for the scheduling policy KLV because
+ * Can't dynamically allocate memory for the scheduling policy KLV because
* it will be sent from within the reset path. Need a fixed size lump on
* the stack instead :(.
*
@@ -406,7 +408,7 @@ enum guc_capture_type {
GUC_CAPTURE_LIST_TYPE_MAX,
};
-/* Class indecies for capture_class and capture_instance arrays */
+/* Class indices for capture_class and capture_instance arrays */
enum {
GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
@@ -430,7 +432,10 @@ struct guc_ads {
u32 capture_instance[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_class[GUC_CAPTURE_LIST_INDEX_MAX][GUC_MAX_ENGINE_CLASSES];
u32 capture_global[GUC_CAPTURE_LIST_INDEX_MAX];
- u32 reserved[14];
+ u32 wa_klv_addr_lo;
+ u32 wa_klv_addr_hi;
+ u32 wa_klv_size;
+ u32 reserved[11];
} __packed;
/* Engine usage stats */
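
Note: the three new fields are carved out of the old reserved[14], so struct guc_ads keeps the same size and the firmware interface layout is preserved; a compile-time sketch of that invariant (hypothetical check, not in the patch):

    static_assert(3 * sizeof(u32) + 11 * sizeof(u32) == 14 * sizeof(u32));
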
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
index cc9569af7f0c..868195c33f5b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_hwconfig.c
@@ -7,6 +7,7 @@
#include "gt/intel_hwconfig.h"
#include "i915_drv.h"
#include "i915_memcpy.h"
+#include "intel_guc_print.h"
/*
* GuC has a blob containing hardware configuration information (HWConfig).
@@ -42,6 +43,8 @@ static int __guc_action_get_hwconfig(struct intel_guc *guc,
};
int ret;
+ guc_dbg(guc, "Querying HW config table: size = %d, offset = 0x%08X\n",
+ ggtt_size, ggtt_offset);
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (ret == -ENXIO)
return -ENOENT;
@@ -111,7 +114,7 @@ static bool has_table(struct drm_i915_private *i915)
static int guc_hwconfig_init(struct intel_gt *gt)
{
struct intel_hwconfig *hwconfig = &gt->info.hwconfig;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!has_table(gt->i915))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index bf16351c9349..e8a04e476c57 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -14,11 +14,11 @@
#include "intel_guc_log.h"
#include "intel_guc_print.h"
-#if defined(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
-#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
+#elif IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
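
Note: IS_ENABLED() is the preferred kernel idiom because, for tristate options, =m defines CONFIG_FOO_MODULE rather than CONFIG_FOO, so defined(CONFIG_FOO) misses the module case; for bool options like these debug switches the two forms behave identically. Sketch with a placeholder option name:

    #if IS_ENABLED(CONFIG_FOO)   /* true for CONFIG_FOO=y or =m */
    #define BUFFER_SIZE SZ_2M
    #else
    #define BUFFER_SIZE SZ_1M
    #endif
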
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 3e681ab6fbf9..d5ee6e5e1443 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -15,6 +15,34 @@
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
+/**
+ * DOC: SLPC - Dynamic Frequency management
+ *
+ * Single Loop Power Control (SLPC) is a GuC algorithm that manages
+ * GT frequency based on busyness and how KMD initializes it. SLPC is
+ * almost completely in control after initialization except for a few
+ * scenarios mentioned below.
+ *
+ * KMD uses the concept of waitboost to ramp frequency to RP0 when there
+ * are pending submissions for a context. It achieves this by sending GuC a
+ * request to update the min frequency to RP0. Waitboost is disabled
+ * when the request retires.
+ *
+ * Another form of frequency control happens through per-context hints.
+ * A context can be marked as low latency during creation. That will ensure
+ * that SLPC uses an aggressive frequency ramp when that context is active.
+ *
+ * Power profiles add another level of control to these mechanisms.
+ * When power saving profile is chosen, SLPC will use conservative
+ * thresholds to ramp frequency, thus saving power. KMD will disable
+ * waitboosts as well, which achieves further power savings. Base profile
+ * is default and ensures balanced performance for any workload.
+ *
+ * Lastly, users have some level of control through sysfs, where min/max
+ * frequency values can be altered and the use of efficient freq
+ * can be toggled.
+ */
+
static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
{
return container_of(slpc, struct intel_guc, slpc);
@@ -265,6 +293,8 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
slpc->num_boosts = 0;
slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
+ slpc->power_profile = SLPC_POWER_PROFILES_BASE;
+
mutex_init(&slpc->lock);
INIT_WORK(&slpc->boost_work, slpc_boost_work);
@@ -357,21 +387,29 @@ static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
}
-static void slpc_shared_data_reset(struct slpc_shared_data *data)
+static void slpc_shared_data_reset(struct intel_guc_slpc *slpc)
{
- memset(data, 0, sizeof(struct slpc_shared_data));
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ struct slpc_shared_data *data = slpc->vaddr;
+
+ memset(data, 0, sizeof(struct slpc_shared_data));
data->header.size = sizeof(struct slpc_shared_data);
/* Enable only GTPERF task, disable others */
slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
SLPC_PARAM_TASK_DISABLE_GTPERF);
- slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
- SLPC_PARAM_TASK_DISABLE_BALANCER);
+ /*
+ * Don't allow balancer related algorithms on platforms before
+ * Xe_LPG, where GuC started to restrict it to TDP limited scenarios.
+ */
+ if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)) {
+ slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
+ SLPC_PARAM_TASK_DISABLE_BALANCER);
- slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
- SLPC_PARAM_TASK_DISABLE_DCC);
+ slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
+ SLPC_PARAM_TASK_DISABLE_DCC);
+ }
}
/**
@@ -537,6 +575,20 @@ int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
return ret;
}
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_STRATEGIES,
+ val);
+
+ return ret;
+}
+
int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -553,6 +605,34 @@ int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
return ret;
}
+int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ if (val > SLPC_POWER_PROFILES_POWER_SAVING)
+ return -EINVAL;
+
+ mutex_lock(&slpc->lock);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_POWER_PROFILE,
+ val);
+ if (ret)
+ guc_err(slpc_to_guc(slpc),
+ "Failed to set power profile to %d: %pe\n",
+ val, ERR_PTR(ret));
+ else
+ slpc->power_profile = val;
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&slpc->lock);
+
+ return ret;
+}
+
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
{
u32 pm_intrmsk_mbz = 0;
@@ -672,7 +752,7 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
GEM_BUG_ON(!slpc->vma);
- slpc_shared_data_reset(slpc->vaddr);
+ slpc_shared_data_reset(slpc);
ret = slpc_reset(slpc);
if (unlikely(ret < 0)) {
@@ -711,6 +791,16 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
/* Set cached media freq ratio mode */
intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+ /* Enable SLPC Optimized Strategy for compute */
+ intel_guc_slpc_set_strategy(slpc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+
+ /* Set cached value of power_profile */
+ ret = intel_guc_slpc_set_power_profile(slpc, slpc->power_profile);
+ if (unlikely(ret)) {
+ guc_probe_error(guc, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
return 0;
}
@@ -774,6 +864,23 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p
drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
drm_printf(p, "\tGTPERF task active: %s\n",
str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
+ drm_printf(p, "\tDCC enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_DCC_TASK_ENABLED));
+ drm_printf(p, "\tDCC in: %s\n",
+ str_yes_no(slpc_tasks->status & SLPC_IN_DCC));
+ drm_printf(p, "\tBalancer enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_ENABLED));
+ drm_printf(p, "\tIBC enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_IBC_TASK_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_IA_LMT_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT active: %s\n",
+ str_yes_no(slpc_tasks->status &
+ SLPC_BALANCER_IA_LMT_ACTIVE));
drm_printf(p, "\tMax freq: %u MHz\n",
slpc_decode_max_freq(slpc));
drm_printf(p, "\tMin freq: %u MHz\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 6ac6503c39d4..fc9f761b4372 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -45,5 +45,7 @@ void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val);
+int intel_guc_slpc_set_strategy(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_set_power_profile(struct intel_guc_slpc *slpc, u32 val);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index a88651331497..83673b10ac4e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -33,6 +33,9 @@ struct intel_guc_slpc {
u32 max_freq_softlimit;
bool ignore_eff_freq;
+ /* Base or power saving */
+ u32 power_profile;
+
/* cached media ratio mode */
u32 media_ratio_mode;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index f3dcae4b9d45..127316d2c8aa 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -398,7 +398,7 @@ static inline void set_context_guc_id_invalid(struct intel_context *ce)
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
{
- return &ce->engine->gt->uc.guc;
+ return gt_to_guc(ce->engine->gt);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@@ -633,7 +633,7 @@ static int guc_submission_send_busy_loop(struct intel_guc *guc,
atomic_inc(&guc->outstanding_submission_g2h);
ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
- if (ret)
+ if (ret && g2h_len_dw)
atomic_dec(&guc->outstanding_submission_g2h);
return ret;
@@ -1223,7 +1223,7 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
* determine validity of these values. Instead we read the values multiple times
* until they are consistent. In test runs, 3 attempts results in consistent
* values. The upper bound is set to 6 attempts and may need to be tuned as per
- * any new occurences.
+ * any new occurrences.
*/
static void __get_engine_usage_record(struct intel_engine_cs *engine,
u32 *last_in, u32 *id, u32 *total)
@@ -1243,10 +1243,25 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine,
} while (++i < 6);
}
+static void __set_engine_usage_record(struct intel_engine_cs *engine,
+ u32 last_in, u32 id, u32 total)
+{
+ struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
+
+#define record_write(map_, field_, val_) \
+ iosys_map_wr_field(map_, 0, struct guc_engine_usage_record, field_, val_)
+
+ record_write(&rec_map, last_switch_in_stamp, last_in);
+ record_write(&rec_map, current_context_index, id);
+ record_write(&rec_map, total_runtime, total);
+
+#undef record_write
+}
+
static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
@@ -1270,15 +1285,12 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
static u32 gpm_timestamp_shift(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
- u32 reg, shift;
+ u32 reg;
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
- shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
-
- return 3 - shift;
+ return 3 - REG_FIELD_GET(GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
}
static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
@@ -1311,7 +1323,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc;
struct i915_gpu_error *gpu_error = &engine->i915->gpu_error;
struct intel_gt *gt = engine->gt;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
u64 total, gt_stamp_saved;
unsigned long flags;
u32 reset_count;
@@ -1339,7 +1351,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
- wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
+ wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt);
if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
@@ -1363,9 +1375,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
total += intel_gt_clock_interval_to_ns(gt, clk);
}
+ if (total > stats->total)
+ stats->total = total;
+
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
- return ns_to_ktime(total);
+ return ns_to_ktime(stats->total);
}
static void guc_enable_busyness_worker(struct intel_guc *guc)
@@ -1403,14 +1418,17 @@ static void guc_cancel_busyness_worker(struct intel_guc *guc)
* Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
* every possible call stack is unfeasible. It would be too intrusive to many
* areas that really don't care about the GuC backend. However, there is the
- * 'reset_in_progress' flag available, so just use that.
+ * I915_RESET_BACKOFF flag and the gt->reset.mutex can be tested for is_locked.
+ * So just use those. Note that testing both is required due to the hideously
+ * complex nature of the i915 driver's reset code paths.
*
* And note that in the case of a reset occurring during driver unload
- * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set
- * is fine because there is another cancel in _finish (when the reset flag is
- * not).
+ * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
+ * reset flag/mutex are set) is fine because there is another explicit cancel in
+ * intel_guc_submission_fini (when the reset flag/mutex are not).
*/
- if (guc_to_gt(guc)->uc.reset_in_progress)
+ if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
+ test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
cancel_delayed_work(&guc->timestamp.work);
else
cancel_delayed_work_sync(&guc->timestamp.work);
@@ -1424,19 +1442,43 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
unsigned long flags;
ktime_t unused;
- guc_cancel_busyness_worker(guc);
-
spin_lock_irqsave(&guc->timestamp.lock, flags);
guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
+ struct intel_engine_guc_stats *stats = &engine->stats.guc;
+
guc_update_engine_gt_clks(engine);
- engine->stats.guc.prev_total = 0;
+
+ /*
+ * If resetting a running context, accumulate the active
+ * time as well since there will be no context switch.
+ */
+ if (stats->running) {
+ u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+ stats->total_gt_clks += clk;
+ }
+ stats->prev_total = 0;
+ stats->running = 0;
}
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
+static void __update_guc_busyness_running_state(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ for_each_engine(engine, gt, id)
+ engine->stats.guc.running = false;
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
static void __update_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1542,6 +1584,9 @@ err_trylock:
static int guc_action_enable_usage_stats(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
u32 offset = intel_guc_engine_usage_offset(guc);
u32 action[] = {
INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
@@ -1549,6 +1594,9 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc)
0,
};
+ for_each_engine(engine, gt, id)
+ __set_engine_usage_record(engine, 0, 0xffffffff, 0);
+
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
@@ -1576,11 +1624,14 @@ static void guc_fini_engine_stats(struct intel_guc *guc)
void intel_guc_busyness_park(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
if (!guc_submission_initialized(guc))
return;
+ /* Assume no engines are running and set running state to false */
+ __update_guc_busyness_running_state(guc);
+
/*
* There is a race with suspend flow where the worker runs after suspend
* and causes an unclaimed register access warning. Cancel the worker
@@ -1603,7 +1654,7 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
unsigned long flags;
ktime_t unused;
@@ -1687,6 +1738,10 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
spin_lock_irq(guc_to_gt(guc)->irq_lock);
spin_unlock_irq(guc_to_gt(guc)->irq_lock);
+ /* Flush tasklet */
+ tasklet_disable(&guc->ct.receive_tasklet);
+ tasklet_enable(&guc->ct.receive_tasklet);
+
guc_flush_submissions(guc);
guc_flush_destroyed_contexts(guc);
flush_work(&guc->ct.requests.worker);
@@ -2004,12 +2059,7 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
- /*
- * Ensure the busyness worker gets cancelled even on a fatal wedge.
- * Note that reset_prepare is not allowed to because it confuses lockdep.
- */
- if (guc_submission_initialized(guc))
- guc_cancel_busyness_worker(guc);
+ int outstanding;
/* Reset called during driver load or during wedge? */
if (unlikely(!guc_submission_initialized(guc) ||
@@ -2020,11 +2070,14 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
/*
* Technically possible for either of these values to be non-zero here,
- * but very unlikely + harmless. Regardless let's add a warn so we can
+ * but very unlikely + harmless. Regardless let's add an error so we can
* see in CI if this happens frequently / a precursor to taking down the
* machine.
*/
- GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
+ outstanding = atomic_read(&guc->outstanding_submission_g2h);
+ if (outstanding)
+ guc_err(guc, "Unexpected outstanding GuC to Host response(s) in reset finish: %d\n",
+ outstanding);
atomic_set(&guc->outstanding_submission_g2h, 0);
intel_guc_global_policies_update(guc);
@@ -2136,6 +2189,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->submission_initialized)
return;
+ guc_fini_engine_stats(guc);
guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy_v69(guc);
i915_sched_engine_put(guc->sched_engine);
@@ -2194,7 +2248,7 @@ static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
static void guc_submit_request(struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
- struct intel_guc *guc = &rq->engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(rq->engine->gt);
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -2220,11 +2274,10 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
order_base_2(ce->parallel.number_children
+ 1));
else
- ret = ida_simple_get(&guc->submission_state.guc_ids,
- NUMBER_MULTI_LRC_GUC_ID(guc),
- guc->submission_state.num_guc_ids,
- GFP_KERNEL | __GFP_RETRY_MAYFAIL |
- __GFP_NOWARN);
+ ret = ida_alloc_range(&guc->submission_state.guc_ids,
+ NUMBER_MULTI_LRC_GUC_ID(guc),
+ guc->submission_state.num_guc_ids - 1,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(ret < 0))
return ret;
@@ -2247,8 +2300,8 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+ 1));
} else {
--guc->submission_state.guc_ids_in_use;
- ida_simple_remove(&guc->submission_state.guc_ids,
- ce->guc_id.id);
+ ida_free(&guc->submission_state.guc_ids,
+ ce->guc_id.id);
}
clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
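
Note: the deprecated ida_simple_get() treats its end argument as exclusive, while ida_alloc_range() treats max as inclusive — hence the new "- 1" above; ida_simple_remove() maps directly to ida_free(). Equivalent calls:

    id = ida_simple_get(&ida, 0, 128, GFP_KERNEL);   /* old: ids 0..127 */
    id = ida_alloc_range(&ida, 0, 127, GFP_KERNEL);  /* new: ids 0..127 */
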
@@ -2645,6 +2698,7 @@ MAKE_CONTEXT_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_CONTEXT_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_CONTEXT_POLICY_ADD(priority, SCHEDULING_PRIORITY)
MAKE_CONTEXT_POLICY_ADD(preempt_to_idle, PREEMPT_TO_IDLE_ON_QUANTUM_EXPIRY)
+MAKE_CONTEXT_POLICY_ADD(slpc_ctx_freq_req, SLPM_GT_FREQUENCY)
#undef MAKE_CONTEXT_POLICY_ADD
@@ -2660,10 +2714,11 @@ static int __guc_context_set_context_policies(struct intel_guc *guc,
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct context_policy policy;
u32 execution_quantum;
u32 preemption_timeout;
+ u32 slpc_ctx_freq_req = 0;
unsigned long flags;
int ret;
@@ -2675,11 +2730,15 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
execution_quantum = engine->props.timeslice_duration_ms * 1000;
preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+ if (ce->flags & BIT(CONTEXT_LOW_LATENCY))
+ slpc_ctx_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
+
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
__guc_context_policy_add_execution_quantum(&policy, execution_quantum);
__guc_context_policy_add_preemption_timeout(&policy, preemption_timeout);
+ __guc_context_policy_add_slpc_ctx_freq_req(&policy, slpc_ctx_freq_req);
if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
__guc_context_policy_add_preempt_to_idle(&policy, 1);
@@ -2736,7 +2795,7 @@ static u32 map_guc_prio_to_lrc_desc_prio(u8 prio)
static void prepare_context_registration_info_v69(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
struct guc_lrc_desc_v69 *desc;
struct intel_context *child;
@@ -2805,7 +2864,7 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
struct guc_ctxt_registration_info *info)
{
struct intel_engine_cs *engine = ce->engine;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
u32 ctx_id = ce->guc_id.id;
GEM_BUG_ON(!engine->mask);
@@ -2842,9 +2901,9 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
ce->parallel.guc.wqi_tail = 0;
ce->parallel.guc.wqi_head = 0;
- wq_desc_offset = i915_ggtt_offset(ce->state) +
+ wq_desc_offset = (u64)i915_ggtt_offset(ce->state) +
__get_parent_scratch_offset(ce);
- wq_base_offset = i915_ggtt_offset(ce->state) +
+ wq_base_offset = (u64)i915_ggtt_offset(ce->state) +
__get_wq_offset(ce);
info->wq_desc_lo = lower_32_bits(wq_desc_offset);
info->wq_desc_hi = upper_32_bits(wq_desc_offset);
@@ -2868,7 +2927,7 @@ static int try_context_registration(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
intel_wakeref_t wakeref;
u32 ctx_id = ce->guc_id.id;
bool context_registered;
@@ -2949,7 +3008,7 @@ static int __guc_context_pin(struct intel_context *ce,
/*
* GuC context gets pinned in guc_request_alloc. See that function for
- * explaination of why.
+ * explanation of why.
*/
return lrc_pin(ce, engine, vaddr);
@@ -3384,18 +3443,29 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
* GuC is active, lets destroy this context, but at this point we can still be racing
* with suspend, so we undo everything if the H2G fails in deregister_context so
* that GuC reset will find this context during clean up.
+ *
+ * There is a race condition where the reset code could have altered
+ * this context's state and done a wakeref put before we try to
+ * deregister it here. So check if the context is still set to be
+ * destroyed before undoing earlier changes, to avoid two wakeref puts
+ * on the same context.
*/
ret = deregister_context(ce, ce->guc_id.id);
if (ret) {
- spin_lock(&ce->guc_state.lock);
- set_context_registered(ce);
- clr_context_destroyed(ce);
- spin_unlock(&ce->guc_state.lock);
+ bool pending_destroyed;
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ pending_destroyed = context_destroyed(ce);
+ if (pending_destroyed) {
+ set_context_registered(ce);
+ clr_context_destroyed(ce);
+ }
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
* the wakeref immediately but per function spec usage call this after unlock.
*/
- intel_wakeref_put_async(&gt->wakeref);
+ if (pending_destroyed)
+ intel_wakeref_put_async(&gt->wakeref);
}
return ret;
@@ -4267,20 +4337,25 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
/* Short circuit function */
- if (prio < I915_PRIORITY_NORMAL ||
- rq->guc_prio == GUC_PRIO_FINI ||
- (rq->guc_prio != GUC_PRIO_INIT &&
- !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
+ if (prio < I915_PRIORITY_NORMAL)
return;
spin_lock(&ce->guc_state.lock);
- if (rq->guc_prio != GUC_PRIO_FINI) {
- if (rq->guc_prio != GUC_PRIO_INIT)
- sub_context_inflight_prio(ce, rq->guc_prio);
- rq->guc_prio = new_guc_prio;
- add_context_inflight_prio(ce, rq->guc_prio);
- update_context_prio(ce);
- }
+
+ if (rq->guc_prio == GUC_PRIO_FINI)
+ goto exit;
+
+ if (!new_guc_prio_higher(rq->guc_prio, new_guc_prio))
+ goto exit;
+
+ if (rq->guc_prio != GUC_PRIO_INIT)
+ sub_context_inflight_prio(ce, rq->guc_prio);
+
+ rq->guc_prio = new_guc_prio;
+ add_context_inflight_prio(ce, rq->guc_prio);
+ update_context_prio(ce);
+
+exit:
spin_unlock(&ce->guc_state.lock);
}
@@ -4496,7 +4571,13 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
if (engine->class == COMPUTE_CLASS)
if (IS_GFX_GT_IP_STEP(engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
IS_DG2(engine->i915))
- engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
+
+ /* Wa_16019325821 */
+ /* Wa_14019159160 */
+ if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
@@ -4507,7 +4588,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
*/
engine->emit_bb_start = gen8_emit_bb_start;
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
engine->emit_bb_start = xehp_emit_bb_start;
}
@@ -4549,7 +4630,7 @@ static void guc_sched_engine_destroy(struct kref *kref)
int intel_guc_submission_setup(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
/*
* The setup relies on several assumptions (e.g. irqs always enabled)
@@ -5308,7 +5389,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
void intel_guc_find_hung_context(struct intel_engine_cs *engine)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
@@ -5370,7 +5451,7 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
struct i915_request *hung_rq,
struct drm_printer *m)
{
- struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(engine->gt);
struct intel_context *ce;
unsigned long index;
unsigned long flags;
@@ -5462,12 +5543,20 @@ static inline void guc_log_context(struct drm_printer *p,
{
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
- drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
- ce->ring->head,
- ce->lrc_reg_state[CTX_RING_HEAD]);
- drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
- ce->ring->tail,
- ce->lrc_reg_state[CTX_RING_TAIL]);
+ if (intel_context_pin_if_active(ce)) {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ intel_context_unpin(ce);
+ } else {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
+ ce->ring->head);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
+ ce->ring->tail);
+ }
drm_printf(p, "\t\tContext Pin Count: %u\n",
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
@@ -5822,7 +5911,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
if (!ve)
return ERR_PTR(-ENOMEM);
- guc = &siblings[0]->gt->uc.guc;
+ guc = gt_to_guc(siblings[0]->gt);
ve->base.i915 = siblings[0]->i915;
ve->base.gt = siblings[0]->gt;
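
Note: the (u64) casts on i915_ggtt_offset() above matter because the helper returns u32; without widening, the addition would wrap in 32-bit arithmetic before being split into the wq_desc_lo/hi halves. A minimal illustration:

    u32 base = 0xfffff000, extra = 0x2000;
    u64 bad  = base + extra;        /* u32 add wraps to 0x1000 */
    u64 good = (u64)base + extra;   /* 0x100001000 as intended */
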
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 0945b177d5f9..456d3372eef8 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -231,8 +231,8 @@ static void delayed_huc_load_init(struct intel_huc *huc)
sw_fence_dummy_notify);
i915_sw_fence_commit(&huc->delayed_load.fence);
- hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
+ hrtimer_setup(&huc->delayed_load.timer, huc_delayed_load_timer_callback, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
}
static void delayed_huc_load_fini(struct intel_huc *huc)
@@ -317,6 +317,11 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
+void intel_huc_fini_late(struct intel_huc *huc)
+{
+ delayed_huc_load_fini(huc);
+}
+
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
@@ -385,7 +390,7 @@ int intel_huc_init(struct intel_huc *huc)
if (HAS_ENGINE(gt, GSC0)) {
struct i915_vma *vma;
- vma = intel_guc_allocate_vma(&gt->uc.guc, PXP43_HUC_AUTH_INOUT_SIZE * 2);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
huc_info(huc, "Failed to allocate heci pkt\n");
@@ -414,12 +419,6 @@ out:
void intel_huc_fini(struct intel_huc *huc)
{
- /*
- * the fence is initialized in init_early, so we need to clean it up
- * even if HuC loading is off.
- */
- delayed_huc_load_fini(huc);
-
if (huc->heci_pkt)
i915_vma_unpin_and_release(&huc->heci_pkt, 0);
@@ -427,19 +426,6 @@ void intel_huc_fini(struct intel_huc *huc)
intel_uc_fw_fini(&huc->fw);
}
-void intel_huc_suspend(struct intel_huc *huc)
-{
- if (!intel_uc_fw_is_loadable(&huc->fw))
- return;
-
- /*
- * in the unlikely case that we're suspending before the GSC has
- * completed its loading sequence, just stop waiting. We'll restart
- * on resume.
- */
- delayed_huc_load_complete(huc);
-}
-
static const char *auth_mode_string(struct intel_huc *huc,
enum intel_huc_authentication_type type)
{
@@ -455,7 +441,7 @@ static const char *auth_mode_string(struct intel_huc *huc,
* an end user should hit the timeout is in case of extreme thermal throttling.
* And a system that is that hot during boot is probably dead anyway!
*/
-#if defined(CONFIG_DRM_I915_DEBUG_GEM)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define HUC_LOAD_RETRY_LIMIT 20
#else
#define HUC_LOAD_RETRY_LIMIT 3
@@ -502,13 +488,15 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
if (delta_ms > 50) {
huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
delta_ms, huc->status[type].reg.reg, count, ret);
- huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- intel_rps_read_actual_frequency(&gt->rps), before_freq,
+ huc_warn(huc, "excessive auth time: [freq = %dMHz -> %dMHz vs %dMHz, perf_limit_reasons = 0x%08X]\n",
+ before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
} else {
- huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
- delta_ms, intel_rps_read_actual_frequency(&gt->rps),
- before_freq, huc->status[type].reg.reg, count, ret);
+ huc_dbg(huc, "auth took %lldms, freq = %dMHz -> %dMHz vs %dMHz, status = 0x%08X, count = %d, ret = %d\n",
+ delta_ms, before_freq, intel_rps_read_actual_frequency(&gt->rps),
+ intel_rps_get_requested_frequency(&gt->rps),
+ huc->status[type].reg.reg, count, ret);
}
/* mark the load process as complete even if the wait failed */
@@ -540,7 +528,7 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
struct intel_gt *gt = huc_to_gt(huc);
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret;
if (!intel_uc_fw_is_loaded(&huc->fw))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index ba5cb08e9e7b..921ad4b1687f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -55,9 +55,9 @@ struct intel_huc {
int intel_huc_sanitize(struct intel_huc *huc);
void intel_huc_init_early(struct intel_huc *huc);
+void intel_huc_fini_late(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
-void intel_huc_suspend(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type);
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
enum intel_huc_authentication_type type);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 6dfe5d9456c6..4a3493e8d433 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -50,10 +50,6 @@ static void uc_expand_default_options(struct intel_uc *uc)
/* Default: enable HuC authentication and GuC submission */
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
-
- /* XEHPSDV and PVC do not use HuC */
- if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
- i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}
/* Reset GuC providing us with fresh state for both GuC and HuC.
@@ -103,7 +99,7 @@ static void __confirm_options(struct intel_uc *uc)
}
if (!intel_uc_supports_guc(uc))
- gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
+ gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
@@ -140,6 +136,7 @@ void intel_uc_init_late(struct intel_uc *uc)
void intel_uc_driver_late_release(struct intel_uc *uc)
{
+ intel_huc_fini_late(&uc->huc);
}
/**
@@ -516,7 +513,7 @@ static int __uc_init_hw(struct intel_uc *uc)
ERR_PTR(ret), attempts);
}
- /* Did we succeded or run out of retries? */
+ /* Did we succeed or run out of retries? */
if (ret)
goto err_log_capture;
@@ -637,6 +634,10 @@ void intel_uc_reset_finish(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
+ /*
+ * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
+ * So this function is sometimes called with the in-progress flag not set.
+ */
uc->reset_in_progress = false;
/* Firmware expected to be running when this function is called */
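The comment added above records why the flag clear must be idempotent: on a wedge the prepare/finish pairs nest rather than alternate. Schematically (the entry points are real, the ordering is the one the comment describes):

	intel_uc_reset_prepare(uc);	/* reset_in_progress = true */
	intel_uc_reset_prepare(uc);	/* wedge path re-enters prepare */
	intel_uc_reset_finish(uc);	/* reset_in_progress = false */
	intel_uc_reset_finish(uc);	/* called again, flag already clear */
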
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 756093eaf2ad..ec33ad942115 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -698,12 +698,18 @@ static int check_gsc_manifest(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
+ int ret;
+
switch (uc_fw->type) {
case INTEL_UC_FW_TYPE_HUC:
- intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ ret = intel_huc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ if (ret)
+ return ret;
break;
case INTEL_UC_FW_TYPE_GSC:
- intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ ret = intel_gsc_fw_get_binary_info(uc_fw, fw->data, fw->size);
+ if (ret)
+ return ret;
break;
default:
MISSING_CASE(uc_fw->type);
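Both *_get_binary_info() return values were previously dropped on the floor, so a truncated or malformed HuC/GSC image could pass manifest checking and only fail later, further from the cause. The fix is the standard propagate-on-error shape:

	ret = parse_binary(uc_fw, fw->data, fw->size);	/* hypothetical parser */
	if (ret)
		return ret;	/* surface the parser's errno to the caller */
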
@@ -807,7 +813,7 @@ static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **
static int check_mtl_huc_guc_compatibility(struct intel_gt *gt,
struct intel_uc_fw_file *huc_selected)
{
- struct intel_uc_fw_file *guc_selected = &gt->uc.guc.fw.file_selected;
+ struct intel_uc_fw_file *guc_selected = &gt_to_guc(gt)->fw.file_selected;
struct intel_uc_fw_ver *huc_ver = &huc_selected->ver;
struct intel_uc_fw_ver *guc_ver = &guc_selected->ver;
bool new_huc, new_guc;
@@ -1209,7 +1215,7 @@ static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
* since its GGTT offset will be GuC accessible.
*/
GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
- vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
+ vma = intel_guc_allocate_vma(gt_to_guc(gt), PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 9a431726c8d5..ac7b3aad2222 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
}
+static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw)
+{
+ return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0;
+}
+
static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
{
return uc_fw->user_overridden;
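intel_uc_fw_is_in_error() leans on the existing status-to-errno mapping: any state for which intel_uc_fw_status_to_error() returns non-zero counts as a failure, so future error states are covered without touching this helper. A hypothetical call site:

	/* skip work that assumes the firmware ever loaded successfully */
	if (intel_uc_fw_is_in_error(&guc->fw))
		return;
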
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index c900aac85adb..68feb55654f7 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -144,7 +144,7 @@ err:
static int intel_guc_steal_guc_ids(void *arg)
{
struct intel_gt *gt = arg;
- struct intel_guc *guc = &gt->uc.guc;
+ struct intel_guc *guc = gt_to_guc(gt);
int ret, sv, context_index = 0;
intel_wakeref_t wakeref;
struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index 26fdc392fce6..83801c992488 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -64,7 +64,7 @@ static int intel_hang_guc(void *arg)
old_beat = engine->props.heartbeat_interval_ms;
ret = intel_engine_set_heartbeat(engine, BEAT_INTERVAL);
if (ret) {
- gt_err(gt, "Failed to boost heatbeat interval: %pe\n", ERR_PTR(ret));
+ gt_err(gt, "Failed to boost heartbeat interval: %pe\n", ERR_PTR(ret));
goto err;
}