Diffstat (limited to 'drivers/gpu/drm/i915/gt/uc')
 drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h      |   1
 drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h |   9
 drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h         |   9
 drivers/gpu/drm/i915/gt/uc/intel_guc.c                |  48
 drivers/gpu/drm/i915/gt/uc/intel_guc.h                |  21
 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c            |  82
 drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c        | 132
 drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c        |  61
 drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c             |  12
 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h           |  50
 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c            |   6
 drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c             |  13
 drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h            |   2
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c           | 103
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h           |   4
 drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h     |   3
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c     | 348
 drivers/gpu/drm/i915/gt/uc/intel_huc.c                | 316
 drivers/gpu/drm/i915/gt/uc/intel_huc.h                |  31
 drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c             |  34
 drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h             |   1
 drivers/gpu/drm/i915/gt/uc/intel_uc.c                 |  13
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c              |  70
 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h              |  14
 drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c   |   8
 25 files changed, 1206 insertions(+), 185 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 29ef8afc8c2e..f359bef046e0 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -117,6 +117,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+ INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
INTEL_GUC_ACTION_SCHED_CONTEXT = 0x1000,
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
index 4c840a2639dc..811add10c30d 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
@@ -128,6 +128,15 @@ enum slpc_media_ratio_mode {
SLPC_MEDIA_RATIO_MODE_FIXED_ONE_TO_TWO = 2,
};
+enum slpc_gucrc_mode {
+ SLPC_GUCRC_MODE_HW = 0,
+ SLPC_GUCRC_MODE_GUCRC_NO_RC6 = 1,
+ SLPC_GUCRC_MODE_GUCRC_STATIC_TIMEOUT = 2,
+ SLPC_GUCRC_MODE_GUCRC_DYNAMIC_HYSTERESIS = 3,
+
+ SLPC_GUCRC_MODE_MAX,
+};
+
enum slpc_event_id {
SLPC_EVENT_RESET = 0,
SLPC_EVENT_SHUTDOWN = 1,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 4a59478c3b5c..58012edd4eb0 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -82,9 +82,16 @@
#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u
/*
+ * Global scheduling policy update keys.
+ */
+enum {
+ GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD = 0x1001,
+};
+
+/*
* Per context scheduling policy update keys.
*/
-enum {
+enum {
GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM = 0x2001,
GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT = 0x2002,
GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY = 0x2003,
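A quick illustration of how the new global-policy key travels on the wire. This is a sketch, not patch code: each KLV is one header dword (key in the upper bits, value length in the lower bits) followed by that many value dwords, using the GUC_KLV_0_KEY/GUC_KLV_0_LEN masks this file already defines and the yield defaults added to intel_guc_fwif.h below:

	u32 klv[3];

	/* header dword: key 0x1001, two value dwords follow */
	klv[0] = FIELD_PREP(GUC_KLV_0_KEY,
			    GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD) |
		 FIELD_PREP(GUC_KLV_0_LEN, 2);
	klv[1] = GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION;	/* 100 ms */
	klv[2] = GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO;		/* 50 % */

The same packing is done by __guc_scheduling_policy_add_klv() in the intel_guc_submission.c hunk further down.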
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index bac06e3d6f2c..52aede324788 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -98,6 +98,8 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
gt->pm_guc_events);
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
spin_unlock_irq(gt->irq_lock);
+
+ guc->interrupts.enabled = true;
}
static void gen9_disable_guc_interrupts(struct intel_guc *guc)
@@ -105,6 +107,7 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
struct intel_gt *gt = guc_to_gt(guc);
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+ guc->interrupts.enabled = false;
spin_lock_irq(gt->irq_lock);
@@ -116,39 +119,39 @@ static void gen9_disable_guc_interrupts(struct intel_guc *guc)
gen9_reset_guc_interrupts(guc);
}
+static bool __gen11_reset_guc_interrupts(struct intel_gt *gt)
+{
+ u32 irq = gt->type == GT_MEDIA ? MTL_MGUC : GEN11_GUC;
+
+ lockdep_assert_held(gt->irq_lock);
+ return gen11_gt_reset_one_iir(gt, 0, irq);
+}
+
static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
spin_lock_irq(gt->irq_lock);
- gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
+ __gen11_reset_guc_interrupts(gt);
spin_unlock_irq(gt->irq_lock);
}
static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
spin_lock_irq(gt->irq_lock);
- WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
- intel_uncore_write(gt->uncore,
- GEN11_GUC_SG_INTR_ENABLE, events);
- intel_uncore_write(gt->uncore,
- GEN11_GUC_SG_INTR_MASK, ~events);
+ __gen11_reset_guc_interrupts(gt);
spin_unlock_irq(gt->irq_lock);
+
+ guc->interrupts.enabled = true;
}
static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- spin_lock_irq(gt->irq_lock);
-
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
- intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-
- spin_unlock_irq(gt->irq_lock);
+ guc->interrupts.enabled = false;
intel_synchronize_irq(gt->i915);
gen11_reset_guc_interrupts(guc);
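The ordering on the disable side matters; here is a condensed sketch of how it pairs with the handler guard added in intel_guc.h (all names are the patch's own):

	/* writer (disable path) */
	guc->interrupts.enabled = false;	/* new events get ignored */
	intel_synchronize_irq(gt->i915);	/* drain in-flight handlers */
	gen11_reset_guc_interrupts(guc);	/* then clear any latched IIR */

	/* reader (interrupt handler) */
	if (guc->interrupts.enabled)
		intel_guc_ct_event_handler(&guc->ct);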
@@ -156,7 +159,8 @@ static void gen11_disable_guc_interrupts(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC);
intel_guc_ct_init_early(&guc->ct);
@@ -168,12 +172,17 @@ void intel_guc_init_early(struct intel_guc *guc)
mutex_init(&guc->send_mutex);
spin_lock_init(&guc->irq_lock);
if (GRAPHICS_VER(i915) >= 11) {
- guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
guc->interrupts.reset = gen11_reset_guc_interrupts;
guc->interrupts.enable = gen11_enable_guc_interrupts;
guc->interrupts.disable = gen11_disable_guc_interrupts;
- guc->send_regs.base =
- i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
+ if (gt->type == GT_MEDIA) {
+ guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
+ guc->send_regs.base = i915_mmio_reg_offset(MEDIA_SOFT_SCRATCH(0));
+ } else {
+ guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
+ guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
+ }
+
guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
} else {
@@ -441,6 +450,7 @@ err_log:
err_fw:
intel_uc_fw_fini(&guc->fw);
out:
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
i915_probe_error(gt->i915, "failed with %d\n", ret);
return ret;
}
@@ -870,14 +880,14 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
u32 status = intel_uncore_read(uncore, GUC_STATUS);
u32 i;
- drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "GuC status 0x%08x:\n", status);
drm_printf(p, "\tBootrom status = 0x%x\n",
(status & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
drm_printf(p, "\tuKernel status = 0x%x\n",
(status & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
drm_printf(p, "\tMIA Core status = 0x%x\n",
(status & GS_MIA_MASK) >> GS_MIA_SHIFT);
- drm_puts(p, "\nScratch registers:\n");
+ drm_puts(p, "Scratch registers:\n");
for (i = 0; i < 16; i++) {
drm_printf(p, "\t%2d: \t0x%x\n",
i, intel_uncore_read(uncore, SOFT_SCRATCH(i)));
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 804133df1ac9..1bb3f9829286 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -78,6 +78,7 @@ struct intel_guc {
/** @interrupts: pointers to GuC interrupt-managing functions. */
struct {
+ bool enabled;
void (*reset)(struct intel_guc *guc);
void (*enable)(struct intel_guc *guc);
void (*disable)(struct intel_guc *guc);
@@ -113,6 +114,10 @@ struct intel_guc {
*/
struct list_head guc_id_list;
/**
+ * @guc_ids_in_use: Number of single-lrc guc_ids in use
+ */
+ unsigned int guc_ids_in_use;
+ /**
* @destroyed_contexts: list of contexts waiting to be destroyed
* (deregistered with the GuC)
*/
@@ -132,6 +137,16 @@ struct intel_guc {
* @reset_fail_mask: mask of engines that failed to reset
*/
intel_engine_mask_t reset_fail_mask;
+ /**
+ * @sched_disable_delay_ms: schedule disable delay, in ms, for
+ * contexts
+ */
+ unsigned int sched_disable_delay_ms;
+ /**
+ * @sched_disable_gucid_threshold: threshold of min remaining available
+ * guc_ids before we start bypassing the schedule disable delay
+ */
+ unsigned int sched_disable_gucid_threshold;
} submission_state;
/**
@@ -316,9 +331,11 @@ retry:
return err;
}
+/* Only call this from the interrupt handler code */
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
- intel_guc_ct_event_handler(&guc->ct);
+ if (guc->interrupts.enabled)
+ intel_guc_ct_event_handler(&guc->ct);
}
/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
@@ -466,4 +483,6 @@ void intel_guc_write_barrier(struct intel_guc *guc);
void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p);
+int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 74cbe8eaf531..a7f737c4792e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -5,6 +5,7 @@
#include <linux/bsearch.h>
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
@@ -277,24 +278,16 @@ __mmio_reg_add(struct temp_regset *regset, struct guc_mmio_reg *reg)
return slot;
}
-#define GUC_REGSET_STEERING(group, instance) ( \
- FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \
- FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \
- GUC_REGSET_NEEDS_STEERING \
-)
-
static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
struct temp_regset *regset,
- i915_reg_t reg, u32 flags)
+ u32 offset, u32 flags)
{
u32 count = regset->storage_used - (regset->registers - regset->storage);
- u32 offset = i915_mmio_reg_offset(reg);
struct guc_mmio_reg entry = {
.offset = offset,
.flags = flags,
};
struct guc_mmio_reg *slot;
- u8 group, inst;
/*
* The mmio list is built using separate lists within the driver.
@@ -306,17 +299,6 @@ static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
sizeof(entry), guc_mmio_reg_cmp))
return 0;
- /*
- * The GuC doesn't have a default steering, so we need to explicitly
- * steer all registers that need steering. However, we do not keep track
- * of all the steering ranges, only of those that have a chance of using
- * a non-default steering from the i915 pov. Instead of adding such
- * tracking, it is easier to just program the default steering for all
- * regs that don't need a non-default one.
- */
- intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
- entry.flags |= GUC_REGSET_STEERING(group, inst);
-
slot = __mmio_reg_add(regset, &entry);
if (IS_ERR(slot))
return PTR_ERR(slot);
@@ -335,6 +317,38 @@ static long __must_check guc_mmio_reg_add(struct intel_gt *gt,
#define GUC_MMIO_REG_ADD(gt, regset, reg, masked) \
guc_mmio_reg_add(gt, \
regset, \
+ i915_mmio_reg_offset(reg), \
+ (masked) ? GUC_REGSET_MASKED : 0)
+
+#define GUC_REGSET_STEERING(group, instance) ( \
+ FIELD_PREP(GUC_REGSET_STEERING_GROUP, (group)) | \
+ FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, (instance)) | \
+ GUC_REGSET_NEEDS_STEERING \
+)
+
+static long __must_check guc_mcr_reg_add(struct intel_gt *gt,
+ struct temp_regset *regset,
+ i915_mcr_reg_t reg, u32 flags)
+{
+ u8 group, inst;
+
+ /*
+ * The GuC doesn't have a default steering, so we need to explicitly
+ * steer all registers that need steering. However, we do not keep track
+ * of all the steering ranges, only of those that have a chance of using
+ * a non-default steering from the i915 pov. Instead of adding such
+ * tracking, it is easier to just program the default steering for all
+ * regs that don't need a non-default one.
+ */
+ intel_gt_mcr_get_nonterminated_steering(gt, reg, &group, &inst);
+ flags |= GUC_REGSET_STEERING(group, inst);
+
+ return guc_mmio_reg_add(gt, regset, i915_mmio_reg_offset(reg), flags);
+}
+
+#define GUC_MCR_REG_ADD(gt, regset, reg, masked) \
+ guc_mcr_reg_add(gt, \
+ regset, \
(reg), \
(masked) ? GUC_REGSET_MASKED : 0)
@@ -372,8 +386,21 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
false);
/* add in local MOCS registers */
- for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
- ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
+ for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
+ else
+ ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
+
+ if (GRAPHICS_VER(engine->i915) >= 12) {
+ ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL0, false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL1, false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL2, false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL3, false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL4, false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL5, false);
+ ret |= GUC_MMIO_REG_ADD(gt, regset, EU_PERF_CNTL6, false);
+ }
return ret ? -1 : 0;
}
@@ -461,6 +488,11 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
info_map_write(info_map, engine_enabled_masks[GUC_BLITTER_CLASS], BCS_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_VIDEO_CLASS], VDBOX_MASK(gt));
info_map_write(info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS], VEBOX_MASK(gt));
+
+ /* The GSC engine is an instance (6) of OTHER_CLASS */
+ if (gt->engine[GSC0])
+ info_map_write(info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS],
+ BIT(gt->engine[GSC0]->instance));
}
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
@@ -502,9 +534,6 @@ static int guc_prep_golden_context(struct intel_guc *guc)
}
for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
- if (engine_class == OTHER_CLASS)
- continue;
-
guc_class = engine_class_to_guc_class(engine_class);
if (!info_map_read(&info_map, engine_enabled_masks[guc_class]))
@@ -582,9 +611,6 @@ static void guc_init_golden_context(struct intel_guc *guc)
addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
- if (engine_class == OTHER_CLASS)
- continue;
-
guc_class = engine_class_to_guc_class(engine_class);
if (!ads_blob_read(guc, system_info.engine_enabled_masks[guc_class]))
continue;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 8f1165146013..1c1b85073b4b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -132,6 +132,11 @@ static const struct __guc_mmio_reg_descr xe_lpd_blt_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
+/* XE_LPD - GSC Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lpd_gsc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
/* GEN9 - Global */
static const struct __guc_mmio_reg_descr default_global_regs[] = {
COMMON_BASE_GLOBAL,
@@ -165,16 +170,20 @@ static const struct __guc_mmio_reg_descr empty_regs_list[] = {
}
/* List of lists */
-static struct __guc_mmio_reg_descr_group default_lists[] = {
+static const struct __guc_mmio_reg_descr_group default_lists[] = {
MAKE_REGLIST(default_global_regs, PF, GLOBAL, 0),
MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+ MAKE_REGLIST(default_rc_class_regs, PF, ENGINE_CLASS, GUC_COMPUTE_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_COMPUTE_CLASS),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_GSC_OTHER_CLASS),
+ MAKE_REGLIST(xe_lpd_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_GSC_OTHER_CLASS),
{}
};
@@ -182,12 +191,16 @@ static const struct __guc_mmio_reg_descr_group xe_lpd_lists[] = {
MAKE_REGLIST(xe_lpd_global_regs, PF, GLOBAL, 0),
MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_RENDER_CLASS),
MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_RENDER_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_class_regs, PF, ENGINE_CLASS, GUC_COMPUTE_CLASS),
+ MAKE_REGLIST(xe_lpd_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_COMPUTE_CLASS),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_VIDEO_CLASS),
MAKE_REGLIST(xe_lpd_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEO_CLASS),
MAKE_REGLIST(xe_lpd_vec_class_regs, PF, ENGINE_CLASS, GUC_VIDEOENHANCE_CLASS),
MAKE_REGLIST(xe_lpd_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_VIDEOENHANCE_CLASS),
MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_BLITTER_CLASS),
MAKE_REGLIST(xe_lpd_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_BLITTER_CLASS),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_GSC_OTHER_CLASS),
+ MAKE_REGLIST(xe_lpd_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_GSC_OTHER_CLASS),
{}
};
@@ -240,19 +253,19 @@ static void guc_capture_free_extlists(struct __guc_mmio_reg_descr_group *reglist
struct __ext_steer_reg {
const char *name;
- i915_reg_t reg;
+ i915_mcr_reg_t reg;
};
static const struct __ext_steer_reg xe_extregs[] = {
- {"GEN7_SAMPLER_INSTDONE", GEN7_SAMPLER_INSTDONE},
- {"GEN7_ROW_INSTDONE", GEN7_ROW_INSTDONE}
+ {"GEN8_SAMPLER_INSTDONE", GEN8_SAMPLER_INSTDONE},
+ {"GEN8_ROW_INSTDONE", GEN8_ROW_INSTDONE}
};
static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
const struct __ext_steer_reg *extlist,
int slice_id, int subslice_id)
{
- ext->reg = extlist->reg;
+ ext->reg = _MMIO(i915_mmio_reg_offset(extlist->reg));
ext->flags = FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
ext->regname = extlist->name;
@@ -419,6 +432,46 @@ guc_capture_get_device_reglist(struct intel_guc *guc)
return default_lists;
}
+static const char *
+__stringify_type(u32 type)
+{
+ switch (type) {
+ case GUC_CAPTURE_LIST_TYPE_GLOBAL:
+ return "Global";
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS:
+ return "Class";
+ case GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE:
+ return "Instance";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
+static const char *
+__stringify_engclass(u32 class)
+{
+ switch (class) {
+ case GUC_RENDER_CLASS:
+ return "Render";
+ case GUC_VIDEO_CLASS:
+ return "Video";
+ case GUC_VIDEOENHANCE_CLASS:
+ return "VideoEnhance";
+ case GUC_BLITTER_CLASS:
+ return "Blitter";
+ case GUC_COMPUTE_CLASS:
+ return "Compute";
+ case GUC_GSC_OTHER_CLASS:
+ return "GSC-Other";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
+
static int
guc_capture_list_init(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
struct guc_mmio_reg *ptr, u16 num_entries)
@@ -482,32 +535,55 @@ guc_cap_list_num_regs(struct intel_guc_state_capture *gc, u32 owner, u32 type, u
return num_regs;
}
-int
-intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
- size_t *size)
+static int
+guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size, bool is_purpose_est)
{
struct intel_guc_state_capture *gc = guc->capture;
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][classid];
int num_regs;
- if (!gc->reglists)
+ if (!gc->reglists) {
+ drm_warn(&i915->drm, "GuC-capture: No reglist on this device\n");
return -ENODEV;
+ }
if (cache->is_valid) {
*size = cache->size;
return cache->status;
}
+ if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
+ !guc_capture_get_one_list(gc->reglists, owner, type, classid)) {
+ if (type == GUC_CAPTURE_LIST_TYPE_GLOBAL)
+ drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist Global!\n");
+ else
+ drm_warn(&i915->drm, "Missing GuC-Err-Cap reglist %s(%u):%s(%u)!\n",
+ __stringify_type(type), type,
+ __stringify_engclass(classid), classid);
+ return -ENODATA;
+ }
+
num_regs = guc_cap_list_num_regs(gc, owner, type, classid);
+ /* intentional empty lists can exist depending on hw config */
if (!num_regs)
return -ENODATA;
- *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
- (num_regs * sizeof(struct guc_mmio_reg)));
+ if (size)
+ *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+ (num_regs * sizeof(struct guc_mmio_reg)));
return 0;
}
+int
+intel_guc_capture_getlistsize(struct intel_guc *guc, u32 owner, u32 type, u32 classid,
+ size_t *size)
+{
+ return guc_capture_getlistsize(guc, owner, type, classid, size, false);
+}
+
static void guc_capture_create_prealloc_nodes(struct intel_guc *guc);
int
@@ -606,7 +682,7 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int worst_min_size = 0, num_regs = 0;
+ int worst_min_size = 0;
size_t tmp = 0;
if (!guc->capture)
@@ -627,21 +703,19 @@ guc_capture_output_min_size_est(struct intel_guc *guc)
worst_min_size += sizeof(struct guc_state_capture_group_header_t) +
(3 * sizeof(struct guc_state_capture_header_t));
- if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp))
- num_regs += tmp;
+ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_GLOBAL, 0, &tmp, true))
+ worst_min_size += tmp;
- if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
- engine->class, &tmp)) {
- num_regs += tmp;
+ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_CLASS,
+ engine->class, &tmp, true)) {
+ worst_min_size += tmp;
}
- if (!intel_guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
- engine->class, &tmp)) {
- num_regs += tmp;
+ if (!guc_capture_getlistsize(guc, 0, GUC_CAPTURE_LIST_TYPE_ENGINE_INSTANCE,
+ engine->class, &tmp, true)) {
+ worst_min_size += tmp;
}
}
- worst_min_size += (num_regs * sizeof(struct guc_mmio_reg));
-
return worst_min_size;
}
@@ -658,15 +732,23 @@ static void check_guc_capture_size(struct intel_guc *guc)
int spare_size = min_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
u32 buffer_size = intel_guc_log_section_size_capture(&guc->log);
+ /*
+ * NOTE: min_size is much smaller than the capture region allocation (DG2: <80K vs 1MB).
+ * Additionally, it's based on the space needed to fit all engines getting reset at once
+ * within the same G2H handler task slot, which is very unlikely. However, if GuC really
+ * does run out of space for whatever reason, we will see a separate warning message
+ * when processing the G2H event capture-notification; search for:
+ * INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
+ */
if (min_size < 0)
drm_warn(&i915->drm, "Failed to calculate GuC error state capture buffer minimum size: %d!\n",
min_size);
else if (min_size > buffer_size)
- drm_warn(&i915->drm, "GuC error state capture buffer is too small: %d < %d\n",
+ drm_warn(&i915->drm, "GuC error state capture buffer may be too small: %d < %d\n",
buffer_size, min_size);
else if (spare_size > buffer_size)
- drm_notice(&i915->drm, "GuC error state capture buffer maybe too small: %d < %d (min = %d)\n",
- buffer_size, spare_size, min_size);
+ drm_dbg(&i915->drm, "GuC error state capture buffer lacks spare size: %d < %d (min = %d)\n",
+ buffer_size, spare_size, min_size);
}
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
index 25f09a420561..7269eb0bbedf 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
@@ -71,12 +71,73 @@ static bool intel_eval_slpc_support(void *data)
return intel_guc_slpc_is_used(guc);
}
+static int guc_sched_disable_delay_ms_get(void *data, u64 *val)
+{
+ struct intel_guc *guc = data;
+
+ if (!intel_guc_submission_is_used(guc))
+ return -ENODEV;
+
+ *val = (u64)guc->submission_state.sched_disable_delay_ms;
+
+ return 0;
+}
+
+static int guc_sched_disable_delay_ms_set(void *data, u64 val)
+{
+ struct intel_guc *guc = data;
+
+ if (!intel_guc_submission_is_used(guc))
+ return -ENODEV;
+
+ /* clamp to a practical limit; 1 minute is a reasonable upper bound for the delay */
+ guc->submission_state.sched_disable_delay_ms = min_t(u64, val, 60000);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(guc_sched_disable_delay_ms_fops,
+ guc_sched_disable_delay_ms_get,
+ guc_sched_disable_delay_ms_set, "%lld\n");
+
+static int guc_sched_disable_gucid_threshold_get(void *data, u64 *val)
+{
+ struct intel_guc *guc = data;
+
+ if (!intel_guc_submission_is_used(guc))
+ return -ENODEV;
+
+ *val = guc->submission_state.sched_disable_gucid_threshold;
+ return 0;
+}
+
+static int guc_sched_disable_gucid_threshold_set(void *data, u64 val)
+{
+ struct intel_guc *guc = data;
+
+ if (!intel_guc_submission_is_used(guc))
+ return -ENODEV;
+
+ if (val > intel_guc_sched_disable_gucid_threshold_max(guc))
+ guc->submission_state.sched_disable_gucid_threshold =
+ intel_guc_sched_disable_gucid_threshold_max(guc);
+ else
+ guc->submission_state.sched_disable_gucid_threshold = val;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(guc_sched_disable_gucid_threshold_fops,
+ guc_sched_disable_gucid_threshold_get,
+ guc_sched_disable_gucid_threshold_set, "%lld\n");
+
void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
{
static const struct intel_gt_debugfs_file files[] = {
{ "guc_info", &guc_info_fops, NULL },
{ "guc_registered_contexts", &guc_registered_contexts_fops, NULL },
{ "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support},
+ { "guc_sched_disable_delay_ms", &guc_sched_disable_delay_ms_fops, NULL },
+ { "guc_sched_disable_gucid_threshold", &guc_sched_disable_gucid_threshold_fops,
+ NULL },
};
if (!intel_guc_is_supported(guc))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index a0372735cddb..5b86b2e286e0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -10,12 +10,15 @@
*/
#include "gt/intel_gt.h"
+#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "intel_guc_fw.h"
#include "i915_drv.h"
-static void guc_prepare_xfer(struct intel_uncore *uncore)
+static void guc_prepare_xfer(struct intel_gt *gt)
{
+ struct intel_uncore *uncore = gt->uncore;
+
u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
@@ -35,8 +38,9 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
if (GRAPHICS_VER(uncore->i915) == 9) {
/* DOP Clock Gating Enable for GuC clocks */
- intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
- 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
+ intel_gt_mcr_multicast_write(gt, GEN8_MISCCPCTL,
+ GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
+ intel_gt_mcr_read_any(gt, GEN8_MISCCPCTL));
/* allows for 5us (in 10ns units) before GT can go to RC6 */
intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
@@ -168,7 +172,7 @@ int intel_guc_fw_upload(struct intel_guc *guc)
struct intel_uncore *uncore = gt->uncore;
int ret;
- guc_prepare_xfer(uncore);
+ guc_prepare_xfer(gt);
/*
* Note that GuC needs the CSS header plus uKernel code to be copied
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index 323b055e5db9..4ae5fc2f6002 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -47,7 +47,8 @@
#define GUC_VIDEOENHANCE_CLASS 2
#define GUC_BLITTER_CLASS 3
#define GUC_COMPUTE_CLASS 4
-#define GUC_LAST_ENGINE_CLASS GUC_COMPUTE_CLASS
+#define GUC_GSC_OTHER_CLASS 5
+#define GUC_LAST_ENGINE_CLASS GUC_GSC_OTHER_CLASS
#define GUC_MAX_ENGINE_CLASSES 16
#define GUC_MAX_INSTANCES_PER_CLASS 32
@@ -169,6 +170,7 @@ static u8 engine_class_guc_class_map[] = {
[COPY_ENGINE_CLASS] = GUC_BLITTER_CLASS,
[VIDEO_DECODE_CLASS] = GUC_VIDEO_CLASS,
[VIDEO_ENHANCEMENT_CLASS] = GUC_VIDEOENHANCE_CLASS,
+ [OTHER_CLASS] = GUC_GSC_OTHER_CLASS,
[COMPUTE_CLASS] = GUC_COMPUTE_CLASS,
};
@@ -178,12 +180,13 @@ static u8 guc_class_engine_class_map[] = {
[GUC_VIDEO_CLASS] = VIDEO_DECODE_CLASS,
[GUC_VIDEOENHANCE_CLASS] = VIDEO_ENHANCEMENT_CLASS,
[GUC_COMPUTE_CLASS] = COMPUTE_CLASS,
+ [GUC_GSC_OTHER_CLASS] = OTHER_CLASS,
};
static inline u8 engine_class_to_guc_class(u8 class)
{
BUILD_BUG_ON(ARRAY_SIZE(engine_class_guc_class_map) != MAX_ENGINE_CLASS + 1);
- GEM_BUG_ON(class > MAX_ENGINE_CLASS || class == OTHER_CLASS);
+ GEM_BUG_ON(class > MAX_ENGINE_CLASS);
return engine_class_guc_class_map[class];
}
@@ -290,6 +293,25 @@ struct guc_update_context_policy {
struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
} __packed;
+/* Format of the UPDATE_SCHEDULING_POLICIES H2G data packet */
+struct guc_update_scheduling_policy_header {
+ u32 action;
+} __packed;
+
+/*
+ * Can't dynamically allocate memory for the scheduling policy KLV because
+ * it will be sent from within the reset path. Need a fixed size lump on
+ * the stack instead :(.
+ *
+ * Currently, there is only one KLV defined, which has 1 word of KL + 2 words of V.
+ */
+#define MAX_SCHEDULING_POLICY_SIZE 3
+
+struct guc_update_scheduling_policy {
+ struct guc_update_scheduling_policy_header header;
+ u32 data[MAX_SCHEDULING_POLICY_SIZE];
+} __packed;
+
#define GUC_POWER_UNSPECIFIED 0
#define GUC_POWER_D0 1
#define GUC_POWER_D1 2
@@ -298,6 +320,9 @@ struct guc_update_context_policy {
/* Scheduling policy settings */
+#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION 100 /* in ms */
+#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO 50 /* in percent */
+
#define GLOBAL_POLICY_MAX_NUM_WI 15
/* Don't reset an engine upon preemption failure */
@@ -305,6 +330,27 @@ struct guc_update_context_policy {
#define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
+/*
+ * GuC converts the timeout to clock ticks internally. Different platforms have
+ * different GuC clocks. Thus, the maximum value before overflow is platform
+ * dependent. Current worst case scenario is about 110s. So, the spec says to
+ * limit to 100s to be safe.
+ */
+#define GUC_POLICY_MAX_EXEC_QUANTUM_US (100 * 1000 * 1000UL)
+#define GUC_POLICY_MAX_PREEMPT_TIMEOUT_US (100 * 1000 * 1000UL)
+
+static inline u32 guc_policy_max_exec_quantum_ms(void)
+{
+ BUILD_BUG_ON(GUC_POLICY_MAX_EXEC_QUANTUM_US >= UINT_MAX);
+ return GUC_POLICY_MAX_EXEC_QUANTUM_US / 1000;
+}
+
+static inline u32 guc_policy_max_preempt_timeout_ms(void)
+{
+ BUILD_BUG_ON(GUC_POLICY_MAX_PREEMPT_TIMEOUT_US >= UINT_MAX);
+ return GUC_POLICY_MAX_PREEMPT_TIMEOUT_US / 1000;
+}
+
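The arithmetic behind the clamp, as a sketch: 100 s expressed in microseconds still fits in a u32, so the BUILD_BUG_ONs above hold and both millisecond helpers return 100,000.

	/* 100 * 1000 * 1000 = 100,000,000 us < UINT_MAX (~4.29e9) */
	u32 max_quantum_ms = guc_policy_max_exec_quantum_ms();		/* 100,000 ms */
	u32 max_preempt_ms = guc_policy_max_preempt_timeout_ms();	/* 100,000 ms */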
struct guc_policies {
u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
/* In micro seconds. How much time to allow before DPC processing is
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 55d3ef93e86f..68331c538b0a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -16,15 +16,15 @@
#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M
-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
#elif defined(CONFIG_DRM_I915_DEBUG_GEM)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M
-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_4M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
#else
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K
#define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K
-#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_2M
+#define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M
#endif
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index 8f8dd05835c5..b5855091cf6a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -11,9 +11,20 @@
static bool __guc_rc_supported(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ /*
+ * Wa_14017073508: mtl
+ * Do not enable gucrc to avoid additional interrupts which
+ * may disrupt pcode wa.
+ */
+ if (IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0) &&
+ gt->type == GT_MEDIA)
+ return false;
+
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(gt->i915) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index a7092f711e9c..9915de32e894 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -36,6 +36,7 @@
#define SOFT_SCRATCH_COUNT 16
#define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4)
+#define MEDIA_SOFT_SCRATCH(n) _MMIO(0x190310 + (n) * 4)
#define GEN11_SOFT_SCRATCH_COUNT 4
#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
@@ -101,6 +102,7 @@
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0)
+#define MEDIA_GUC_HOST_INTERRUPT _MMIO(0x190304)
#define GEN12_GUC_SEM_INTR_ENABLES _MMIO(0xc71c)
#define GUC_SEM_INTR_ROUTE_TO_GUC BIT(31)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index fdd895f73f9f..63464933cbce 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -137,6 +137,17 @@ static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
return ret > 0 ? -EPROTO : ret;
}
+static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
+{
+ u32 request[] = {
+ GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+ SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
+ id,
+ };
+
+ return intel_guc_send(guc, request, ARRAY_SIZE(request));
+}
+
static bool slpc_is_running(struct intel_guc_slpc *slpc)
{
return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
@@ -190,6 +201,15 @@ static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
return ret;
}
+static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
+{
+ struct intel_guc *guc = slpc_to_guc(slpc);
+
+ GEM_BUG_ON(id >= SLPC_MAX_PARAM);
+
+ return guc_action_slpc_unset_param(guc, id);
+}
+
static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
struct drm_i915_private *i915 = slpc_to_i915(slpc);
@@ -263,6 +283,7 @@ int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
slpc->max_freq_softlimit = 0;
slpc->min_freq_softlimit = 0;
+ slpc->min_is_rpmax = false;
slpc->boost_freq = 0;
atomic_set(&slpc->num_waiters, 0);
@@ -588,6 +609,39 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
return 0;
}
+static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ int slpc_min_freq;
+ int ret;
+
+ ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
+ if (ret) {
+ drm_err(&i915->drm,
+ "Failed to get min freq: (%d)\n",
+ ret);
+ return false;
+ }
+
+ return slpc_min_freq == SLPC_MAX_FREQ_MHZ;
+}
+
+static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
+{
+ /* For server parts, SLPC min will be at RPMax.
+ * Use min softlimit to clamp it to RP0 instead.
+ */
+ if (!slpc->min_freq_softlimit &&
+ is_slpc_min_freq_rpmax(slpc)) {
+ slpc->min_is_rpmax = true;
+ slpc->min_freq_softlimit = slpc->rp0_freq;
+ (slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
+ }
+}
+
static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
{
/* Force SLPC to used platform rp0 */
@@ -610,6 +664,52 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
slpc->boost_freq = slpc->rp0_freq;
}
+/**
+ * intel_guc_slpc_override_gucrc_mode() - override GUCRC mode
+ * @slpc: pointer to intel_guc_slpc.
+ * @mode: new value of the mode.
+ *
+ * This function will override the GUCRC mode.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
+{
+ int ret;
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+
+ if (mode >= SLPC_GUCRC_MODE_MAX)
+ return -EINVAL;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
+ if (ret)
+ drm_err(&i915->drm,
+ "Override gucrc mode %d failed %d\n",
+ mode, ret);
+ }
+
+ return ret;
+}
+
+int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
+{
+ struct drm_i915_private *i915 = slpc_to_i915(slpc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
+ if (ret)
+ drm_err(&i915->drm,
+ "Unsetting gucrc mode failed %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
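A hedged usage sketch for the two new entry points (the caller below is illustrative, not from this patch; error handling is trimmed):

	static int example_toggle_gucrc(struct intel_guc_slpc *slpc)
	{
		int ret;

		/* SLPC_GUCRC_MODE_GUCRC_NO_RC6 comes from guc_actions_slpc_abi.h above */
		ret = intel_guc_slpc_override_gucrc_mode(slpc,
							 SLPC_GUCRC_MODE_GUCRC_NO_RC6);
		if (ret)
			return ret;

		/* ... run whatever required RC6 to stay off ... */

		/* drop the override so SLPC reverts to its default RC behaviour */
		return intel_guc_slpc_unset_gucrc_mode(slpc);
	}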
/*
* intel_guc_slpc_enable() - Start SLPC
* @slpc: pointer to intel_guc_slpc.
@@ -647,6 +747,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
slpc_get_rp_values(slpc);
+ /* Handle the case where min=max=RPmax */
+ update_server_min_softlimit(slpc);
+
/* Set SLPC max limit to RP0 */
ret = slpc_use_fused_rp0(slpc);
if (unlikely(ret)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
index 82a98f78f96c..17ed515f6a85 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
@@ -9,6 +9,8 @@
#include "intel_guc_submission.h"
#include "intel_guc_slpc_types.h"
+#define SLPC_MAX_FREQ_MHZ 4250
+
struct intel_gt;
struct drm_printer;
@@ -42,5 +44,7 @@ int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val);
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);
void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc);
+int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc);
+int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
index 73d208123528..a6ef53b04e04 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
@@ -19,6 +19,9 @@ struct intel_guc_slpc {
bool supported;
bool selected;
+ /* Indicates this is a server part */
+ bool min_is_rpmax;
+
/* platform frequency limits */
u32 min_freq;
u32 rp0_freq;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 1db59eeb34db..0a42f1807f52 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -6,6 +6,7 @@
#include <linux/circ_buf.h>
#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_lmem.h"
#include "gt/gen8_engine_cs.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
@@ -29,6 +30,7 @@
#include "intel_guc_submission.h"
#include "i915_drv.h"
+#include "i915_reg.h"
#include "i915_trace.h"
/**
@@ -65,7 +67,13 @@
* corresponding G2H returns indicating the scheduling disable operation has
* completed it is safe to unpin the context. While a disable is in flight it
* isn't safe to resubmit the context so a fence is used to stall all future
- * requests of that context until the G2H is returned.
+ * requests of that context until the G2H is returned. Because this interaction
+ * with the GuC takes a non-zero amount of time, we delay the disabling of
+ * scheduling after the pin count goes to zero by a configurable period of time
+ * (see SCHED_DISABLE_DELAY_MS). The intent is to give the user a window in
+ * which to resubmit work on the context before paying for this costly operation.
+ * This delay is only done if the context isn't closed and the guc_id usage is
+ * less than a threshold (see NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD).
*
* Context deregistration:
* Before a context can be destroyed or if we steal its guc_id we must
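For readability, a condensed sketch of the unpin decision this produces (it distills guc_context_sched_disable() as added below; the helpers are the patch's own):

	if (bypass_sched_disable(guc, ce)) {
		/* never enabled, invalid guc_id, or reset in flight: unpin now */
		intel_context_sched_disable_unpin(ce);
	} else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) && delay) {
		/* common case: defer the H2G by sched_disable_delay_ms */
		mod_delayed_work(system_unbound_wq,
				 &ce->guc_state.sched_disable_delay_work,
				 msecs_to_jiffies(delay));
	} else {
		/* closed context or guc_id pressure: disable immediately */
		do_sched_disable(guc, ce, flags);
	}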
@@ -163,7 +171,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
#define SCHED_STATE_PENDING_ENABLE BIT(5)
#define SCHED_STATE_REGISTERED BIT(6)
#define SCHED_STATE_POLICY_REQUIRED BIT(7)
-#define SCHED_STATE_BLOCKED_SHIFT 8
+#define SCHED_STATE_CLOSED BIT(8)
+#define SCHED_STATE_BLOCKED_SHIFT 9
#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
@@ -173,12 +182,20 @@ static inline void init_sched_state(struct intel_context *ce)
ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
}
+/*
+ * Kernel contexts can have SCHED_STATE_REGISTERED after suspend.
+ * A context close can race with the submission path, so SCHED_STATE_CLOSED
+ * can be set immediately before we try to register.
+ */
+#define SCHED_STATE_VALID_INIT \
+ (SCHED_STATE_BLOCKED_MASK | \
+ SCHED_STATE_CLOSED | \
+ SCHED_STATE_REGISTERED)
+
__maybe_unused
static bool sched_state_is_init(struct intel_context *ce)
{
- /* Kernel contexts can have SCHED_STATE_REGISTERED after suspend. */
- return !(ce->guc_state.sched_state &
- ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
+ return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT);
}
static inline bool
@@ -319,6 +336,17 @@ static inline void clr_context_policy_required(struct intel_context *ce)
ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
}
+static inline bool context_close_done(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_CLOSED;
+}
+
+static inline void set_context_close_done(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_CLOSED;
+}
+
static inline u32 context_blocked(struct intel_context *ce)
{
return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
@@ -343,25 +371,6 @@ static inline void decr_context_blocked(struct intel_context *ce)
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
}
-static inline bool context_has_committed_requests(struct intel_context *ce)
-{
- return !!ce->guc_state.number_committed_requests;
-}
-
-static inline void incr_context_committed_requests(struct intel_context *ce)
-{
- lockdep_assert_held(&ce->guc_state.lock);
- ++ce->guc_state.number_committed_requests;
- GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
-}
-
-static inline void decr_context_committed_requests(struct intel_context *ce)
-{
- lockdep_assert_held(&ce->guc_state.lock);
- --ce->guc_state.number_committed_requests;
- GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
-}
-
static struct intel_context *
request_to_scheduling_context(struct i915_request *rq)
{
@@ -1067,6 +1076,12 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
xa_unlock(&guc->context_lookup);
+ if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
+ (cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) {
+ /* successful cancel so jump straight to close it */
+ intel_context_sched_disable_unpin(ce);
+ }
+
spin_lock(&ce->guc_state.lock);
/*
@@ -1387,7 +1402,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
/*
* Synchronize with gt reset to make sure the worker does not
- * corrupt the engine/guc stats.
+ * corrupt the engine/guc stats. NB: can't actually block waiting
+ * for a reset to complete as the reset requires flushing out
+ * this worker thread if started. So waiting would deadlock.
*/
ret = intel_gt_reset_trylock(gt, &srcu);
if (ret)
@@ -1994,6 +2011,9 @@ static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
if (unlikely(ret < 0))
return ret;
+ if (!intel_context_is_parent(ce))
+ ++guc->submission_state.guc_ids_in_use;
+
ce->guc_id.id = ret;
return 0;
}
@@ -2003,14 +2023,16 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
GEM_BUG_ON(intel_context_is_child(ce));
if (!context_guc_id_invalid(ce)) {
- if (intel_context_is_parent(ce))
+ if (intel_context_is_parent(ce)) {
bitmap_release_region(guc->submission_state.guc_ids_bitmap,
ce->guc_id.id,
order_base_2(ce->parallel.number_children
+ 1));
- else
+ } else {
+ --guc->submission_state.guc_ids_in_use;
ida_simple_remove(&guc->submission_state.guc_ids,
ce->guc_id.id);
+ }
clr_ctx_id_mapping(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
}
@@ -2429,6 +2451,10 @@ static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
int ret;
/* NB: For both of these, zero means disabled. */
+ GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
+ execution_quantum));
+ GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
+ preemption_timeout));
execution_quantum = engine->props.timeslice_duration_ms * 1000;
preemption_timeout = engine->props.preempt_timeout_ms * 1000;
@@ -2462,6 +2488,10 @@ static void guc_context_policy_init_v69(struct intel_engine_cs *engine,
desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE_V69;
/* NB: For both of these, zero means disabled. */
+ GEM_BUG_ON(overflows_type(engine->props.timeslice_duration_ms * 1000,
+ desc->execution_quantum));
+ GEM_BUG_ON(overflows_type(engine->props.preempt_timeout_ms * 1000,
+ desc->preemption_timeout));
desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
}
@@ -2998,41 +3028,104 @@ guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
}
}
-static void guc_context_sched_disable(struct intel_context *ce)
+static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
+ unsigned long flags)
+ __releases(ce->guc_state.lock)
{
- struct intel_guc *guc = ce_to_guc(ce);
- unsigned long flags;
struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
intel_wakeref_t wakeref;
u16 guc_id;
+ lockdep_assert_held(&ce->guc_state.lock);
+ guc_id = prep_context_pending_disable(ce);
+
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+ with_intel_runtime_pm(runtime_pm, wakeref)
+ __guc_context_sched_disable(guc, ce, guc_id);
+}
+
+static bool bypass_sched_disable(struct intel_guc *guc,
+ struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(intel_context_is_child(ce));
+ if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
+ !ctx_id_mapped(guc, ce->guc_id.id)) {
+ clr_context_enabled(ce);
+ return true;
+ }
+
+ return !context_enabled(ce);
+}
+
+static void __delay_sched_disable(struct work_struct *wrk)
+{
+ struct intel_context *ce =
+ container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work);
+ struct intel_guc *guc = ce_to_guc(ce);
+ unsigned long flags;
+
spin_lock_irqsave(&ce->guc_state.lock, flags);
+ if (bypass_sched_disable(guc, ce)) {
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ intel_context_sched_disable_unpin(ce);
+ } else {
+ do_sched_disable(guc, ce, flags);
+ }
+}
+
+static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
+{
/*
- * We have to check if the context has been disabled by another thread,
- * check if submssion has been disabled to seal a race with reset and
- * finally check if any more requests have been committed to the
- * context ensursing that a request doesn't slip through the
- * 'context_pending_disable' fence.
+ * parent contexts are perma-pinned, if we are unpinning do schedule
+ * disable immediately.
*/
- if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
- context_has_committed_requests(ce))) {
- clr_context_enabled(ce);
+ if (intel_context_is_parent(ce))
+ return true;
+
+ /*
+ * If we are beyond the threshold of available guc_ids, do the schedule disable immediately.
+ */
+ return guc->submission_state.guc_ids_in_use >
+ guc->submission_state.sched_disable_gucid_threshold;
+}
+
+static void guc_context_sched_disable(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+ u64 delay = guc->submission_state.sched_disable_delay_ms;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+ if (bypass_sched_disable(guc, ce)) {
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ intel_context_sched_disable_unpin(ce);
+ } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
+ delay) {
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- goto unpin;
+ mod_delayed_work(system_unbound_wq,
+ &ce->guc_state.sched_disable_delay_work,
+ msecs_to_jiffies(delay));
+ } else {
+ do_sched_disable(guc, ce, flags);
}
- guc_id = prep_context_pending_disable(ce);
+}
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+static void guc_context_close(struct intel_context *ce)
+{
+ unsigned long flags;
- with_intel_runtime_pm(runtime_pm, wakeref)
- __guc_context_sched_disable(guc, ce, guc_id);
+ if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
+ cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))
+ __delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work);
- return;
-unpin:
- intel_context_sched_disable_unpin(ce);
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ set_context_close_done(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
static inline void guc_lrc_desc_unpin(struct intel_context *ce)
@@ -3071,7 +3164,6 @@ static void __guc_context_destroy(struct intel_context *ce)
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
- GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce);
intel_context_fini(ce);
@@ -3340,8 +3432,6 @@ static void remove_from_context(struct i915_request *rq)
guc_prio_fini(rq, ce);
- decr_context_committed_requests(ce);
-
spin_unlock_irq(&ce->guc_state.lock);
atomic_dec(&ce->guc_id.ref);
@@ -3351,6 +3441,8 @@ static void remove_from_context(struct i915_request *rq)
static const struct intel_context_ops guc_context_ops = {
.alloc = guc_context_alloc,
+ .close = guc_context_close,
+
.pre_pin = guc_context_pre_pin,
.pin = guc_context_pin,
.unpin = guc_context_unpin,
@@ -3433,6 +3525,10 @@ static void guc_context_init(struct intel_context *ce)
rcu_read_unlock();
ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
+
+ INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay_work,
+ __delay_sched_disable);
+
set_bit(CONTEXT_GUC_INIT, &ce->flags);
}
@@ -3471,6 +3567,26 @@ static int guc_request_alloc(struct i915_request *rq)
guc_context_init(ce);
/*
+ * If the context gets closed while the execbuf is ongoing, the context
+ * close code will race with the below code to cancel the delayed work.
+ * If the context close wins the race and cancels the work, it will
+ * immediately call the sched disable (see guc_context_close), so there
+ * is a chance we can get past this check while the sched_disable code
+ * is being executed. To make sure that code completes before we check
+ * the status further down, we wait for the close process to complete.
+ * Else, this code path could send a request down thinking that the
+ * context is still in a schedule-enable mode while the GuC ends up
+ * dropping the request completely because the disable did go from the
+ * context_close path right to GuC just prior. In the event the CT is
+ * full, we could potentially need to wait up to 1.5 seconds.
+ */
+ if (cancel_delayed_work_sync(&ce->guc_state.sched_disable_delay_work))
+ intel_context_sched_disable_unpin(ce);
+ else if (intel_context_is_closed(ce))
+ if (wait_for(context_close_done(ce), 1500))
+ drm_warn(&guc_to_gt(guc)->i915->drm,
+ "timed out waiting on context sched close before realloc\n");
+ /*
* Call pin_guc_id here rather than in the pinning step as with
* dma_resv, contexts can be repeatedly pinned / unpinned trashing the
* guc_id and creating horrible race conditions. This is especially bad
@@ -3524,7 +3640,6 @@ out:
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
}
- incr_context_committed_requests(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return 0;
@@ -3600,6 +3715,8 @@ static int guc_virtual_context_alloc(struct intel_context *ce)
static const struct intel_context_ops virtual_guc_context_ops = {
.alloc = guc_virtual_context_alloc,
+ .close = guc_context_close,
+
.pre_pin = guc_virtual_context_pre_pin,
.pin = guc_virtual_context_pin,
.unpin = guc_virtual_context_unpin,
@@ -3689,6 +3806,8 @@ static void guc_child_context_destroy(struct kref *kref)
static const struct intel_context_ops virtual_parent_context_ops = {
.alloc = guc_virtual_context_alloc,
+ .close = guc_context_close,
+
.pre_pin = guc_context_pre_pin,
.pin = guc_parent_context_pin,
.unpin = guc_parent_context_unpin,
@@ -3995,6 +4114,9 @@ static inline void guc_kernel_context_pin(struct intel_guc *guc,
if (context_guc_id_invalid(ce))
pin_guc_id(guc, ce);
+ if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
+ guc_context_init(ce);
+
try_context_registration(ce, true);
}
@@ -4093,7 +4215,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_bb_start = gen8_emit_bb_start;
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
- engine->emit_bb_start = gen125_emit_bb_start;
+ engine->emit_bb_start = xehp_emit_bb_start;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4177,6 +4299,98 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
return 0;
}
+struct scheduling_policy {
+ /* internal data */
+ u32 max_words, num_words;
+ u32 count;
+ /* API data */
+ struct guc_update_scheduling_policy h2g;
+};
+
+static u32 __guc_scheduling_policy_action_size(struct scheduling_policy *policy)
+{
+ u32 *start = (void *)&policy->h2g;
+ u32 *end = policy->h2g.data + policy->num_words;
+ size_t delta = end - start;
+
+ return delta;
+}
+
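Worked size, as a sketch: with the single RENDER_COMPUTE_YIELD KLV sent below (one key/length dword plus two value dwords), num_words is 3 and the H2G action is 4 dwords total.

	/* one KLV: 1 KL dword + 2 value dwords => num_words == 3 */
	/* action size = 1 (header dword) + num_words = 4 dwords */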
+static struct scheduling_policy *__guc_scheduling_policy_start_klv(struct scheduling_policy *policy)
+{
+ policy->h2g.header.action = INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
+ policy->max_words = ARRAY_SIZE(policy->h2g.data);
+ policy->num_words = 0;
+ policy->count = 0;
+
+ return policy;
+}
+
+static void __guc_scheduling_policy_add_klv(struct scheduling_policy *policy,
+ u32 action, u32 *data, u32 len)
+{
+ u32 *klv_ptr = policy->h2g.data + policy->num_words;
+
+ GEM_BUG_ON((policy->num_words + 1 + len) > policy->max_words);
+ *(klv_ptr++) = FIELD_PREP(GUC_KLV_0_KEY, action) |
+ FIELD_PREP(GUC_KLV_0_LEN, len);
+ memcpy(klv_ptr, data, sizeof(u32) * len);
+ policy->num_words += 1 + len;
+ policy->count++;
+}
+
+static int __guc_action_set_scheduling_policies(struct intel_guc *guc,
+ struct scheduling_policy *policy)
+{
+ int ret;
+
+ ret = intel_guc_send(guc, (u32 *)&policy->h2g,
+ __guc_scheduling_policy_action_size(policy));
+ if (ret < 0)
+ return ret;
+
+ if (ret != policy->count) {
+ drm_warn(&guc_to_gt(guc)->i915->drm, "GuC global scheduler policy processed %d of %d KLVs!",
+ ret, policy->count);
+ if (ret > policy->count)
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int guc_init_global_schedule_policy(struct intel_guc *guc)
+{
+ struct scheduling_policy policy;
+ struct intel_gt *gt = guc_to_gt(guc);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ if (GET_UC_VER(guc) < MAKE_UC_VER(70, 3, 0))
+ return 0;
+
+ __guc_scheduling_policy_start_klv(&policy);
+
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
+ u32 yield[] = {
+ GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION,
+ GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO,
+ };
+
+ __guc_scheduling_policy_add_klv(&policy,
+ GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD,
+ yield, ARRAY_SIZE(yield));
+
+ ret = __guc_action_set_scheduling_policies(guc, &policy);
+ if (ret)
+ i915_probe_error(gt->i915,
+ "Failed to configure global scheduling policies: %pe!\n",
+ ERR_PTR(ret));
+ }
+
+ return ret;
+}
+
void intel_guc_submission_enable(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -4189,6 +4403,7 @@ void intel_guc_submission_enable(struct intel_guc *guc)
guc_init_lrc_mapping(guc);
guc_init_engine_stats(guc);
+ guc_init_global_schedule_policy(guc);
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -4219,6 +4434,26 @@ static bool __guc_submission_selected(struct intel_guc *guc)
return i915->params.enable_guc & ENABLE_GUC_SUBMISSION;
}
+int intel_guc_sched_disable_gucid_threshold_max(struct intel_guc *guc)
+{
+ return guc->submission_state.num_guc_ids - NUMBER_MULTI_LRC_GUC_ID(guc);
+}
+
+/*
+ * This default value of 33 milliseconds (+1 ms to round up) ensures 30fps or higher
+ * workloads are able to enjoy the latency reduction when delaying the schedule-disable
+ * operation. This matches the 30fps game-render + encode (real world) workload this
+ * knob was tested against.
+ */
+#define SCHED_DISABLE_DELAY_MS 34
+
+/*
+ * A threshold of 75% is a reasonable starting point considering that real world apps
+ * generally don't get anywhere near this.
+ */
+#define NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(__guc) \
+ (((intel_guc_sched_disable_gucid_threshold_max(__guc)) * 3) / 4)
+
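
As a worked example of the two tunables above (all numbers are illustrative assumptions: 64k total GuC ids, with 1/16th of them set aside for multi-LRC, which is roughly what NUMBER_MULTI_LRC_GUC_ID() subtracts):

    #include <stdio.h>

    int main(void)
    {
        unsigned int num_guc_ids = 65536;                  /* assumed total */
        unsigned int max = num_guc_ids - num_guc_ids / 16; /* 61440 ids */
        unsigned int threshold = max * 3 / 4;              /* 46080 ids = 75% */

        printf("delay skipped above %u of %u ids in use\n", threshold, max);
        return 0;
    }
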
void intel_guc_submission_init_early(struct intel_guc *guc)
{
xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
@@ -4235,7 +4470,10 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
+ guc->submission_state.sched_disable_delay_ms = SCHED_DISABLE_DELAY_MS;
guc->submission_state.num_guc_ids = GUC_MAX_CONTEXT_ID;
+ guc->submission_state.sched_disable_gucid_threshold =
+ NUM_SCHED_DISABLE_GUCIDS_DEFAULT_THRESHOLD(guc);
guc->submission_supported = __guc_submission_supported(guc);
guc->submission_selected = __guc_submission_selected(guc);
}
@@ -4669,7 +4907,7 @@ void intel_guc_submission_print_info(struct intel_guc *guc,
drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
atomic_read(&guc->outstanding_submission_g2h));
- drm_printf(p, "GuC tasklet count: %u\n\n",
+ drm_printf(p, "GuC tasklet count: %u\n",
atomic_read(&sched_engine->tasklet.count));
spin_lock_irqsave(&sched_engine->lock, flags);
@@ -4717,7 +4955,7 @@ static inline void guc_log_context(struct drm_printer *p,
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
atomic_read(&ce->guc_id.ref));
- drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
+ drm_printf(p, "\t\tSchedule State: 0x%x\n",
ce->guc_state.sched_state);
}
@@ -4746,7 +4984,7 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
READ_ONCE(*ce->parallel.guc.wq_head));
drm_printf(p, "\t\tWQI Tail: %u\n",
READ_ONCE(*ce->parallel.guc.wq_tail));
- drm_printf(p, "\t\tWQI Status: %u\n\n",
+ drm_printf(p, "\t\tWQI Status: %u\n",
READ_ONCE(*ce->parallel.guc.wq_status));
}
@@ -4754,7 +4992,7 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc,
emit_bb_start_parent_no_preempt_mid_batch) {
u8 i;
- drm_printf(p, "\t\tChildren Go: %u\n\n",
+ drm_printf(p, "\t\tChildren Go: %u\n",
get_children_go_value(ce));
for (i = 0; i < ce->parallel.number_children; ++i)
drm_printf(p, "\t\tChildren Join: %u\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 3bb8838e325a..410905da8e97 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -10,6 +10,9 @@
#include "intel_huc.h"
#include "i915_drv.h"
+#include <linux/device/bus.h>
+#include <linux/mei_aux.h>
+
/**
* DOC: HuC
*
@@ -42,12 +45,240 @@
* HuC-specific commands.
*/
+/*
+ * MEI-GSC load is an async process. The probing of the exposed aux device
+ * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
+ * on when the kernel schedules it. Unless something goes terribly wrong, we're
+ * guaranteed for this to happen during boot, so the big timeout is a safety net
+ * that we never expect to need.
+ * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
+ * and/or reset, this can take longer. Note that the kernel might schedule
+ * other work between the i915 init/resume and the MEI one, which can add to
+ * the delay.
+ */
+#define GSC_INIT_TIMEOUT_MS 10000
+#define PXP_INIT_TIMEOUT_MS 5000
+
+static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
+ enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+static void __delayed_huc_load_complete(struct intel_huc *huc)
+{
+ if (!i915_sw_fence_done(&huc->delayed_load.fence))
+ i915_sw_fence_complete(&huc->delayed_load.fence);
+}
+
+static void delayed_huc_load_complete(struct intel_huc *huc)
+{
+ hrtimer_cancel(&huc->delayed_load.timer);
+ __delayed_huc_load_complete(huc);
+}
+
+static void __gsc_init_error(struct intel_huc *huc)
+{
+ huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
+ __delayed_huc_load_complete(huc);
+}
+
+static void gsc_init_error(struct intel_huc *huc)
+{
+ hrtimer_cancel(&huc->delayed_load.timer);
+ __gsc_init_error(huc);
+}
+
+static void gsc_init_done(struct intel_huc *huc)
+{
+ hrtimer_cancel(&huc->delayed_load.timer);
+
+ /* MEI-GSC init is done, now we wait for MEI-PXP to bind */
+ huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
+ if (!i915_sw_fence_done(&huc->delayed_load.fence))
+ hrtimer_start(&huc->delayed_load.timer,
+ ms_to_ktime(PXP_INIT_TIMEOUT_MS),
+ HRTIMER_MODE_REL);
+}
+
+static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
+{
+ struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);
+
+ if (!intel_huc_is_authenticated(huc)) {
+ if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
+ drm_notice(&huc_to_gt(huc)->i915->drm,
+ "timed out waiting for MEI GSC init to load HuC\n");
+ else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
+ drm_notice(&huc_to_gt(huc)->i915->drm,
+ "timed out waiting for MEI PXP init to load HuC\n");
+ else
+ MISSING_CASE(huc->delayed_load.status);
+
+ __gsc_init_error(huc);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static void huc_delayed_load_start(struct intel_huc *huc)
+{
+ ktime_t delay;
+
+ GEM_BUG_ON(intel_huc_is_authenticated(huc));
+
+ /*
+ * On resume we don't have to wait for MEI-GSC to be re-probed, but we
+ * do need to wait for MEI-PXP to reset & re-bind
+ */
+ switch (huc->delayed_load.status) {
+ case INTEL_HUC_WAITING_ON_GSC:
+ delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
+ break;
+ case INTEL_HUC_WAITING_ON_PXP:
+ delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
+ break;
+ default:
+ gsc_init_error(huc);
+ return;
+ }
+
+ /*
+ * This fence is always complete unless we're waiting for the
+ * GSC device to come up and load the HuC. We arm the fence here
+ * and complete it from the PXP bind callback, once we confirm that
+ * the HuC is loaded.
+ */
+ GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
+ i915_sw_fence_fini(&huc->delayed_load.fence);
+ i915_sw_fence_reinit(&huc->delayed_load.fence);
+ i915_sw_fence_await(&huc->delayed_load.fence);
+ i915_sw_fence_commit(&huc->delayed_load.fence);
+
+ hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
+}
+
+static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
+ struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];
+
+ if (!intf->adev || &intf->adev->aux_dev.dev != dev)
+ return 0;
+
+ switch (action) {
+ case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
+ gsc_init_done(huc);
+ break;
+
+ case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
+ case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
+ drm_info(&huc_to_gt(huc)->i915->drm,
+ "mei driver not bound, disabling HuC load\n");
+ gsc_init_error(huc);
+ break;
+ }
+
+ return 0;
+}
+
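
The notifier above, together with huc_delayed_load_timer_callback(), amounts to a small state machine. A condensed sketch of the transitions follows; the names and the next_state() helper are stand-ins, not the driver's actual code:

    enum state { WAITING_ON_GSC, WAITING_ON_PXP, LOAD_ERROR, LOAD_DONE };

    static enum state next_state(enum state s, int mei_bound, int timed_out)
    {
        if (timed_out)
            return LOAD_ERROR;      /* either timer expiring */
        if (s == WAITING_ON_GSC && mei_bound)
            return WAITING_ON_PXP;  /* gsc_init_done(): re-arm the timer */
        if (s == WAITING_ON_PXP)
            return LOAD_DONE;       /* PXP bind loads and authenticates HuC */
        return s;
    }
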
+void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus)
+{
+ int ret;
+
+ if (!intel_huc_is_loaded_by_gsc(huc))
+ return;
+
+ huc->delayed_load.nb.notifier_call = gsc_notifier;
+ ret = bus_register_notifier(bus, &huc->delayed_load.nb);
+ if (ret) {
+ drm_err(&huc_to_gt(huc)->i915->drm,
+ "failed to register GSC notifier\n");
+ huc->delayed_load.nb.notifier_call = NULL;
+ gsc_init_error(huc);
+ }
+}
+
+void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus)
+{
+ if (!huc->delayed_load.nb.notifier_call)
+ return;
+
+ delayed_huc_load_complete(huc);
+
+ bus_unregister_notifier(bus, &huc->delayed_load.nb);
+ huc->delayed_load.nb.notifier_call = NULL;
+}
+
+static void delayed_huc_load_init(struct intel_huc *huc)
+{
+ /*
+ * Initialize fence to be complete as this is expected to be complete
+ * unless there is a delayed HuC load in progress.
+ */
+ i915_sw_fence_init(&huc->delayed_load.fence,
+ sw_fence_dummy_notify);
+ i915_sw_fence_commit(&huc->delayed_load.fence);
+
+ hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
+}
+
+static void delayed_huc_load_fini(struct intel_huc *huc)
+{
+ /*
+ * the fence is initialized in init_early, so we need to clean it up
+ * even if HuC loading is off.
+ */
+ delayed_huc_load_complete(huc);
+ i915_sw_fence_fini(&huc->delayed_load.fence);
+}
+
+static bool vcs_supported(struct intel_gt *gt)
+{
+ intel_engine_mask_t mask;
+
+ /*
+ * We reach here from i915_driver_early_probe for the primary GT before
+ * its engine mask is set, so we use the device info engine mask for it;
+ * this means we're not taking VCS fusing into account, but if the
+ * primary GT supports VCS engines we expect at least one of them to
+ * remain unfused so we're fine.
+ * For other GTs we expect the GT-specific mask to be set before we
+ * call this function.
+ */
+ GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
+
+ if (gt_is_root(gt))
+ mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
+ else
+ mask = gt->info.engine_mask;
+
+ return __ENGINE_INSTANCES_MASK(mask, VCS0, I915_MAX_VCS);
+}
+
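
A self-contained sketch of the instance-mask test that __ENGINE_INSTANCES_MASK() performs above, under the assumption that the VCS instances occupy a contiguous bit range; any_vcs() and its parameters are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    static bool any_vcs(uint32_t engine_mask, unsigned int vcs0_bit,
                        unsigned int max_vcs)
    {
        uint32_t vcs_bits = ((1u << max_vcs) - 1) << vcs0_bit;

        return engine_mask & vcs_bits; /* true if any VCS engine survives */
    }
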
void intel_huc_init_early(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
+ struct intel_gt *gt = huc_to_gt(huc);
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC);
+ /*
+ * we always init the fence as already completed, even if HuC is not
+ * supported. This way we don't have to distinguish between HuC not
+ * supported/disabled or already loaded, and can focus on whether the load
+ * is currently in progress (fence not complete) or not, which is what
+ * we care about for stalling userspace submissions.
+ */
+ delayed_huc_load_init(huc);
+
+ if (!vcs_supported(gt)) {
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+ return;
+ }
+
if (GRAPHICS_VER(i915) >= 11) {
huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO;
huc->status.mask = HUC_LOAD_SUCCESSFUL;
@@ -113,16 +344,59 @@ int intel_huc_init(struct intel_huc *huc)
return 0;
out:
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
drm_info(&i915->drm, "HuC init failed with %d\n", err);
return err;
}
void intel_huc_fini(struct intel_huc *huc)
{
+ /*
+ * the fence is initialized in init_early, so we need to clean it up
+ * even if HuC loading is off.
+ */
+ delayed_huc_load_fini(huc);
+
+ if (intel_uc_fw_is_loadable(&huc->fw))
+ intel_uc_fw_fini(&huc->fw);
+}
+
+void intel_huc_suspend(struct intel_huc *huc)
+{
if (!intel_uc_fw_is_loadable(&huc->fw))
return;
- intel_uc_fw_fini(&huc->fw);
+ /*
+ * in the unlikely case that we're suspending before the GSC has
+ * completed its loading sequence, just stop waiting. We'll restart
+ * on resume.
+ */
+ delayed_huc_load_complete(huc);
+}
+
+int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
+{
+ struct intel_gt *gt = huc_to_gt(huc);
+ int ret;
+
+ ret = __intel_wait_for_register(gt->uncore,
+ huc->status.reg,
+ huc->status.mask,
+ huc->status.value,
+ 2, 50, NULL);
+
+ /* mark the load process as complete even if the wait failed */
+ delayed_huc_load_complete(huc);
+
+ if (ret) {
+ drm_err(&gt->i915->drm, "HuC: Firmware not verified %d\n", ret);
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
+ return ret;
+ }
+
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ drm_info(&gt->i915->drm, "HuC authenticated\n");
+ return 0;
}
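
intel_huc_wait_for_auth_complete() above polls the HuC status register (a 2 us fast wait, then up to 50 ms). Below is a simplified userspace sketch of that polling pattern; wait_for_status() is illustrative and omits the sleeping/backoff the real helper does between reads:

    #include <errno.h>
    #include <stdint.h>

    static int wait_for_status(const volatile uint32_t *reg, uint32_t mask,
                               uint32_t value, unsigned int retries)
    {
        while (retries--) {
            if ((*reg & mask) == value)
                return 0;
            /* the real helper sleeps between reads */
        }
        return -ETIMEDOUT;
    }
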
/**
@@ -161,27 +435,18 @@ int intel_huc_auth(struct intel_huc *huc)
}
/* Check authentication status, it should be done by now */
- ret = __intel_wait_for_register(gt->uncore,
- huc->status.reg,
- huc->status.mask,
- huc->status.value,
- 2, 50, NULL);
- if (ret) {
- DRM_ERROR("HuC: Firmware not verified %d\n", ret);
+ ret = intel_huc_wait_for_auth_complete(huc);
+ if (ret)
goto fail;
- }
- intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
- drm_info(&gt->i915->drm, "HuC authenticated\n");
return 0;
fail:
i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret);
- intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
return ret;
}
-static bool huc_is_authenticated(struct intel_huc *huc)
+bool intel_huc_is_authenticated(struct intel_huc *huc)
{
struct intel_gt *gt = huc_to_gt(huc);
intel_wakeref_t wakeref;
@@ -200,13 +465,8 @@ static bool huc_is_authenticated(struct intel_huc *huc)
* This function reads status register to verify if HuC
* firmware was successfully loaded.
*
- * Returns:
- * * -ENODEV if HuC is not present on this platform,
- * * -EOPNOTSUPP if HuC firmware is disabled,
- * * -ENOPKG if HuC firmware was not installed,
- * * -ENOEXEC if HuC firmware is invalid or mismatched,
- * * 0 if HuC firmware is not running,
- * * 1 if HuC firmware is authenticated and running.
+ * The return values match what is expected for the I915_PARAM_HUC_STATUS
+ * getparam.
*/
int intel_huc_check_status(struct intel_huc *huc)
{
@@ -219,11 +479,21 @@ int intel_huc_check_status(struct intel_huc *huc)
return -ENOPKG;
case INTEL_UC_FIRMWARE_ERROR:
return -ENOEXEC;
+ case INTEL_UC_FIRMWARE_INIT_FAIL:
+ return -ENOMEM;
+ case INTEL_UC_FIRMWARE_LOAD_FAIL:
+ return -EIO;
default:
break;
}
- return huc_is_authenticated(huc);
+ return intel_huc_is_authenticated(huc);
+}
+
+static bool huc_has_delayed_load(struct intel_huc *huc)
+{
+ return intel_huc_is_loaded_by_gsc(huc) &&
+ (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
}
void intel_huc_update_auth_status(struct intel_huc *huc)
@@ -231,9 +501,11 @@ void intel_huc_update_auth_status(struct intel_huc *huc)
if (!intel_uc_fw_is_loadable(&huc->fw))
return;
- if (huc_is_authenticated(huc))
+ if (intel_huc_is_authenticated(huc))
intel_uc_fw_change_status(&huc->fw,
INTEL_UC_FIRMWARE_RUNNING);
+ else if (huc_has_delayed_load(huc))
+ huc_delayed_load_start(huc);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index d7e25b6e879e..52db03620c60 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -7,9 +7,21 @@
#define _INTEL_HUC_H_
#include "i915_reg_defs.h"
+#include "i915_sw_fence.h"
#include "intel_uc_fw.h"
#include "intel_huc_fw.h"
+#include <linux/notifier.h>
+#include <linux/hrtimer.h>
+
+struct bus_type;
+
+enum intel_huc_delayed_load_status {
+ INTEL_HUC_WAITING_ON_GSC = 0,
+ INTEL_HUC_WAITING_ON_PXP,
+ INTEL_HUC_DELAYED_LOAD_ERROR,
+};
+
struct intel_huc {
/* Generic uC firmware management */
struct intel_uc_fw fw;
@@ -20,14 +32,27 @@ struct intel_huc {
u32 mask;
u32 value;
} status;
+
+ struct {
+ struct i915_sw_fence fence;
+ struct hrtimer timer;
+ struct notifier_block nb;
+ enum intel_huc_delayed_load_status status;
+ } delayed_load;
};
void intel_huc_init_early(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
+void intel_huc_suspend(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
+int intel_huc_wait_for_auth_complete(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
void intel_huc_update_auth_status(struct intel_huc *huc);
+bool intel_huc_is_authenticated(struct intel_huc *huc);
+
+void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
+void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
static inline int intel_huc_sanitize(struct intel_huc *huc)
{
@@ -56,6 +81,12 @@ static inline bool intel_huc_is_loaded_by_gsc(const struct intel_huc *huc)
return huc->fw.loaded_via_gsc;
}
+static inline bool intel_huc_wait_required(struct intel_huc *huc)
+{
+ return intel_huc_is_used(huc) && intel_huc_is_loaded_by_gsc(huc) &&
+ !intel_huc_is_authenticated(huc);
+}
+
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 9d6ab1e01639..4f246416db17 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -3,9 +3,43 @@
* Copyright © 2014-2019 Intel Corporation
*/
+#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
+#include "intel_huc.h"
#include "intel_huc_fw.h"
#include "i915_drv.h"
+#include "pxp/intel_pxp_huc.h"
+
+int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc)
+{
+ int ret;
+
+ if (!intel_huc_is_loaded_by_gsc(huc))
+ return -ENODEV;
+
+ if (!intel_uc_fw_is_loadable(&huc->fw))
+ return -ENOEXEC;
+
+ /*
+ * If we abort a suspend, HuC might still be loaded when the mei
+ * component gets re-bound and this function is called again. If so, just
+ * mark the HuC as loaded.
+ */
+ if (intel_huc_is_authenticated(huc)) {
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
+ return 0;
+ }
+
+ GEM_WARN_ON(intel_uc_fw_is_loaded(&huc->fw));
+
+ ret = intel_pxp_huc_load_and_auth(&huc_to_gt(huc)->pxp);
+ if (ret)
+ return ret;
+
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_TRANSFERRED);
+
+ return intel_huc_wait_for_auth_complete(huc);
+}
/**
* intel_huc_fw_upload() - load HuC uCode to device via DMA transfer
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
index 12f264ee3e0b..db42e238b45f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
@@ -8,6 +8,7 @@
struct intel_huc;
+int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc);
int intel_huc_fw_upload(struct intel_huc *huc);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index dbd048b77e19..2a508b137e90 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -357,8 +357,8 @@ static int uc_init_wopcm(struct intel_uc *uc)
{
struct intel_gt *gt = uc_to_gt(uc);
struct intel_uncore *uncore = gt->uncore;
- u32 base = intel_wopcm_guc_base(&gt->i915->wopcm);
- u32 size = intel_wopcm_guc_size(&gt->i915->wopcm);
+ u32 base = intel_wopcm_guc_base(&gt->wopcm);
+ u32 size = intel_wopcm_guc_size(&gt->wopcm);
u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
u32 mask;
int err;
@@ -636,8 +636,10 @@ void intel_uc_runtime_suspend(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
- if (!intel_guc_is_ready(guc))
+ if (!intel_guc_is_ready(guc)) {
+ guc->interrupts.enabled = false;
return;
+ }
/*
* Wait for any outstanding CTB before tearing down communication /w the
@@ -657,8 +659,10 @@ void intel_uc_suspend(struct intel_uc *uc)
intel_wakeref_t wakeref;
int err;
- if (!intel_guc_is_ready(guc))
+ if (!intel_guc_is_ready(guc)) {
+ guc->interrupts.enabled = false;
return;
+ }
with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
err = intel_guc_suspend(guc);
@@ -718,6 +722,7 @@ int intel_uc_runtime_resume(struct intel_uc *uc)
static const struct intel_uc_ops uc_ops_off = {
.init_hw = __uc_check_hw,
+ .fini = __uc_fini, /* to clean up the init_early initialization */
};
static const struct intel_uc_ops uc_ops_on = {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index b91ad4aede1f..0c80ba51a4bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -93,7 +93,8 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(BROXTON, 0, guc_mmp(bxt, 70, 1, 1)) \
fw_def(SKYLAKE, 0, guc_mmp(skl, 70, 1, 1))
-#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp) \
+#define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_raw, huc_mmp, huc_gsc) \
+ fw_def(DG2, 0, huc_gsc(dg2)) \
fw_def(ALDERLAKE_P, 0, huc_raw(tgl)) \
fw_def(ALDERLAKE_P, 0, huc_mmp(tgl, 7, 9, 3)) \
fw_def(ALDERLAKE_S, 0, huc_raw(tgl)) \
@@ -141,6 +142,9 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
__MAKE_UC_FW_PATH_BLANK(prefix_, "_huc")
+#define MAKE_HUC_FW_PATH_GSC(prefix_) \
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc_gsc")
+
#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
__MAKE_UC_FW_PATH_MMP(prefix_, "_huc_", major_, minor_, patch_)
@@ -153,7 +157,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
MODULE_FIRMWARE(uc_);
INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH_MAJOR, MAKE_GUC_FW_PATH_MMP)
-INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP)
+INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH_BLANK, MAKE_HUC_FW_PATH_MMP, MAKE_HUC_FW_PATH_GSC)
/*
* The next expansion of the table macros (in __uc_fw_auto_select below) provides
@@ -168,6 +172,7 @@ struct __packed uc_fw_blob {
u8 major;
u8 minor;
u8 patch;
+ bool loaded_via_gsc;
};
#define UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
@@ -176,16 +181,16 @@ struct __packed uc_fw_blob {
.patch = patch_, \
.path = path_,
-#define UC_FW_BLOB_NEW(major_, minor_, patch_, path_) \
+#define UC_FW_BLOB_NEW(major_, minor_, patch_, gsc_, path_) \
{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
- .legacy = false }
+ .legacy = false, .loaded_via_gsc = gsc_ }
#define UC_FW_BLOB_OLD(major_, minor_, patch_, path_) \
{ UC_FW_BLOB_BASE(major_, minor_, patch_, path_) \
.legacy = true }
#define GUC_FW_BLOB(prefix_, major_, minor_) \
- UC_FW_BLOB_NEW(major_, minor_, 0, \
+ UC_FW_BLOB_NEW(major_, minor_, 0, false, \
MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_))
#define GUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
@@ -193,12 +198,15 @@ struct __packed uc_fw_blob {
MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
#define HUC_FW_BLOB(prefix_) \
- UC_FW_BLOB_NEW(0, 0, 0, MAKE_HUC_FW_PATH_BLANK(prefix_))
+ UC_FW_BLOB_NEW(0, 0, 0, false, MAKE_HUC_FW_PATH_BLANK(prefix_))
#define HUC_FW_BLOB_MMP(prefix_, major_, minor_, patch_) \
UC_FW_BLOB_OLD(major_, minor_, patch_, \
MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_))
+#define HUC_FW_BLOB_GSC(prefix_) \
+ UC_FW_BLOB_NEW(0, 0, 0, true, MAKE_HUC_FW_PATH_GSC(prefix_))
+
struct __packed uc_fw_platform_requirement {
enum intel_platform p;
u8 rev; /* first platform rev using this FW */
@@ -224,7 +232,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, GUC_FW_BLOB_MMP)
};
static const struct uc_fw_platform_requirement blobs_huc[] = {
- INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP)
+ INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB, HUC_FW_BLOB_MMP, HUC_FW_BLOB_GSC)
};
static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
@@ -272,6 +280,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->file_wanted.path = blob->path;
uc_fw->file_wanted.major_ver = blob->major;
uc_fw->file_wanted.minor_ver = blob->minor;
+ uc_fw->loaded_via_gsc = blob->loaded_via_gsc;
found = true;
break;
}
@@ -469,10 +478,11 @@ static int check_gsc_manifest(const struct firmware *fw,
return 0;
}
-static int check_ccs_header(struct drm_i915_private *i915,
+static int check_ccs_header(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
{
+ struct drm_i915_private *i915 = gt->i915;
struct uc_css_header *css;
size_t size;
@@ -514,10 +524,10 @@ static int check_ccs_header(struct drm_i915_private *i915,
/* Sanity check whether this fw is not larger than whole WOPCM memory */
size = __intel_uc_fw_get_upload_size(uc_fw);
- if (unlikely(size >= i915->wopcm.size)) {
+ if (unlikely(size >= gt->wopcm.size)) {
drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- size, (size_t)i915->wopcm.size);
+ size, (size_t)gt->wopcm.size);
return -E2BIG;
}
@@ -545,7 +555,8 @@ static int check_ccs_header(struct drm_i915_private *i915,
*/
int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
{
- struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct drm_i915_private *i915 = gt->i915;
struct intel_uc_fw_file file_ideal;
struct device *dev = i915->drm.dev;
struct drm_i915_gem_object *obj;
@@ -553,7 +564,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
bool old_ver = false;
int err;
- GEM_BUG_ON(!i915->wopcm.size);
+ GEM_BUG_ON(!gt->wopcm.size);
GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
err = i915_inject_probe_error(i915, -ENXIO);
@@ -566,6 +577,17 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
+ if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) {
+ drm_err(&i915->drm,
+ "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+
+ /* try to find another blob to load */
+ release_firmware(fw);
+ err = -ENOENT;
+ }
+
/* Any error is terminal if overriding. Don't bother searching for older versions */
if (err && intel_uc_fw_is_overridden(uc_fw))
goto fail;
@@ -595,7 +617,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->loaded_via_gsc)
err = check_gsc_manifest(fw, uc_fw);
else
- err = check_ccs_header(i915, fw, uc_fw);
+ err = check_ccs_header(gt, fw, uc_fw);
if (err)
goto fail;
@@ -668,14 +690,30 @@ fail:
static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
{
- struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+ struct i915_ggtt *ggtt = gt->ggtt;
struct drm_mm_node *node = &ggtt->uc_fw;
+ u32 offset = uc_fw->type * INTEL_UC_RSVD_GGTT_PER_FW;
+
+ /*
+ * The media GT shares the GGTT with the root GT, which means that
+ * we need to use different offsets for the binaries on the media GT.
+ * To keep the math simple, we use 8MB for the root tile and 8MB for
+ * the media one. This will need to be updated if we ever have more
+ * than 1 media GT.
+ */
+ BUILD_BUG_ON(INTEL_UC_FW_NUM_TYPES * INTEL_UC_RSVD_GGTT_PER_FW > SZ_8M);
+ GEM_BUG_ON(gt->type == GT_MEDIA && gt->info.id > 1);
+ if (gt->type == GT_MEDIA)
+ offset += SZ_8M;
GEM_BUG_ON(!drm_mm_node_allocated(node));
GEM_BUG_ON(upper_32_bits(node->start));
GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+ GEM_BUG_ON(offset + uc_fw->obj->base.size > node->size);
+ GEM_BUG_ON(uc_fw->obj->base.size > INTEL_UC_RSVD_GGTT_PER_FW);
- return lower_32_bits(node->start);
+ return lower_32_bits(node->start + offset);
}
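
A worked example of the offset math above, assuming GuC is firmware type 0 and HuC type 1, with INTEL_UC_RSVD_GGTT_PER_FW = 2M and the 8M media-GT shift:

    root GT  GuC: 0 * 2M      =  0M into the reserved node
    root GT  HuC: 1 * 2M      =  2M
    media GT GuC: 0 * 2M + 8M =  8M
    media GT HuC: 1 * 2M + 8M = 10M
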
static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
@@ -690,7 +728,6 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
dummy->bi.pages = obj->mm.pages;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);
/* uc_fw->obj cache domains were not controlled across suspend */
if (i915_gem_object_has_struct_page(obj))
@@ -904,7 +941,6 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
out_unpin:
i915_gem_object_unpin_pages(uc_fw->obj);
out:
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index cb586f7df270..bc898ba5355d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -6,6 +6,7 @@
#ifndef _INTEL_UC_FW_H_
#define _INTEL_UC_FW_H_
+#include <linux/sizes.h>
#include <linux/types.h>
#include "intel_uc_fw_abi.h"
#include "intel_device_info.h"
@@ -114,6 +115,19 @@ struct intel_uc_fw {
(uc)->fw.file_selected.minor_ver, \
(uc)->fw.file_selected.patch_ver))
+/*
+ * When we load the uC binaries, we pin them in a reserved section at the top of
+ * the GGTT, which is ~18 MB. On multi-GT systems where the GTs share the GGTT,
+ * we also need to make sure that each binary is pinned to a unique location
+ * during load, because the different GTs can go through the FW load at the same
+ * time (see uc_fw_ggtt_offset() for details).
+ * Given that the available space is much greater than what the binaries require,
+ * we keep things simple by reserving a static chunk for each binary instead of
+ * dynamically partitioning the reserved section to make space for all the blobs.
+ */
+#define INTEL_UC_RSVD_GGTT_PER_FW SZ_2M
+
#ifdef CONFIG_DRM_I915_DEBUG_GUC
void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
enum intel_uc_fw_status status);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index 01f8cd3c3134..d91b58f70403 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -35,11 +35,14 @@ static int intel_hang_guc(void *arg)
struct i915_request *rq;
intel_wakeref_t wakeref;
struct i915_gpu_error *global = &gt->i915->gpu_error;
- struct intel_engine_cs *engine;
+ struct intel_engine_cs *engine = intel_selftest_find_any_engine(gt);
unsigned int reset_count;
u32 guc_status;
u32 old_beat;
+ if (!engine)
+ return 0;
+
ctx = kernel_context(gt->i915, NULL);
if (IS_ERR(ctx)) {
drm_err(&gt->i915->drm, "Failed get kernel context: %ld\n", PTR_ERR(ctx));
@@ -48,14 +51,13 @@ static int intel_hang_guc(void *arg)
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
- ce = intel_context_create(gt->engine[BCS0]);
+ ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
drm_err(&gt->i915->drm, "Failed to create spinner request: %d\n", ret);
goto err;
}
- engine = ce->engine;
reset_count = i915_reset_count(global);
old_beat = engine->props.heartbeat_interval_ms;