Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_gt.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 652
1 file changed, 405 insertions(+), 247 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f98f0fb21efb..3d3b1ba76e2b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,31 +3,43 @@
* Copyright © 2019 Intel Corporation
*/
-#include <drm/intel-gtt.h>
-
-#include "intel_gt_debugfs.h"
+#include <drm/drm_managed.h>
+#include <drm/intel/intel-gtt.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
+
#include "i915_drv.h"
+#include "i915_perf_oa_regs.h"
+#include "i915_reg.h"
#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_regs.h"
+#include "intel_ggtt_gmch.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
+#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
-#include "intel_pm.h"
+#include "intel_pci_config.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
+#include "intel_sa_media.h"
+#include "intel_gt_sysfs.h"
+#include "intel_tlb.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-#include "pxp/intel_pxp.h"
-void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+void intel_gt_common_init_early(struct intel_gt *gt)
{
- spin_lock_init(&gt->irq_lock);
+ spin_lock_init(gt->irq_lock);
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -39,42 +51,57 @@ void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
intel_gt_init_reset(gt);
intel_gt_init_requests(gt);
intel_gt_init_timelines(gt);
+ intel_gt_init_tlb(gt);
intel_gt_pm_init_early(gt);
+ intel_wopcm_init_early(&gt->wopcm);
intel_uc_init_early(&gt->uc);
intel_rps_init_early(&gt->rps);
}
-void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+/* Preliminary initialization of Tile 0 */
+int intel_root_gt_init_early(struct drm_i915_private *i915)
{
+ struct intel_gt *gt;
+
+ gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
+ if (!gt)
+ return -ENOMEM;
+
+ i915->gt[0] = gt;
+
gt->i915 = i915;
gt->uncore = &i915->uncore;
+ gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
+ if (!gt->irq_lock)
+ return -ENOMEM;
+
+ intel_gt_common_init_early(gt);
+
+ return 0;
}
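
The new early-init path leans on DRM-managed allocation: drmm_kzalloc() ties the lifetime of gt and gt->irq_lock to the drm_device, so the error paths above can simply return without manual frees. A minimal sketch of that pattern, using a hypothetical my_state structure rather than anything from i915:

#include <drm/drm_managed.h>
#include <linux/spinlock.h>

struct my_state {
	spinlock_t lock;
};

static int my_init_early(struct drm_device *drm)
{
	struct my_state *st;

	/* freed automatically when the drm_device is released */
	st = drmm_kzalloc(drm, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	spin_lock_init(&st->lock);
	return 0;
}
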
-int intel_gt_probe_lmem(struct intel_gt *gt)
+static int intel_gt_probe_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
+ unsigned int instance = gt->info.id;
+ int id = INTEL_REGION_LMEM_0 + instance;
struct intel_memory_region *mem;
- int id;
int err;
mem = intel_gt_setup_lmem(gt);
- if (mem == ERR_PTR(-ENODEV))
- mem = intel_gt_setup_fake_lmem(gt);
if (IS_ERR(mem)) {
err = PTR_ERR(mem);
if (err == -ENODEV)
return 0;
- drm_err(&i915->drm,
- "Failed to setup region(%d) type=%d\n",
- err, INTEL_MEMORY_LOCAL);
+ gt_err(gt, "Failed to setup region(%d) type=%d\n",
+ err, INTEL_MEMORY_LOCAL);
return err;
}
- id = INTEL_REGION_LMEM;
-
mem->id = id;
+ mem->instance = instance;
intel_memory_region_set_name(mem, "local%u", mem->instance);
@@ -85,77 +112,29 @@ int intel_gt_probe_lmem(struct intel_gt *gt)
return 0;
}
-void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
+int intel_gt_assign_ggtt(struct intel_gt *gt)
{
- gt->ggtt = ggtt;
-}
-
-static const struct intel_mmio_range icl_l3bank_steering_table[] = {
- { 0x00B100, 0x00B3FF },
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
- { 0x004000, 0x004AFF },
- { 0x00C800, 0x00CFFF },
- { 0x00DD00, 0x00DDFF },
- { 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
- {},
-};
-
-static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D800, 0x00D8FF },
- {},
-};
-
-static const struct intel_mmio_range dg2_lncf_steering_table[] = {
- { 0x00B000, 0x00B0FF },
- { 0x00D880, 0x00D8FF },
- {},
-};
+ /* Media GT shares primary GT's GGTT */
+ if (gt->type == GT_MEDIA) {
+ gt->ggtt = to_gt(gt->i915)->ggtt;
+ } else {
+ gt->ggtt = i915_ggtt_create(gt->i915);
+ if (IS_ERR(gt->ggtt))
+ return PTR_ERR(gt->ggtt);
+ }
-static u16 slicemask(struct intel_gt *gt, int count)
-{
- u64 dss_mask = intel_sseu_get_subslices(&gt->info.sseu, 0);
+ list_add_tail(&gt->ggtt_link, &gt->ggtt->gt_list);
- return intel_slicemask_from_dssmask(dss_mask, count);
+ return 0;
}
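
Since intel_gt_assign_ggtt() now links every GT onto its GGTT's gt_list (the media GT shares the primary GT's GGTT), code that must visit all GTs behind one GGTT can walk that list. An illustrative walk, assuming only the gt_list/ggtt_link fields shown above:

static void visit_gts_on_ggtt(struct i915_ggtt *ggtt)
{
	struct intel_gt *gt;

	/* every GT was added with list_add_tail() in intel_gt_assign_ggtt() */
	list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
		/* per-GT work, e.g. a TLB or GGTT invalidation, goes here */
	}
}
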
int intel_gt_init_mmio(struct intel_gt *gt)
{
- struct drm_i915_private *i915 = gt->i915;
-
intel_gt_init_clock_frequency(gt);
intel_uc_init_mmio(&gt->uc);
intel_sseu_info_init(gt);
-
- /*
- * An mslice is unavailable only if both the meml3 for the slice is
- * disabled *and* all of the DSS in the slice (quadrant) are disabled.
- */
- if (HAS_MSLICES(i915))
- gt->info.mslice_mask =
- slicemask(gt, GEN_DSS_PER_MSLICE) |
- (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
- GEN12_MEML3_EN_MASK);
-
- if (IS_DG2(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = dg2_lncf_steering_table;
- } else if (IS_XEHPSDV(i915)) {
- gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
- gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
- } else if (GRAPHICS_VER(i915) >= 11 &&
- GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
- gt->steering_table[L3BANK] = icl_l3bank_steering_table;
- gt->info.l3bank_mask =
- ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
- GEN10_L3BANK_MASK;
- } else if (HAS_MSLICES(i915)) {
- MISSING_CASE(INTEL_INFO(i915)->platform);
- }
+ intel_gt_mcr_init(gt);
return intel_engines_init_mmio(gt);
}
@@ -205,8 +184,8 @@ int intel_gt_init_hw(struct intel_gt *gt)
if (IS_HASWELL(i915))
intel_uncore_write(uncore,
- MI_PREDICATE_RESULT_2,
- IS_HSW_GT3(i915) ?
+ HSW_MI_PREDICATE_RESULT_2,
+ INTEL_INFO(i915)->gt == 3 ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
@@ -226,14 +205,14 @@ int intel_gt_init_hw(struct intel_gt *gt)
ret = i915_ppgtt_init_hw(gt);
if (ret) {
- DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
+ gt_err(gt, "Enabling PPGTT failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(&gt->uc);
if (ret) {
- i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
+ gt_probe_error(gt, "Enabling uc failed (%d)\n", ret);
goto out;
}
@@ -244,25 +223,20 @@ out:
return ret;
}
-static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
- intel_uncore_rmw(uncore, reg, 0, set);
+ GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+ GEN6_RING_FAULT_REG_POSTING_READ(engine);
}
-static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt)
{
- intel_uncore_rmw(uncore, reg, clr, 0);
-}
+ /* GT0_PERF_LIMIT_REASONS is available only for Gen11+ */
+ if (GRAPHICS_VER(gt->i915) < 11)
+ return INVALID_MMIO_REG;
-static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
-{
- intel_uncore_rmw(uncore, reg, 0, 0);
-}
-
-static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
-{
- GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
- GEN6_RING_FAULT_REG_POSTING_READ(engine);
+ return gt->type == GT_MEDIA ?
+ MTL_MEDIA_PERF_LIMIT_REASONS : GT0_PERF_LIMIT_REASONS;
}
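
Because intel_gt_perf_limit_reasons_reg() returns INVALID_MMIO_REG on pre-Gen11 hardware, callers can gate their reads with i915_mmio_reg_valid(). A hedged usage sketch (the wrapper name is invented):

static u32 read_perf_limit_reasons(struct intel_gt *gt)
{
	i915_reg_t reg = intel_gt_perf_limit_reasons_reg(gt);

	/* no such register before Gen11 */
	if (!i915_mmio_reg_valid(reg))
		return 0;

	return intel_uncore_read(gt->uncore, reg);
}
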
void
@@ -274,31 +248,46 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
u32 eir;
if (GRAPHICS_VER(i915) != 2)
- clear_register(uncore, PGTBL_ER);
+ intel_uncore_write(uncore, PGTBL_ER, 0);
if (GRAPHICS_VER(i915) < 4)
- clear_register(uncore, IPEIR(RENDER_RING_BASE));
+ intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
else
- clear_register(uncore, IPEIR_I965);
+ intel_uncore_write(uncore, IPEIR_I965, 0);
- clear_register(uncore, EIR);
+ intel_uncore_write(uncore, EIR, 0);
eir = intel_uncore_read(uncore, EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- rmw_set(uncore, EMR, eir);
+ gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir);
+ intel_uncore_rmw(uncore, EMR, 0, eir);
intel_uncore_write(uncore, GEN2_IIR,
I915_MASTER_ERROR_INTERRUPT);
}
- if (GRAPHICS_VER(i915) >= 12) {
- rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
+ /*
+ * For the media GT, this ring fault register is not replicated,
+ * so don't do multicast/replicated register read/write operations on it.
+ */
+ if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
+ intel_uncore_rmw(uncore, XELPMP_RING_FAULT_REG,
+ RING_FAULT_VALID, 0);
+ intel_uncore_posting_read(uncore,
+ XELPMP_RING_FAULT_REG);
+
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
+ intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
+ RING_FAULT_VALID, 0);
+ intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
+
+ } else if (GRAPHICS_VER(i915) >= 12) {
+ intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 8) {
- rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 6) {
struct intel_engine_cs *engine;
@@ -313,25 +302,66 @@ static void gen6_check_faults(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 fault;
for_each_engine(engine, gt, id) {
+ u32 fault;
+
fault = GEN6_RING_FAULT_REG_READ(engine);
+
if (fault & RING_FAULT_VALID) {
- drm_dbg(&engine->i915->drm, "Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ?
- "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08x\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & RING_FAULT_VADDR_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
}
}
}
+static void gen8_report_fault(struct intel_gt *gt, u32 fault,
+ u32 fault_data0, u32 fault_data1)
+{
+ u64 fault_addr;
+
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ REG_FIELD_GET(RING_FAULT_ENGINE_ID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_SRCID_MASK, fault),
+ REG_FIELD_GET(RING_FAULT_FAULT_TYPE_MASK, fault));
+}
+
+static void xehp_check_faults(struct intel_gt *gt)
+{
+ u32 fault;
+
+ /*
+ * Although the fault register now lives in an MCR register range,
+ * the GAM registers are special and we only truly need to read
+ * the "primary" GAM instance rather than handling each instance
+ * individually. intel_gt_mcr_read_any() will automatically steer
+ * toward the primary instance.
+ */
+ fault = intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
+ if (fault & RING_FAULT_VALID)
+ gen8_report_fault(gt, fault,
+ intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA0),
+ intel_gt_mcr_read_any(gt, XEHP_FAULT_TLB_DATA1));
+}
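
gen8_report_fault() reassembles the faulting virtual address from two TLB data registers: FAULT_TLB_DATA0 carries VA bits 43:12 (hence the << 12) and the low bits of FAULT_TLB_DATA1 carry the VA bits above 43 (hence the << 44). A sketch of just that arithmetic; the mask below is illustrative and stands in for the real FAULT_VA_HIGH_BITS definition:

/* illustrative mask standing in for FAULT_VA_HIGH_BITS */
#define EXAMPLE_VA_HIGH_BITS 0xf

static u64 example_fault_va(u32 fault_data0, u32 fault_data1)
{
	return ((u64)(fault_data1 & EXAMPLE_VA_HIGH_BITS) << 44) |
	       ((u64)fault_data0 << 12);
}
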
+
static void gen8_check_faults(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
@@ -349,28 +379,10 @@ static void gen8_check_faults(struct intel_gt *gt)
}
fault = intel_uncore_read(uncore, fault_reg);
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
-
- fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
- fault_data1 = intel_uncore_read(uncore, fault_data1_reg);
-
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
-
- drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
+ if (fault & RING_FAULT_VALID)
+ gen8_report_fault(gt, fault,
+ intel_uncore_read(uncore, fault_data0_reg),
+ intel_uncore_read(uncore, fault_data1_reg));
}
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
@@ -378,7 +390,9 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
/* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (GRAPHICS_VER(i915) >= 8)
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
+ xehp_check_faults(gt);
+ else if (GRAPHICS_VER(i915) >= 8)
gen8_check_faults(gt);
else if (GRAPHICS_VER(i915) >= 6)
gen6_check_faults(gt);
@@ -424,7 +438,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
+ RING_TAIL(RENDER_RING_BASE));
spin_unlock_irqrestore(&uncore->lock, flags);
}
}
@@ -433,14 +447,17 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
{
wmb();
if (GRAPHICS_VER(gt->i915) < 6)
- intel_gtt_chipset_flush();
+ intel_ggtt_gmch_flush();
}
void intel_gt_driver_register(struct intel_gt *gt)
{
+ intel_gsc_init(&gt->gsc, gt->i915);
+
intel_rps_driver_register(&gt->rps);
intel_gt_debugfs_register(gt);
+ intel_gt_sysfs_register(gt);
}
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -450,13 +467,15 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
struct i915_vma *vma;
int ret;
- obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
- if (IS_ERR(obj))
+ obj = i915_gem_object_create_lmem(i915, size,
+ I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj) && !IS_METEORLAKE(i915)) /* Wa_22018444074 */
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
- drm_err(&i915->drm, "Failed to allocate scratch page\n");
+ gt_err(gt, "Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
@@ -654,8 +673,13 @@ int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
return -EINTR;
}
- return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
- remaining_timeout);
+ if (timeout)
+ return timeout;
+
+ if (remaining_timeout < 0)
+ remaining_timeout = 0;
+
+ return intel_uc_wait_for_idle(&gt->uc, remaining_timeout);
}
int intel_gt_init(struct intel_gt *gt)
@@ -704,6 +728,10 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_uc_init;
+ err = intel_gt_init_hwconfig(gt);
+ if (err)
+ gt_err(gt, "Failed to retrieve hwconfig table: %pe\n", ERR_PTR(err));
+
err = __engines_record_defaults(gt);
if (err)
goto err_gt;
@@ -712,15 +740,13 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_gt;
- intel_uc_init_late(&gt->uc);
-
err = i915_inject_probe_error(gt->i915, -EIO);
if (err)
goto err_gt;
- intel_migrate_init(&gt->migrate, gt);
+ intel_uc_init_late(&gt->uc);
- intel_pxp_init(&gt->pxp);
+ intel_migrate_init(&gt->migrate, gt);
goto out_fw;
err_gt:
@@ -757,9 +783,32 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
+ intel_gt_sysfs_unregister(gt);
intel_rps_driver_unregister(&gt->rps);
+ intel_gsc_fini(&gt->gsc);
- intel_pxp_fini(&gt->pxp);
+ /*
+ * If we unload the driver and wedge before the GSC worker is complete,
+ * the worker will hit an error on its submission to the GSC engine and
+ * then exit. This is hard to hit for a user, but it is reproducible
+ * with skipping selftests. The error is handled gracefully by the
+ * worker, so there are no functional issues, but we still end up with
+ * an error message in dmesg, which is something we want to avoid as
+ * this is a supported scenario. We could modify the worker to better
+ * handle a wedging occurring during its execution, but that gets
+ * complicated for a couple of reasons:
+ * - We do want the error on runtime wedging, because there are
+ * implications for subsystems outside of GT (i.e., PXP, HDCP), it's
+ * only the error on driver unload that we want to silence.
+ * - The worker is responsible for multiple submissions (GSC FW load,
+ * HuC auth, SW proxy), so all of those will have to be adapted to
+ * handle the wedged_on_fini scenario.
+ * Therefore, it's much simpler to just wait for the worker to be done
+ * before wedging on driver removal, also considering that the worker
+ * will likely already be idle in the great majority of non-selftest
+ * scenarios.
+ */
+ intel_gsc_uc_flush_work(&gt->uc.gsc);
/*
* Upon unregistering the device to prevent any new users, cancel
@@ -770,7 +819,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- __intel_gt_reset(gt, ALL_ENGINES);
+ intel_gt_reset_all_engines(gt);
}
void intel_gt_driver_release(struct intel_gt *gt)
@@ -785,130 +834,239 @@ void intel_gt_driver_release(struct intel_gt *gt)
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
intel_gt_fini_buffer_pool(gt);
+ intel_gt_fini_hwconfig(gt);
}
-void intel_gt_driver_late_release(struct intel_gt *gt)
+void intel_gt_driver_late_release_all(struct drm_i915_private *i915)
{
+ struct intel_gt *gt;
+ unsigned int id;
+
/* We need to wait for inflight RCU frees to release their grip */
rcu_barrier();
- intel_uc_driver_late_release(&gt->uc);
- intel_gt_fini_requests(gt);
- intel_gt_fini_reset(gt);
- intel_gt_fini_timelines(gt);
- intel_engines_free(gt);
+ for_each_gt(gt, i915, id) {
+ intel_uc_driver_late_release(&gt->uc);
+ intel_gt_fini_requests(gt);
+ intel_gt_fini_reset(gt);
+ intel_gt_fini_timelines(gt);
+ intel_gt_fini_tlb(gt);
+ intel_engines_free(gt);
+ }
}
-/**
- * intel_gt_reg_needs_read_steering - determine whether a register read
- * requires explicit steering
- * @gt: GT structure
- * @reg: the register to check steering requirements for
- * @type: type of multicast steering to check
- *
- * Determines whether @reg needs explicit steering of a specific type for
- * reads.
- *
- * Returns false if @reg does not belong to a register range of the given
- * steering type, or if the default (subslice-based) steering IDs are suitable
- * for @type steering too.
- */
-static bool intel_gt_reg_needs_read_steering(struct intel_gt *gt,
- i915_reg_t reg,
- enum intel_steering_type type)
+static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
{
- const u32 offset = i915_mmio_reg_offset(reg);
- const struct intel_mmio_range *entry;
+ int ret;
+
+ if (!gt_is_root(gt)) {
+ struct intel_uncore *uncore;
+ spinlock_t *irq_lock;
+
+ uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
- if (likely(!intel_gt_needs_read_steering(gt, type)))
- return false;
+ irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+ if (!irq_lock)
+ return -ENOMEM;
- for (entry = gt->steering_table[type]; entry->end; entry++) {
- if (offset >= entry->start && offset <= entry->end)
- return true;
+ gt->uncore = uncore;
+ gt->irq_lock = irq_lock;
+
+ intel_gt_common_init_early(gt);
}
- return false;
+ intel_uncore_init_early(gt->uncore, gt);
+
+ ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
+ if (ret)
+ return ret;
+
+ gt->phys_addr = phys_addr;
+
+ return 0;
+}
+
+int intel_gt_probe_all(struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct intel_gt *gt = to_gt(i915);
+ const struct intel_gt_definition *gtdef;
+ phys_addr_t phys_addr;
+ unsigned int mmio_bar;
+ unsigned int i;
+ int ret;
+
+ mmio_bar = intel_mmio_bar(GRAPHICS_VER(i915));
+ phys_addr = pci_resource_start(pdev, mmio_bar);
+
+ /*
+ * We always have at least one primary GT on any device
+ * and it has been already initialized early during probe
+ * in i915_driver_probe()
+ */
+ gt->i915 = i915;
+ gt->name = "Primary GT";
+ gt->info.engine_mask = INTEL_INFO(i915)->platform_engine_mask;
+
+ gt_dbg(gt, "Setting up %s\n", gt->name);
+ ret = intel_gt_tile_setup(gt, phys_addr);
+ if (ret)
+ return ret;
+
+ if (!HAS_EXTRA_GT_LIST(i915))
+ return 0;
+
+ for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
+ gtdef->name != NULL;
+ i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
+ gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
+ if (!gt) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ gt->i915 = i915;
+ gt->name = gtdef->name;
+ gt->type = gtdef->type;
+ gt->info.engine_mask = gtdef->engine_mask;
+ gt->info.id = i;
+
+ gt_dbg(gt, "Setting up %s\n", gt->name);
+ if (GEM_WARN_ON(range_overflows_t(resource_size_t,
+ gtdef->mapping_base,
+ SZ_16M,
+ pci_resource_len(pdev, mmio_bar)))) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ switch (gtdef->type) {
+ case GT_TILE:
+ ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
+ break;
+
+ case GT_MEDIA:
+ ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
+ gtdef->gsi_offset);
+ break;
+
+ case GT_PRIMARY:
+ /* Primary GT should not appear in extra GT list */
+ default:
+ MISSING_CASE(gtdef->type);
+ ret = -ENODEV;
+ }
+
+ if (ret)
+ goto err;
+
+ i915->gt[i] = gt;
+ }
+
+ return 0;
+
+err:
+ i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
+ return ret;
+}
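
intel_gt_probe_all() walks a NULL-name-terminated extra_gt_list from the platform's device info. An invented example of what such a table could look like for a platform with a standalone media GT; the offsets and engine mask are placeholders, not real platform data:

static const struct intel_gt_definition example_extra_gt_list[] = {
	{
		.type = GT_MEDIA,
		.name = "Standalone Media GT",
		.mapping_base = 0x380000,	/* placeholder offset */
		.gsi_offset = 0x380000,		/* placeholder offset */
		.engine_mask = BIT(VCS0) | BIT(VECS0),
	},
	{}	/* gtdef->name == NULL terminates the loop above */
};
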
+
+int intel_gt_tiles_init(struct drm_i915_private *i915)
+{
+ struct intel_gt *gt;
+ unsigned int id;
+ int ret;
+
+ for_each_gt(gt, i915, id) {
+ ret = intel_gt_probe_lmem(gt);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_gt_info_print(const struct intel_gt_info *info,
+ struct drm_printer *p)
+{
+ drm_printf(p, "available engines: %x\n", info->engine_mask);
+
+ intel_sseu_dump(&info->sseu, p);
+}
+
+enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
+ struct drm_i915_gem_object *obj,
+ bool always_coherent)
+{
+ /*
+ * Wa_22016122933: always return I915_MAP_WC for Media
+ * version 13.0 when the object is on the Media GT
+ */
+ if (i915_gem_object_is_lmem(obj) || intel_gt_needs_wa_22016122933(gt))
+ return I915_MAP_WC;
+ if (HAS_LLC(gt->i915) || always_coherent)
+ return I915_MAP_WB;
+ else
+ return I915_MAP_WC;
+}
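
intel_gt_coherent_map_type() centralizes the Wa_22016122933 special case so individual callers never test the media IP version themselves. A hypothetical caller, with error handling elided:

static void *example_map_for_cpu(struct intel_gt *gt,
				 struct drm_i915_gem_object *obj)
{
	/* request coherency where it is cheaply available */
	enum i915_map_type type =
		intel_gt_coherent_map_type(gt, obj, true);

	return i915_gem_object_pin_map_unlocked(obj, type);
}
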
+
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
+}
+
+bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
+{
+ return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
+}
+
+static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready)
+{
+ struct intel_engine_cs *engine = gt->engine[BCS0];
+
+ if (engine && engine->bind_context)
+ engine->bind_context_ready = ready;
}
/**
- * intel_gt_get_valid_steering - determines valid IDs for a class of MCR steering
+ * intel_gt_bind_context_set_ready - Set the context binding as ready
+ *
* @gt: GT structure
- * @type: multicast register type
- * @sliceid: Slice ID returned
- * @subsliceid: Subslice ID returned
*
- * Determines sliceid and subsliceid values that will steer reads
- * of a specific multicast register class to a valid value.
+ * This function marks the binder context as ready.
*/
-static void intel_gt_get_valid_steering(struct intel_gt *gt,
- enum intel_steering_type type,
- u8 *sliceid, u8 *subsliceid)
-{
- switch (type) {
- case L3BANK:
- GEM_DEBUG_WARN_ON(!gt->info.l3bank_mask); /* should be impossible! */
-
- *sliceid = 0; /* unused */
- *subsliceid = __ffs(gt->info.l3bank_mask);
- break;
- case MSLICE:
- GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */
-
- *sliceid = __ffs(gt->info.mslice_mask);
- *subsliceid = 0; /* unused */
- break;
- case LNCF:
- GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */
-
- /*
- * An LNCF is always present if its mslice is present, so we
- * can safely just steer to LNCF 0 in all cases.
- */
- *sliceid = __ffs(gt->info.mslice_mask) << 1;
- *subsliceid = 0; /* unused */
- break;
- default:
- MISSING_CASE(type);
- *sliceid = 0;
- *subsliceid = 0;
- }
+void intel_gt_bind_context_set_ready(struct intel_gt *gt)
+{
+ __intel_gt_bind_context_set_ready(gt, true);
}
/**
- * intel_gt_read_register_fw - reads a GT register with support for multicast
* intel_gt_bind_context_set_unready - Set the context binding as not ready
* @gt: GT structure
- * @reg: register to read
*
- * This function will read a GT register. If the register is a multicast
- * register, the read will be steered to a valid instance (i.e., one that
- * isn't fused off or powered down by power gating).
- *
- * Returns the value from a valid instance of @reg.
+ * This function marks the binder context as not ready.
*/
-u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg)
-{
- int type;
- u8 sliceid, subsliceid;
-
- for (type = 0; type < NUM_STEERING_TYPES; type++) {
- if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
- intel_gt_get_valid_steering(gt, type, &sliceid,
- &subsliceid);
- return intel_uncore_read_with_mcr_steering_fw(gt->uncore,
- reg,
- sliceid,
- subsliceid);
- }
- }
- return intel_uncore_read_fw(gt->uncore, reg);
+void intel_gt_bind_context_set_unready(struct intel_gt *gt)
+{
+ __intel_gt_bind_context_set_ready(gt, false);
}
-void intel_gt_info_print(const struct intel_gt_info *info,
- struct drm_printer *p)
+/**
+ * intel_gt_is_bind_context_ready - Check if context binding is ready
+ *
+ * @gt: GT structure
+ *
+ * This function returns binder context's ready status.
+ */
+bool intel_gt_is_bind_context_ready(struct intel_gt *gt)
{
- drm_printf(p, "available engines: %x\n", info->engine_mask);
+ struct intel_engine_cs *engine = gt->engine[BCS0];
- intel_sseu_dump(&info->sseu, p);
+ if (engine)
+ return engine->bind_context_ready;
+
+ return false;
}
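
The ready flag gates use of the BCS0 binder context for GGTT bind/unbind work. A typical (invented) pairing marks the context unready across a transition during which it must not be submitted to, then ready again afterwards:

static void example_transition(struct intel_gt *gt)
{
	intel_gt_bind_context_set_unready(gt);

	/* ... reset or power-state work during which BCS0 is unusable ... */

	intel_gt_bind_context_set_ready(gt);
}
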