Diffstat (limited to 'drivers/gpu/drm/i915/gt')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h            |   2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c         |  69
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h      |  13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c       |  17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c              | 235
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gsc.c               |  11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c                |  62
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h                |   3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c            |  22
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.h            |   1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c             |  20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h             |   1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_print.h          |   3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h           |   1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c               |   5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h               |   5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c             |  26
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c       |  13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c            |   6
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h            |   1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c         |   2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h       |   1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c |  10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c          |   8
25 files changed, 472 insertions, 73 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index b58c30ac8ef0..40269e4c1e31 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -170,6 +170,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
+#define I915_GEM_HWS_GGTT_BIND 0x46
+#define I915_GEM_HWS_GGTT_BIND_ADDR (I915_GEM_HWS_GGTT_BIND * sizeof(u32))
#define I915_GEM_HWS_PXP 0x60
#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
#define I915_GEM_HWS_GSC 0x62
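
For orientation, each I915_GEM_HWS_* constant above is a dword index into the engine's 4 KiB hardware status page, and the *_ADDR companion converts it to a byte offset. A standalone toy illustration of that convention (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Toy mirror of the dword-index -> byte-offset convention. */
#define HWS_GGTT_BIND       0x46
#define HWS_GGTT_BIND_ADDR  (HWS_GGTT_BIND * sizeof(uint32_t))

int main(void)
{
	/* prints: GGTT bind slot: dword 0x46, byte offset 0x118 */
	printf("GGTT bind slot: dword 0x%x, byte offset 0x%zx\n",
	       (unsigned int)HWS_GGTT_BIND, (size_t)HWS_GGTT_BIND_ADDR);
	return 0;
}
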
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 84a75c95f3f7..179d9546865b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -316,10 +316,9 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
* out in the wash.
*/
cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
- drm_dbg(&gt->i915->drm,
- "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
- GRAPHICS_VER(gt->i915), cxt_size * 64,
- cxt_size - 1);
+ gt_dbg(gt, "graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
+ GRAPHICS_VER(gt->i915), cxt_size * 64,
+ cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
case 2:
@@ -788,7 +787,7 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
if (!(BIT(i) & vdbox_mask)) {
gt->info.engine_mask &= ~BIT(_VCS(i));
- drm_dbg(&i915->drm, "vcs%u fused off\n", i);
+ gt_dbg(gt, "vcs%u fused off\n", i);
continue;
}
@@ -796,8 +795,7 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
gt->info.vdbox_sfc_access |= BIT(i);
logical_vdbox++;
}
- drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(gt));
+ gt_dbg(gt, "vdbox enable: %04x, instances: %04lx\n", vdbox_mask, VDBOX_MASK(gt));
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
for (i = 0; i < I915_MAX_VECS; i++) {
@@ -808,11 +806,10 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
if (!(BIT(i) & vebox_mask)) {
gt->info.engine_mask &= ~BIT(_VECS(i));
- drm_dbg(&i915->drm, "vecs%u fused off\n", i);
+ gt_dbg(gt, "vecs%u fused off\n", i);
}
}
- drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(gt));
+ gt_dbg(gt, "vebox enable: %04x, instances: %04lx\n", vebox_mask, VEBOX_MASK(gt));
GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
}
@@ -838,7 +835,7 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
*/
for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
info->engine_mask &= ~BIT(_CCS(i));
- drm_dbg(&i915->drm, "ccs%u fused off\n", i);
+ gt_dbg(gt, "ccs%u fused off\n", i);
}
}
@@ -866,8 +863,8 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
_BCS(instance));
if (mask & info->engine_mask) {
- drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
- drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);
+ gt_dbg(gt, "bcs%u fused off\n", instance);
+ gt_dbg(gt, "bcs%u fused off\n", instance + 1);
info->engine_mask &= ~mask;
}
@@ -907,8 +904,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
* submission, which will wake up the GSC power well.
*/
if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(&gt->uc)) {
- drm_notice(&gt->i915->drm,
- "No GSC FW selected, disabling GSC CS and media C6\n");
+ gt_notice(gt, "No GSC FW selected, disabling GSC CS and media C6\n");
info->engine_mask &= ~BIT(GSC0);
}
@@ -1097,8 +1093,7 @@ static int init_status_page(struct intel_engine_cs *engine)
*/
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
- drm_err(&engine->i915->drm,
- "Failed to allocate status page\n");
+ gt_err(engine->gt, "Failed to allocate status page\n");
return PTR_ERR(obj);
}
@@ -1419,6 +1414,20 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce)
}
static struct intel_context *
+create_ggtt_bind_context(struct intel_engine_cs *engine)
+{
+ static struct lock_class_key kernel;
+
+ /*
+ * MI_UPDATE_GTT can insert up to 511 PTE entries and there could be multiple
+ * bind requests at a time, so get a bigger ring.
+ */
+ return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
+ I915_GEM_HWS_GGTT_BIND_ADDR,
+ &kernel, "ggtt_bind_context");
+}
+
+static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
static struct lock_class_key kernel;
@@ -1441,7 +1450,7 @@ create_kernel_context(struct intel_engine_cs *engine)
*/
static int engine_init_common(struct intel_engine_cs *engine)
{
- struct intel_context *ce;
+ struct intel_context *ce, *bce = NULL;
int ret;
engine->set_default_submission(engine);
@@ -1457,17 +1466,33 @@ static int engine_init_common(struct intel_engine_cs *engine)
ce = create_kernel_context(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
+ /*
+ * Create a separate pinned context for GGTT updates with the blitter
+ * engine if the platform requires such a service. MI_UPDATE_GTT works on
+ * other engines as well, but BCS should be a less busy engine, so pick
+ * that for GGTT updates.
+ */
+ if (i915_ggtt_require_binder(engine->i915) && engine->id == BCS0) {
+ bce = create_ggtt_bind_context(engine);
+ if (IS_ERR(bce)) {
+ ret = PTR_ERR(bce);
+ goto err_ce_context;
+ }
+ }
ret = measure_breadcrumb_dw(ce);
if (ret < 0)
- goto err_context;
+ goto err_bce_context;
engine->emit_fini_breadcrumb_dw = ret;
engine->kernel_context = ce;
+ engine->bind_context = bce;
return 0;
-err_context:
+err_bce_context:
+	if (bce)
+		intel_engine_destroy_pinned_context(bce);
+err_ce_context:
intel_engine_destroy_pinned_context(ce);
return ret;
}
@@ -1537,6 +1562,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->kernel_context)
intel_engine_destroy_pinned_context(engine->kernel_context);
+ if (engine->bind_context)
+ intel_engine_destroy_pinned_context(engine->bind_context);
+
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
cleanup_status_page(engine);
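
The new error path above follows the usual reverse-order goto unwind, where the bind context is optional and must only be torn down if it was created. A self-contained toy sketch of the same shape (stub helpers, not the driver's API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { int dummy; };

static struct ctx *create_ctx(void) { return calloc(1, sizeof(struct ctx)); }
static void destroy_ctx(struct ctx *c) { free(c); }
static bool needs_binder(void) { return true; }
static int measure(void) { return 8; }

/* Same unwind shape as engine_init_common(): undo only what succeeded,
 * in reverse order. On success the contexts would be stored on the engine. */
static int setup_contexts(void)
{
	struct ctx *ce, *bce = NULL;
	int ret;

	ce = create_ctx();
	if (!ce)
		return -ENOMEM;

	if (needs_binder()) {
		bce = create_ctx();
		if (!bce) {
			ret = -ENOMEM;
			goto err_ce;
		}
	}

	ret = measure();
	if (ret < 0)
		goto err_bce;

	return 0;

err_bce:
	if (bce)		/* the bind context is optional, as in the patch */
		destroy_ctx(bce);
err_ce:
	destroy_ctx(ce);
	return ret;
}

int main(void)
{
	printf("setup: %d\n", setup_contexts());
	return 0;
}
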
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index a7e677598004..8769760257fd 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -402,7 +402,15 @@ struct intel_engine_cs {
unsigned long context_tag;
- struct rb_node uabi_node;
+ /*
+ * The type evolves during initialization, see related comment for
+ * struct drm_i915_private's uabi_engines member.
+ */
+ union {
+ struct llist_node uabi_llist;
+ struct list_head uabi_list;
+ struct rb_node uabi_node;
+ };
struct intel_sseu sseu;
@@ -416,6 +424,9 @@ struct intel_engine_cs {
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
+ struct intel_context *bind_context; /* pinned, only for BCS0 */
+ /* mark the bind context's availability status */
+ bool bind_context_ready;
/**
* pinned_contexts_list: List of pinned contexts. This list is only
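
The union above is safe because the three linkages are live in strictly disjoint phases: lock-free collection while engines register, list-based sorting, then the final rb-tree used for uabi lookup. A toy standalone illustration of the storage-sharing idea (simplified node types, not kernel code):

#include <stdio.h>

/* Simplified stand-ins for llist_node / list_head / rb_node. */
struct lockfree_node { struct lockfree_node *next; };
struct sorted_node   { struct sorted_node *prev, *next; };
struct tree_node     { struct tree_node *left, *right; };

struct engine {
	const char *name;
	/* Only one linkage is live at a time, so they can share storage. */
	union {
		struct lockfree_node collect;	/* phase 1: lock-free gather */
		struct sorted_node   sort;	/* phase 2: stable sort */
		struct tree_node     lookup;	/* phase 3: final rb-tree */
	} uabi;
};

int main(void)
{
	struct engine e = { .name = "rcs0" };

	/* The union is only as large as its largest member. */
	printf("union size: %zu bytes\n", sizeof(e.uabi));
	return 0;
}
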
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index dcedff41a825..118164ddbb2e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -38,8 +38,7 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
void intel_engine_add_user(struct intel_engine_cs *engine)
{
- llist_add((struct llist_node *)&engine->uabi_node,
- (struct llist_head *)&engine->i915->uabi_engines);
+ llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}
static const u8 uabi_classes[] = {
@@ -54,9 +53,9 @@ static int engine_cmp(void *priv, const struct list_head *A,
const struct list_head *B)
{
const struct intel_engine_cs *a =
- container_of((struct rb_node *)A, typeof(*a), uabi_node);
+ container_of(A, typeof(*a), uabi_list);
const struct intel_engine_cs *b =
- container_of((struct rb_node *)B, typeof(*b), uabi_node);
+ container_of(B, typeof(*b), uabi_list);
if (uabi_classes[a->class] < uabi_classes[b->class])
return -1;
@@ -73,7 +72,7 @@ static int engine_cmp(void *priv, const struct list_head *A,
static struct llist_node *get_engines(struct drm_i915_private *i915)
{
- return llist_del_all((struct llist_head *)&i915->uabi_engines);
+ return llist_del_all(&i915->uabi_engines_llist);
}
static void sort_engines(struct drm_i915_private *i915,
@@ -83,9 +82,8 @@ static void sort_engines(struct drm_i915_private *i915,
llist_for_each_safe(pos, next, get_engines(i915)) {
struct intel_engine_cs *engine =
- container_of((struct rb_node *)pos, typeof(*engine),
- uabi_node);
- list_add((struct list_head *)&engine->uabi_node, engines);
+ container_of(pos, typeof(*engine), uabi_llist);
+ list_add(&engine->uabi_list, engines);
}
list_sort(NULL, engines, engine_cmp);
}
@@ -213,8 +211,7 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
p = &i915->uabi_engines.rb_node;
list_for_each_safe(it, next, &engines) {
struct intel_engine_cs *engine =
- container_of((struct rb_node *)it, typeof(*engine),
- uabi_node);
+ container_of(it, typeof(*engine), uabi_list);
if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
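
The casts this hunk removes were forcing container_of() through a mismatched member type; with the union, each phase names its real member. For reference, container_of() recovers the enclosing struct from a pointer to an embedded member, as in this freestanding sketch:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node { struct node *next; };

struct engine {
	int id;
	struct node link;	/* embedded linkage, like uabi_llist */
};

int main(void)
{
	struct engine e = { .id = 3 };
	struct node *pos = &e.link;

	/* Recover the engine from its embedded node; prints 3. */
	printf("%d\n", container_of(pos, struct engine, link)->id);
	return 0;
}
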
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index da21f2786b5d..4d7d88b92632 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -15,18 +15,23 @@
#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
+#include "intel_context.h"
#include "intel_ggtt_gmch.h"
+#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "intel_pci_config.h"
+#include "intel_ring.h"
#include "i915_drv.h"
#include "i915_pci.h"
+#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
+#include "intel_engine_pm.h"
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
@@ -252,6 +257,145 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr,
return pte;
}
+static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
+{
+ struct intel_gt *gt = ggtt->vm.gt;
+
+ return intel_gt_is_bind_context_ready(gt);
+}
+
+static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
+{
+ struct intel_context *ce;
+ struct intel_gt *gt = ggtt->vm.gt;
+
+ if (intel_gt_is_wedged(gt))
+ return NULL;
+
+ ce = gt->engine[BCS0]->bind_context;
+ GEM_BUG_ON(!ce);
+
+ /*
+ * If the GT is not awake already at this stage then fallback
+ * to pci based GGTT update otherwise __intel_wakeref_get_first()
+ * would conflict with fs_reclaim trying to allocate memory while
+ * doing rpm_resume().
+ */
+ if (!intel_gt_pm_get_if_awake(gt))
+ return NULL;
+
+ intel_engine_pm_get(ce->engine);
+
+ return ce;
+}
+
+static void gen8_ggtt_bind_put_ce(struct intel_context *ce)
+{
+ intel_engine_pm_put(ce->engine);
+ intel_gt_pm_put(ce->engine->gt);
+}
+
+static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
+ struct sg_table *pages, u32 num_entries,
+ const gen8_pte_t pte)
+{
+ struct i915_sched_attr attr = {};
+ struct intel_gt *gt = ggtt->vm.gt;
+ const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode;
+ struct sgt_iter iter;
+ struct i915_request *rq;
+ struct intel_context *ce;
+ u32 *cs;
+
+ if (!num_entries)
+ return true;
+
+ ce = gen8_ggtt_bind_get_ce(ggtt);
+ if (!ce)
+ return false;
+
+ if (pages)
+ iter = __sgt_iter(pages->sgl, true);
+
+ while (num_entries) {
+ int count = 0;
+ dma_addr_t addr;
+ /*
+ * MI_UPDATE_GTT can update 512 entries in a single command, but
+ * that ends up with an engine reset; 511 works.
+ */
+ u32 n_ptes = min_t(u32, 511, num_entries);
+
+ if (mutex_lock_interruptible(&ce->timeline->mutex))
+ goto put_ce;
+
+ intel_context_enter(ce);
+ rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC);
+ intel_context_exit(ce);
+ if (IS_ERR(rq)) {
+ GT_TRACE(gt, "Failed to get bind request\n");
+ mutex_unlock(&ce->timeline->mutex);
+ goto put_ce;
+ }
+
+ cs = intel_ring_begin(rq, 2 * n_ptes + 2);
+ if (IS_ERR(cs)) {
+ GT_TRACE(gt, "Failed to ring space for GGTT bind\n");
+ i915_request_set_error_once(rq, PTR_ERR(cs));
+ /* once a request is created, it must be queued */
+ goto queue_err_rq;
+ }
+
+ *cs++ = MI_UPDATE_GTT | (2 * n_ptes);
+ *cs++ = offset << 12;
+
+ if (pages) {
+ for_each_sgt_daddr_next(addr, iter) {
+ if (count == n_ptes)
+ break;
+ *cs++ = lower_32_bits(pte | addr);
+ *cs++ = upper_32_bits(pte | addr);
+ count++;
+ }
+ /* fill remaining with scratch pte, if any */
+ if (count < n_ptes) {
+ memset64((u64 *)cs, scratch_pte,
+ n_ptes - count);
+ cs += (n_ptes - count) * 2;
+ }
+ } else {
+ memset64((u64 *)cs, pte, n_ptes);
+ cs += n_ptes * 2;
+ }
+
+ intel_ring_advance(rq, cs);
+queue_err_rq:
+ i915_request_get(rq);
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+ mutex_unlock(&ce->timeline->mutex);
+ /* This wait breaks if the request completes or after an engine reset */
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ if (rq->fence.error)
+ goto err_rq;
+
+ i915_request_put(rq);
+
+ num_entries -= n_ptes;
+ offset += n_ptes;
+ }
+
+ gen8_ggtt_bind_put_ce(ce);
+ return true;
+
+err_rq:
+ i915_request_put(rq);
+put_ce:
+ gen8_ggtt_bind_put_ce(ce);
+ return false;
+}
+
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
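
To make the ring arithmetic in gen8_ggtt_bind_ptes() above concrete: each MI_UPDATE_GTT block costs two header dwords plus two dwords per PTE, capped at 511 PTEs per block. A toy sketch of just that chunking math:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the batching in gen8_ggtt_bind_ptes(): at most 511 PTEs per
 * MI_UPDATE_GTT, 2 dwords per PTE, plus a 2-dword command header. */
static void plan_binds(uint32_t num_entries)
{
	while (num_entries) {
		uint32_t n_ptes = num_entries < 511 ? num_entries : 511;
		uint32_t ring_dwords = 2 * n_ptes + 2;

		printf("emit %u PTEs (%u ring dwords)\n", n_ptes, ring_dwords);
		num_entries -= n_ptes;
	}
}

int main(void)
{
	plan_binds(1200);	/* 511 + 511 + 178 */
	return 0;
}
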
@@ -272,6 +416,21 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
+ dma_addr_t addr, u64 offset,
+ unsigned int pat_index, u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t pte;
+
+ pte = ggtt->vm.pte_encode(addr, pat_index, flags);
+ if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
+ gen8_ggtt_bind_ptes(ggtt, offset, NULL, 1, pte))
+ return ggtt->invalidate(ggtt);
+
+ gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags);
+}
+
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma_resource *vma_res,
unsigned int pat_index,
@@ -311,6 +470,50 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
ggtt->invalidate(ggtt);
}
+static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ unsigned int pat_index, u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t scratch_pte = vm->scratch[0]->encode;
+ gen8_pte_t pte_encode;
+ u64 start, end;
+
+ pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
+ start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
+ end = start + vma_res->guard / I915_GTT_PAGE_SIZE;
+ if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
+ goto err;
+
+ start = end;
+ end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
+ if (!gen8_ggtt_bind_ptes(ggtt, start, vma_res->bi.pages,
+ vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode))
+ goto err;
+
+ start += vma_res->node_size / I915_GTT_PAGE_SIZE;
+ if (!gen8_ggtt_bind_ptes(ggtt, start, NULL, end - start, scratch_pte))
+ goto err;
+
+ return true;
+
+err:
+ return false;
+}
+
+static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ unsigned int pat_index, u32 flags)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
+ __gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags))
+ return ggtt->invalidate(ggtt);
+
+ gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags);
+}
+
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@@ -332,6 +535,27 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
gen8_set_pte(&gtt_base[i], scratch_pte);
}
+static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
+ unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
+ const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
+
+ if (WARN(num_entries > max_entries,
+ "First entry = %d; Num entries = %d (max=%d)\n",
+ first_entry, num_entries, max_entries))
+ num_entries = max_entries;
+
+ if (should_update_ggtt_with_bind(ggtt) && gen8_ggtt_bind_ptes(ggtt, first_entry,
+ NULL, num_entries, scratch_pte))
+ return ggtt->invalidate(ggtt);
+
+ gen8_ggtt_clear_range(vm, start, length);
+}
+
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
u64 offset,
@@ -1008,6 +1232,17 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
+ if (i915_ggtt_require_binder(i915)) {
+ ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind;
+ ggtt->vm.insert_page = gen8_ggtt_insert_page_bind;
+ ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind;
+ /*
+ * If the GPU is hung, we might need to bind VMAs for error capture.
+ * Fall back to CPU GGTT updates in that case.
+ */
+ ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
+ }
+
if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
ggtt->invalidate = guc_ggtt_invalidate;
else
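
All three *_bind vfuncs installed above in gen8_gmch_probe() share one dispatch shape: try the blitter-based update when the bind context is ready, invalidate on success, and fall back to the MMIO path on any failure. A condensed standalone sketch of that pattern (hypothetical names):

#include <stdbool.h>
#include <stdio.h>

static bool bind_ready;

static bool bind_via_blitter(void) { return bind_ready; /* may also fail at runtime */ }
static void write_via_mmio(void)   { puts("mmio fallback"); }
static void invalidate(void)       { puts("invalidate"); }

/* Same shape as gen8_ggtt_insert_page_bind() and friends. */
static void insert_page(void)
{
	if (bind_ready && bind_via_blitter()) {
		invalidate();		/* blitter path succeeded */
		return;
	}
	write_via_mmio();		/* hung GPU, early boot, GT asleep, ... */
}

int main(void)
{
	insert_page();			/* mmio fallback */
	bind_ready = true;
	insert_page();			/* invalidate */
	return 0;
}
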
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index bcc3605158db..6d440de8ba01 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -11,6 +11,7 @@
#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#define GSC_BAR_LENGTH 0x00000FFC
@@ -49,13 +50,13 @@ gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size
I915_BO_ALLOC_CONTIGUOUS |
I915_BO_ALLOC_CPU_CLEAR);
if (IS_ERR(obj)) {
- drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
+ gt_err(gt, "Failed to allocate gsc memory\n");
return PTR_ERR(obj);
}
err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
- drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
+ gt_err(gt, "Failed to pin pages for gsc memory\n");
goto out_put;
}
@@ -286,12 +287,12 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
int ret;
if (intf_id >= INTEL_GSC_NUM_INTERFACES) {
- drm_warn_once(&gt->i915->drm, "GSC irq: intf_id %d is out of range", intf_id);
+ gt_warn_once(gt, "GSC irq: intf_id %d is out of range", intf_id);
return;
}
if (!HAS_HECI_GSC(gt->i915)) {
- drm_warn_once(&gt->i915->drm, "GSC irq: not supported");
+ gt_warn_once(gt, "GSC irq: not supported");
return;
}
@@ -300,7 +301,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
if (ret)
- drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
+ gt_err_ratelimited(gt, "error handling GSC irq: %d\n", ret);
}
void intel_gsc_irq_handler(struct intel_gt *gt, u32 iir)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index bb6c3f68f7d2..ed32bf5b1546 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -268,10 +268,21 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
I915_MASTER_ERROR_INTERRUPT);
}
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ /*
+ * For the media GT, this ring fault register is not replicated,
+ * so don't do multicast/replicated register read/write operations on it.
+ */
+ if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
+ intel_uncore_rmw(uncore, XELPMP_RING_FAULT_REG,
+ RING_FAULT_VALID, 0);
+ intel_uncore_posting_read(uncore,
+ XELPMP_RING_FAULT_REG);
+
+ } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
RING_FAULT_VALID, 0);
intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
+
} else if (GRAPHICS_VER(i915) >= 12) {
intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
@@ -1028,3 +1039,52 @@ bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
}
+
+static void __intel_gt_bind_context_set_ready(struct intel_gt *gt, bool ready)
+{
+ struct intel_engine_cs *engine = gt->engine[BCS0];
+
+ if (engine && engine->bind_context)
+ engine->bind_context_ready = ready;
+}
+
+/**
+ * intel_gt_bind_context_set_ready - Set the context binding as ready
+ *
+ * @gt: GT structure
+ *
+ * This function marks the binder context as ready.
+ */
+void intel_gt_bind_context_set_ready(struct intel_gt *gt)
+{
+ __intel_gt_bind_context_set_ready(gt, true);
+}
+
+/**
+ * intel_gt_bind_context_set_unready - Set the context binding as not ready
+ * @gt: GT structure
+ *
+ * This function marks the binder context as not ready.
+ */
+void intel_gt_bind_context_set_unready(struct intel_gt *gt)
+{
+ __intel_gt_bind_context_set_ready(gt, false);
+}
+
+/**
+ * intel_gt_is_bind_context_ready - Check if context binding is ready
+ *
+ * @gt: GT structure
+ *
+ * This function returns the binder context's ready status.
+ */
+bool intel_gt_is_bind_context_ready(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine = gt->engine[BCS0];
+
+ if (engine)
+ return engine->bind_context_ready;
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 2cac499d5aa3..970bedf6b78a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -176,4 +176,7 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
struct drm_i915_gem_object *obj,
bool always_coherent);
+void intel_gt_bind_context_set_ready(struct intel_gt *gt);
+void intel_gt_bind_context_set_unready(struct intel_gt *gt);
+bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index bf4a933de03a..326c2ed1d99b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -420,6 +420,28 @@ void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
}
/**
+ * intel_gt_mcr_lock_sanitize - Sanitize MCR steering lock
+ * @gt: GT structure
+ *
+ * This will be used to sanitize the initial status of the hardware lock
+ * during driver load and resume since there won't be any concurrent access
+ * from other agents at those times, but it's possible that boot firmware
+ * may have left the lock in a bad state.
+ */
+void intel_gt_mcr_lock_sanitize(struct intel_gt *gt)
+{
+ /*
+ * This gets called at load/resume time, so we shouldn't be
+ * racing with other driver threads grabbing the mcr lock.
+ */
+ lockdep_assert_not_held(&gt->mcr_lock);
+
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
+}
+
+/**
* intel_gt_mcr_read - read a specific instance of an MCR register
* @gt: GT structure
* @reg: the MCR register to read
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
index 41684495b7da..01ac565a56a4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -11,6 +11,7 @@
void intel_gt_mcr_init(struct intel_gt *gt);
void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags);
void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags);
+void intel_gt_mcr_lock_sanitize(struct intel_gt *gt);
u32 intel_gt_mcr_read(struct intel_gt *gt,
i915_mcr_reg_t reg,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 5a942af0a14e..f5899d503e23 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -13,6 +13,7 @@
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_print.h"
#include "intel_gt_requests.h"
@@ -216,6 +217,21 @@ void intel_gt_pm_fini(struct intel_gt *gt)
intel_rc6_fini(&gt->rc6);
}
+void intel_gt_resume_early(struct intel_gt *gt)
+{
+ /*
+ * Sanitize steer semaphores during driver resume. This is necessary
+ * to address observed cases of steer semaphores being
+ * held after a suspend operation. Confirmation from the hardware team
+ * assures the safety of this operation, as no lock acquisitions
+ * by other agents occur during driver load/resume process.
+ */
+ intel_gt_mcr_lock_sanitize(gt);
+
+ intel_uncore_resume_early(gt->uncore);
+ intel_gt_check_and_clear_faults(gt);
+}
+
int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
@@ -280,6 +296,7 @@ int intel_gt_resume(struct intel_gt *gt)
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
+ intel_gt_bind_context_set_ready(gt);
return err;
err_wedged:
@@ -306,6 +323,7 @@ static void wait_for_suspend(struct intel_gt *gt)
void intel_gt_suspend_prepare(struct intel_gt *gt)
{
+ intel_gt_bind_context_set_unready(gt);
user_forcewake(gt, true);
wait_for_suspend(gt);
}
@@ -359,6 +377,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
+ intel_gt_bind_context_set_unready(gt);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -376,6 +395,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;
+ intel_gt_bind_context_set_ready(gt);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 6c9a46452364..b1eeb5b33918 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -78,6 +78,7 @@ void intel_gt_pm_fini(struct intel_gt *gt);
void intel_gt_suspend_prepare(struct intel_gt *gt);
void intel_gt_suspend_late(struct intel_gt *gt);
int intel_gt_resume(struct intel_gt *gt);
+void intel_gt_resume_early(struct intel_gt *gt);
void intel_gt_runtime_suspend(struct intel_gt *gt);
int intel_gt_runtime_resume(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_print.h b/drivers/gpu/drm/i915/gt/intel_gt_print.h
index 55a336a9ff06..7fdc78c79273 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_print.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_print.h
@@ -16,6 +16,9 @@
#define gt_warn(_gt, _fmt, ...) \
drm_warn(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+#define gt_warn_once(_gt, _fmt, ...) \
+ drm_warn_once(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
#define gt_notice(_gt, _fmt, ...) \
drm_notice(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index cca4bac8f8b0..eecd0a87a647 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -1084,6 +1084,7 @@
#define GEN12_RING_FAULT_REG _MMIO(0xcec4)
#define XEHP_RING_FAULT_REG MCR_REG(0xcec4)
+#define XELPMP_RING_FAULT_REG _MMIO(0xcec4)
#define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
#define RING_FAULT_GTTSEL_MASK (1 << 11)
#define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 13944a14ea2d..4fbed27ef0ec 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -21,6 +21,11 @@
#include "intel_gt_regs.h"
#include "intel_gtt.h"
+bool i915_ggtt_require_binder(struct drm_i915_private *i915)
+{
+ /* Wa_13010847436 & Wa_14019519902 */
+ return MEDIA_VER_FULL(i915) == IP_VER(13, 0);
+}
static bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915)
{
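
The binder requirement is gated on the full media IP version being exactly 13.0 (the Meteor Lake media tile). The version packing is a simple shift, matching i915's IP_VER() definition; a standalone sketch:

#include <stdio.h>

/* Same packing as i915's IP_VER(): version in the high byte, release low. */
#define IP_VER(ver, rel)	((ver) << 8 | (rel))

int main(void)
{
	unsigned int media_ver = IP_VER(13, 0);	/* 0x0d00 */

	/* Exact-match gate, as in i915_ggtt_require_binder(). */
	printf("binder required: %s\n",
	       media_ver == IP_VER(13, 0) ? "yes" : "no");
	return 0;
}
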
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 346ec8ec2edd..b471edac2699 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -171,6 +171,9 @@ struct intel_gt;
#define for_each_sgt_daddr(__dp, __iter, __sgt) \
__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
+#define for_each_sgt_daddr_next(__dp, __iter) \
+ __for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE)
+
struct i915_page_table {
struct drm_i915_gem_object *base;
union {
@@ -688,4 +691,6 @@ static inline struct sgt_dma {
return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
}
+bool i915_ggtt_require_binder(struct drm_i915_private *i915);
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index a21e939fdbf6..d5ed904f355d 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -26,6 +26,7 @@
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
@@ -592,10 +593,10 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
- drm_err(&engine->i915->drm,
- "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
- engine->name, request,
- intel_uncore_read_fw(uncore, reg));
+ gt_err(engine->gt,
+ "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+ engine->name, request,
+ intel_uncore_read_fw(uncore, reg));
return ret;
}
@@ -1199,17 +1200,16 @@ void intel_gt_reset(struct intel_gt *gt,
goto unlock;
if (reason)
- drm_notice(&gt->i915->drm,
- "Resetting chip for %s\n", reason);
+ gt_notice(gt, "Resetting chip for %s\n", reason);
atomic_inc(&gt->i915->gpu_error.reset_count);
awake = reset_prepare(gt);
if (!intel_has_gpu_reset(gt)) {
if (gt->i915->params.reset)
- drm_err(&gt->i915->drm, "GPU reset not supported\n");
+ gt_err(gt, "GPU reset not supported\n");
else
- drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
+ gt_dbg(gt, "GPU reset disabled\n");
goto error;
}
@@ -1217,7 +1217,7 @@ void intel_gt_reset(struct intel_gt *gt,
intel_runtime_pm_disable_interrupts(gt->i915);
if (do_reset(gt, stalled_mask)) {
- drm_err(&gt->i915->drm, "Failed to reset chip\n");
+ gt_err(gt, "Failed to reset chip\n");
goto taint;
}
@@ -1236,9 +1236,7 @@ void intel_gt_reset(struct intel_gt *gt,
*/
ret = intel_gt_init_hw(gt);
if (ret) {
- drm_err(&gt->i915->drm,
- "Failed to initialise HW following reset (%d)\n",
- ret);
+ gt_err(gt, "Failed to initialise HW following reset (%d)\n", ret);
goto taint;
}
@@ -1605,9 +1603,7 @@ static void intel_wedge_me(struct work_struct *work)
{
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
- drm_err(&w->gt->i915->drm,
- "%s timed out, cancelling all in-flight rendering.\n",
- w->name);
+ gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name);
intel_gt_set_wedged(w->gt);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index b86a10b1f534..192ac0e59afa 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -11,6 +11,7 @@
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
@@ -119,8 +120,8 @@ static void wa_init_finish(struct i915_wa_list *wal)
if (!wal->count)
return;
- drm_dbg(&wal->gt->i915->drm, "Initialized %u %s workarounds on %s\n",
- wal->wa_count, wal->name, wal->engine_name);
+ gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
+ wal->wa_count, wal->name, wal->engine_name);
}
static enum forcewake_domains
@@ -1780,10 +1781,10 @@ wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
const char *name, const char *from)
{
if ((cur ^ wa->set) & wa->read) {
- drm_err(&gt->i915->drm,
- "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
- name, from, i915_mmio_reg_offset(wa->reg),
- cur, cur & wa->read, wa->set & wa->read);
+ gt_err(gt,
+ "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg),
+ cur, cur & wa->read, wa->set & wa->read);
return false;
}
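
The wa_verify() condition asks: among the bits that can meaningfully be read back (wa->read), does the current value differ from what was programmed (wa->set)? Isolated as a tiny predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* The check at the heart of wa_verify(): a workaround is "lost" when any
 * readable bit of the current value differs from the value that was set. */
static bool wa_still_applied(uint32_t cur, uint32_t set, uint32_t read_mask)
{
	return ((cur ^ set) & read_mask) == 0;
}

int main(void)
{
	/* bit 3 is readable and was set, but reads back clear -> lost */
	printf("%d\n", wa_still_applied(0x0, 0x8, 0x8));	/* 0 */
	printf("%d\n", wa_still_applied(0x8, 0x8, 0x8));	/* 1 */
	return 0;
}
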
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index 0d3b22a74365..453d855dd1de 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -68,8 +68,7 @@ static void gsc_work(struct work_struct *work)
* A proxy failure right after firmware load means the proxy-init
* step has failed so mark GSC as not usable after this
*/
- drm_err(&gt->i915->drm,
- "GSC proxy handler failed to init\n");
+ gt_err(gt, "GSC proxy handler failed to init\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
}
goto out_put;
@@ -83,11 +82,10 @@ static void gsc_work(struct work_struct *work)
* status register to check if the proxy init was actually successful
*/
if (intel_gsc_uc_fw_proxy_init_done(gsc, false)) {
- drm_dbg(&gt->i915->drm, "GSC Proxy initialized\n");
+ gt_dbg(gt, "GSC Proxy initialized\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
} else {
- drm_err(&gt->i915->drm,
- "GSC status reports proxy init not complete\n");
+ gt_err(gt, "GSC status reports proxy init not complete\n");
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 27df41c53b89..3f3df1166b86 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -319,6 +319,12 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (!RCS_MASK(gt))
flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
+ /* Wa_14018913170 */
+ if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 7, 0)) {
+ if (IS_DG2(gt->i915) || IS_METEORLAKE(gt->i915) || IS_PONTEVECCHIO(gt->i915))
+ flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
+ }
+
return flags;
}
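
MAKE_GUC_VER() packs major/minor/patch into one integer, so the Wa_14018913170 gate above reduces to a single comparison. A standalone sketch using the macro's definition from intel_guc.h:

#include <stdio.h>

/* As defined in intel_guc.h. */
#define MAKE_GUC_VER(maj, min, pat)	(((maj) << 16) | ((min) << 8) | (pat))

int main(void)
{
	unsigned int fw = MAKE_GUC_VER(70, 12, 1);	/* 0x460c01 */

	/* Wa_14018913170 gate: needs GuC 70.7.0 or newer. */
	printf("apply RC6 TSC check wa: %s\n",
	       fw >= MAKE_GUC_VER(70, 7, 0) ? "yes" : "no");
	return 0;
}
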
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 6c392bad29c1..818c8c146fd4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -295,6 +295,7 @@ struct intel_guc {
#define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
#define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version)
+#define GUC_FIRMWARE_VER(guc) MAKE_GUC_VER_STRUCT((guc)->fw.file_selected.ver)
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 6e22af31513a..c33210ead1ef 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -96,7 +96,7 @@ struct ct_request {
struct ct_incoming_msg {
struct list_head link;
u32 size;
- u32 msg[];
+ u32 msg[] __counted_by(size);
};
enum { CTB_SEND = 0, CTB_RECV = 1 };
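
__counted_by(size) tells FORTIFY-enabled compilers that msg[] holds exactly `size` elements, so out-of-bounds accesses through the flexible array can be caught. The allocation contract it documents looks like the following userspace sketch (the attribute itself needs a recent GCC/Clang and is elided here):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ct_incoming_msg {
	uint32_t size;
	uint32_t msg[];		/* the kernel annotates this __counted_by(size) */
};

static struct ct_incoming_msg *msg_alloc(uint32_t num_dwords)
{
	struct ct_incoming_msg *m;

	/* trailing storage sized by the count the annotation refers to */
	m = malloc(sizeof(*m) + num_dwords * sizeof(uint32_t));
	if (m) {
		m->size = num_dwords;	/* set the count before touching msg[] */
		memset(m->msg, 0, num_dwords * sizeof(uint32_t));
	}
	return m;
}

int main(void)
{
	struct ct_incoming_msg *m = msg_alloc(4);

	if (m)
		printf("allocated %u dwords\n", m->size);
	free(m);
	return 0;
}
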
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index b4d56eccfb1f..123ad75d2eb2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -100,6 +100,7 @@
#define GUC_WA_HOLD_CCS_SWITCHOUT BIT(17)
#define GUC_WA_POLLCS BIT(18)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
+#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index ae3495a9c814..2cce5ec1ff00 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -4802,19 +4802,19 @@ static void guc_context_replay(struct intel_context *ce)
static void guc_handle_context_reset(struct intel_guc *guc,
struct intel_context *ce)
{
+ bool capture = intel_context_is_schedulable(ce);
+
trace_intel_context_reset(ce);
- guc_dbg(guc, "Got context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
+ guc_dbg(guc, "%s context reset notification: 0x%04X on %s, exiting = %s, banned = %s\n",
+ capture ? "Got" : "Ignoring",
ce->guc_id.id, ce->engine->name,
str_yes_no(intel_context_is_exiting(ce)),
str_yes_no(intel_context_is_banned(ce)));
- if (likely(intel_context_is_schedulable(ce))) {
+ if (capture) {
capture_error_state(guc, ce);
guc_context_replay(ce);
- } else {
- guc_info(guc, "Ignoring context reset notification of exiting context 0x%04X on %s",
- ce->guc_id.id, ce->engine->name);
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 32e27e9a2490..362639162ed6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -88,12 +88,12 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* security fixes, etc. to be enabled.
*/
#define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_maj, guc_mmp) \
- fw_def(METEORLAKE, 0, guc_maj(mtl, 70, 6, 6)) \
- fw_def(DG2, 0, guc_maj(dg2, 70, 5, 1)) \
- fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 5, 1)) \
+ fw_def(METEORLAKE, 0, guc_maj(mtl, 70, 12, 1)) \
+ fw_def(DG2, 0, guc_maj(dg2, 70, 12, 1)) \
+ fw_def(ALDERLAKE_P, 0, guc_maj(adlp, 70, 12, 1)) \
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 70, 1, 1)) \
fw_def(ALDERLAKE_P, 0, guc_mmp(adlp, 69, 0, 3)) \
- fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 5, 1)) \
+ fw_def(ALDERLAKE_S, 0, guc_maj(tgl, 70, 12, 1)) \
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 70, 1, 1)) \
fw_def(ALDERLAKE_S, 0, guc_mmp(tgl, 69, 0, 3)) \
fw_def(DG1, 0, guc_maj(dg1, 70, 5, 1)) \