Diffstat (limited to 'drivers/gpu/drm/xe/xe_guc_ct.c')
-rw-r--r-- | drivers/gpu/drm/xe/xe_guc_ct.c | 1011 |
1 files changed, 783 insertions, 228 deletions
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 24a33fa36496..2447de0ebedf 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -8,26 +8,73 @@
 #include <linux/bitfield.h>
 #include <linux/circ_buf.h>
 #include <linux/delay.h>
+#include <linux/fault-inject.h>
+
+#include <kunit/static_stub.h>
 
 #include <drm/drm_managed.h>
 
 #include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_sriov_abi.h"
 #include "abi/guc_klvs_abi.h"
 #include "xe_bo.h"
+#include "xe_devcoredump.h"
 #include "xe_device.h"
 #include "xe_gt.h"
 #include "xe_gt_pagefault.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_monitor.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_guc.h"
+#include "xe_guc_log.h"
+#include "xe_guc_relay.h"
 #include "xe_guc_submit.h"
 #include "xe_map.h"
 #include "xe_pm.h"
-#include "xe_trace.h"
+#include "xe_trace_guc.h"
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+enum {
+	/* Internal states, not error conditions */
+	CT_DEAD_STATE_REARM,			/* 0x0001 */
+	CT_DEAD_STATE_CAPTURE,			/* 0x0002 */
+
+	/* Error conditions */
+	CT_DEAD_SETUP,				/* 0x0004 */
+	CT_DEAD_H2G_WRITE,			/* 0x0008 */
+	CT_DEAD_H2G_HAS_ROOM,			/* 0x0010 */
+	CT_DEAD_G2H_READ,			/* 0x0020 */
+	CT_DEAD_G2H_RECV,			/* 0x0040 */
+	CT_DEAD_G2H_RELEASE,			/* 0x0080 */
+	CT_DEAD_DEADLOCK,			/* 0x0100 */
+	CT_DEAD_PROCESS_FAILED,			/* 0x0200 */
+	CT_DEAD_FAST_G2H,			/* 0x0400 */
+	CT_DEAD_PARSE_G2H_RESPONSE,		/* 0x0800 */
+	CT_DEAD_PARSE_G2H_UNKNOWN,		/* 0x1000 */
+	CT_DEAD_PARSE_G2H_ORIGIN,		/* 0x2000 */
+	CT_DEAD_PARSE_G2H_TYPE,			/* 0x4000 */
+	CT_DEAD_CRASH,				/* 0x8000 */
+};
+
+static void ct_dead_worker_func(struct work_struct *w);
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
+
+#define CT_DEAD(ct, ctb, reason_code)	ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
+#else
+#define CT_DEAD(ct, ctb, reason)		\
+	do {					\
+		struct guc_ctb *_ctb = (ctb);	\
+		if (_ctb)			\
+			_ctb->info.broken = true; \
+	} while (0)
+#endif
 
 /* Used when a CT send wants to block and / or receive data */
 struct g2h_fence {
 	u32 *response_buffer;
 	u32 seqno;
+	u32 response_data;
 	u16 response_len;
 	u16 error;
 	u16 hint;
@@ -40,6 +87,7 @@ struct g2h_fence {
 static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
 {
 	g2h_fence->response_buffer = response_buffer;
+	g2h_fence->response_data = 0;
 	g2h_fence->response_len = 0;
 	g2h_fence->fail = false;
 	g2h_fence->retry = false;
@@ -96,12 +144,37 @@ ct_to_xe(struct xe_guc_ct *ct)
  * enough space to avoid backpressure on the driver. We increase the size
  * of the receive buffer (relative to the send) to ensure a G2H response
  * CTB has a landing spot.
+ *
+ * In addition to submissions, the G2H buffer needs to be able to hold
+ * enough space for recoverable page fault notifications. The number of
+ * page faults is interrupt driven and can be as much as the number of
+ * compute resources available. However, most of the actual work for these
+ * is in a separate page fault worker thread. Therefore we only need to
+ * make sure the queue has enough space to handle all of the submissions
+ * and responses and an extra buffer for incoming page faults.
 */
 #define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
 #define CTB_H2G_BUFFER_SIZE	(SZ_4K)
-#define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
-#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)
+#define CTB_G2H_BUFFER_SIZE	(SZ_128K)
+#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 2)
+
+/**
+ * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
+ * CT command queue
+ * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
+ *
+ * Observation is that a 4KiB buffer full of commands takes a little over a
+ * second to process. Use that to calculate maximum time to process a full CT
+ * command queue.
+ *
+ * Return: Maximum time to process a full CT queue in jiffies.
+ */
+long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
+{
+	BUILD_BUG_ON(!IS_ALIGNED(CTB_H2G_BUFFER_SIZE, SZ_4));
+	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
+}
 
 static size_t guc_ct_size(void)
 {
@@ -113,10 +186,13 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
 {
 	struct xe_guc_ct *ct = arg;
 
+	destroy_workqueue(ct->g2h_wq);
 	xa_destroy(&ct->fence_lookup);
 }
 
+static void receive_g2h(struct xe_guc_ct *ct);
 static void g2h_worker_func(struct work_struct *w);
+static void safe_mode_worker_func(struct work_struct *w);
 
 static void primelockdep(struct xe_guc_ct *ct)
 {
@@ -136,20 +212,34 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
 	struct xe_bo *bo;
 	int err;
 
-	xe_assert(xe, !(guc_ct_size() % PAGE_SIZE));
+	xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
+
+	ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
+	if (!ct->g2h_wq)
+		return -ENOMEM;
 
-	drmm_mutex_init(&xe->drm, &ct->lock);
 	spin_lock_init(&ct->fast_lock);
 	xa_init(&ct->fence_lookup);
 	INIT_WORK(&ct->g2h_worker, g2h_worker_func);
+	INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+	spin_lock_init(&ct->dead.lock);
+	INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
+#endif
 	init_waitqueue_head(&ct->wq);
 	init_waitqueue_head(&ct->g2h_fence_wq);
 
+	err = drmm_mutex_init(&xe->drm, &ct->lock);
+	if (err)
+		return err;
+
 	primelockdep(ct);
 
 	bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
-					  XE_BO_CREATE_VRAM_IF_DGFX(tile) |
-					  XE_BO_CREATE_GGTT_BIT);
+					  XE_BO_FLAG_SYSTEM |
+					  XE_BO_FLAG_GGTT |
+					  XE_BO_FLAG_GGTT_INVALIDATE |
+					  XE_BO_FLAG_PINNED_NORESTORE);
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
@@ -159,8 +249,11 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
 	if (err)
 		return err;
 
+	xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
+	ct->state = XE_GUC_CT_STATE_DISABLED;
 	return 0;
 }
+ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
 
 #define desc_read(xe_, guc_ctb__, field_)			\
 	xe_map_rd_field(xe_, &guc_ctb__->desc, 0,		\
@@ -278,13 +371,76 @@ static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
 	return ret > 0 ? -EPROTO : ret;
 }
 
+static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
+				enum xe_guc_ct_state state)
+{
+	mutex_lock(&ct->lock);		/* Serialise dequeue_one_g2h() */
+	spin_lock_irq(&ct->fast_lock);	/* Serialise CT fast-path */
+
+	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
+		     state == XE_GUC_CT_STATE_STOPPED);
+
+	if (ct->g2h_outstanding)
+		xe_pm_runtime_put(ct_to_xe(ct));
+	ct->g2h_outstanding = 0;
+	ct->state = state;
+
+	spin_unlock_irq(&ct->fast_lock);
+
+	/*
+	 * Lockdep doesn't like this under the fast lock and the destroy only
+	 * needs to be serialized with the send path which ct lock provides.
+	 */
+	xa_destroy(&ct->fence_lookup);
+
+	mutex_unlock(&ct->lock);
+}
+
+static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
+{
+	return !pci_dev_msi_enabled(to_pci_dev(ct_to_xe(ct)->drm.dev));
+}
+
+static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
+{
+	if (!ct_needs_safe_mode(ct))
+		return false;
+
+	queue_delayed_work(ct->g2h_wq, &ct->safe_mode_worker, HZ / 10);
+	return true;
+}
+
+static void safe_mode_worker_func(struct work_struct *w)
+{
+	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, safe_mode_worker.work);
+
+	receive_g2h(ct);
+
+	if (!ct_restart_safe_mode_worker(ct))
+		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode canceled\n");
+}
+
+static void ct_enter_safe_mode(struct xe_guc_ct *ct)
+{
+	if (ct_restart_safe_mode_worker(ct))
+		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode enabled\n");
+}
+
+static void ct_exit_safe_mode(struct xe_guc_ct *ct)
+{
+	if (cancel_delayed_work_sync(&ct->safe_mode_worker))
+		xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
+}
+
 int xe_guc_ct_enable(struct xe_guc_ct *ct)
 {
 	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt = ct_to_gt(ct);
 	int err;
 
-	xe_assert(xe, !ct->enabled);
+	xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
 
+	xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
 	guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
 	guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
 
@@ -300,34 +456,66 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
 	if (err)
 		goto err_out;
 
-	mutex_lock(&ct->lock);
-	spin_lock_irq(&ct->fast_lock);
-	ct->g2h_outstanding = 0;
-	ct->enabled = true;
-	spin_unlock_irq(&ct->fast_lock);
-	mutex_unlock(&ct->lock);
+	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
 
 	smp_mb();
 	wake_up_all(&ct->wq);
-	drm_dbg(&xe->drm, "GuC CT communication channel enabled\n");
+	xe_gt_dbg(gt, "GuC CT communication channel enabled\n");
+
+	if (ct_needs_safe_mode(ct))
+		ct_enter_safe_mode(ct);
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+	/*
+	 * The CT has now been reset so the dumper can be re-armed
+	 * after any existing dead state has been dumped.
+	 */
+	spin_lock_irq(&ct->dead.lock);
+	if (ct->dead.reason) {
+		ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
+		queue_work(system_unbound_wq, &ct->dead.worker);
+	}
+	spin_unlock_irq(&ct->dead.lock);
+#endif
 
 	return 0;
 
 err_out:
-	drm_err(&xe->drm, "Failed to enable CT (%d)\n", err);
+	xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
+	CT_DEAD(ct, NULL, SETUP);
 
 	return err;
 }
 
+static void stop_g2h_handler(struct xe_guc_ct *ct)
+{
+	cancel_work_sync(&ct->g2h_worker);
+}
+
+/**
+ * xe_guc_ct_disable - Set GuC to disabled state
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to disabled state and stop g2h handler. No outstanding g2h expected
+ * in this transition.
+ */
 void xe_guc_ct_disable(struct xe_guc_ct *ct)
 {
-	mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
-	spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
-	ct->enabled = false; /* Finally disable CT communication */
-	spin_unlock_irq(&ct->fast_lock);
-	mutex_unlock(&ct->lock);
+	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
+	ct_exit_safe_mode(ct);
+	stop_g2h_handler(ct);
+}
 
-	xa_destroy(&ct->fence_lookup);
+/**
+ * xe_guc_ct_stop - Set GuC to stopped state
+ * @ct: the &xe_guc_ct
+ *
+ * Set GuC CT to stopped state, stop g2h handler, and clear any outstanding g2h
+ */
+void xe_guc_ct_stop(struct xe_guc_ct *ct)
+{
+	xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
+	stop_g2h_handler(ct);
 }
 
 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
@@ -338,6 +526,19 @@ static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
 
 	if (cmd_len > h2g->info.space) {
 		h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+
+		if (h2g->info.head > h2g->info.size) {
+			struct xe_device *xe = ct_to_xe(ct);
+			u32 desc_status = desc_read(xe, h2g, status);
+
+			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+
+			xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n",
+				  h2g->info.head, h2g->info.size);
+			CT_DEAD(ct, h2g, H2G_HAS_ROOM);
+			return false;
+		}
+
 		h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
 					     h2g->info.size) -
 				  h2g->info.resv_space;
@@ -376,11 +577,16 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
 
 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
 {
-	xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);
+	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
+	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
+		     (g2h_len && num_g2h));
 
 	if (g2h_len) {
 		lockdep_assert_held(&ct->fast_lock);
 
+		if (!ct->g2h_outstanding)
+			xe_pm_runtime_get_noresume(ct_to_xe(ct));
+
 		ct->ctbs.g2h.info.space -= g2h_len;
 		ct->g2h_outstanding += num_g2h;
 	}
@@ -388,12 +594,28 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
 
 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
 {
+	bool bad = false;
+
 	lockdep_assert_held(&ct->fast_lock);
-	xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
-		  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
+
+	bad = ct->ctbs.g2h.info.space + g2h_len >
+	      ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
+	bad |= !ct->g2h_outstanding;
+
+	if (bad) {
+		xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
+			  ct->ctbs.g2h.info.space, g2h_len,
+			  ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
+			  ct->ctbs.g2h.info.space + g2h_len,
+			  ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
+			  ct->g2h_outstanding);
+		CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
+		return;
+	}
 
 	ct->ctbs.g2h.info.space += g2h_len;
-	--ct->g2h_outstanding;
+	if (!--ct->g2h_outstanding)
+		xe_pm_runtime_put(ct_to_xe(ct));
 }
 
 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
@@ -409,18 +631,50 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
 		     u32 ct_fence_value, bool want_response)
 {
 	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt = ct_to_gt(ct);
 	struct guc_ctb *h2g = &ct->ctbs.h2g;
 	u32 cmd[H2G_CT_HEADERS];
 	u32 tail = h2g->info.tail;
 	u32 full_len;
 	struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
							 tail * sizeof(u32));
+	u32 desc_status;
 
 	full_len = len + GUC_CTB_HDR_LEN;
 
 	lockdep_assert_held(&ct->lock);
-	xe_assert(xe, full_len <= GUC_CTB_MSG_MAX_LEN);
-	xe_assert(xe, tail <= h2g->info.size);
+	xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
+
+	desc_status = desc_read(xe, h2g, status);
+	if (desc_status) {
+		xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
+		goto corrupted;
+	}
+
+	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+		u32 desc_tail = desc_read(xe, h2g, tail);
+		u32 desc_head = desc_read(xe, h2g, head);
+
+		if (tail != desc_tail) {
+			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+			xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
+			goto corrupted;
+		}
+
+		if (tail > h2g->info.size) {
+			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+			xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
+				  tail, h2g->info.size);
+			goto corrupted;
+		}
+
+		if (desc_head >= h2g->info.size) {
+			desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+			xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n",
+				  desc_head, h2g->info.size);
+			goto corrupted;
+		}
+	}
 
 	/* Command will wrap, zero fill (NOPs), return and check credits again */
 	if (tail + full_len > h2g->info.size) {
@@ -448,7 +702,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
 			   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
 	} else {
 		cmd[1] =
-			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
+			FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
 			FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
 				   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
 	}
@@ -457,7 +711,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
 	--len;
 	++action;
 
-	/* Write H2G ensuring visable before descriptor update */
+	/* Write H2G ensuring visible before descriptor update */
 	xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
 	xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
 	xe_device_wmb(xe);
@@ -469,23 +723,48 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
 	/* Update descriptor */
 	desc_write(xe, h2g, tail, h2g->info.tail);
 
-	trace_xe_guc_ctb_h2g(ct_to_gt(ct)->info.id, *(action - 1), full_len,
+	trace_xe_guc_ctb_h2g(xe, gt->info.id, *(action - 1), full_len,
 			     desc_read(xe, h2g, head), h2g->info.tail);
 
 	return 0;
+
+corrupted:
+	CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
+	return -EPIPE;
+}
+
+/*
+ * The CT protocol accepts a 16 bits fence. This field is fully owned by the
+ * driver, the GuC will just copy it to the reply message. Since we need to
+ * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
+ * we use one bit of the seqno as an indicator for that and a rolling counter
+ * for the remaining 15 bits.
+ */
+#define CT_SEQNO_MASK	GENMASK(14, 0)
+#define CT_SEQNO_UNTRACKED	BIT(15)
+static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
+{
+	u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
+
+	if (!is_g2h_fence)
+		seqno |= CT_SEQNO_UNTRACKED;
+
+	return seqno;
 }
 
 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
 				u32 len, u32 g2h_len, u32 num_g2h,
 				struct g2h_fence *g2h_fence)
 {
-	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
+	u16 seqno;
 	int ret;
 
-	xe_assert(xe, !g2h_len || !g2h_fence);
-	xe_assert(xe, !num_g2h || !g2h_fence);
-	xe_assert(xe, !g2h_len || num_g2h);
-	xe_assert(xe, g2h_len || !num_g2h);
+	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+	xe_gt_assert(gt, !g2h_len || !g2h_fence);
+	xe_gt_assert(gt, !num_g2h || !g2h_fence);
+	xe_gt_assert(gt, !g2h_len || num_g2h);
+	xe_gt_assert(gt, g2h_len || !num_g2h);
 	lockdep_assert_held(&ct->lock);
 
 	if (unlikely(ct->ctbs.h2g.info.broken)) {
@@ -493,27 +772,34 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
 		goto out;
 	}
 
-	if (unlikely(!ct->enabled)) {
+	if (ct->state == XE_GUC_CT_STATE_DISABLED) {
 		ret = -ENODEV;
 		goto out;
 	}
 
+	if (ct->state == XE_GUC_CT_STATE_STOPPED) {
+		ret = -ECANCELED;
+		goto out;
+	}
+
+	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
+
 	if (g2h_fence) {
 		g2h_len = GUC_CTB_HXG_MSG_MAX_LEN;
 		num_g2h = 1;
 
 		if (g2h_fence_needs_alloc(g2h_fence)) {
-			void *ptr;
-
-			g2h_fence->seqno = (ct->fence_seqno++ & 0xffff);
-			ptr = xa_store(&ct->fence_lookup,
-				       g2h_fence->seqno,
-				       g2h_fence, GFP_ATOMIC);
-			if (IS_ERR(ptr)) {
-				ret = PTR_ERR(ptr);
+			g2h_fence->seqno = next_ct_seqno(ct, true);
+			ret = xa_err(xa_store(&ct->fence_lookup,
+					      g2h_fence->seqno, g2h_fence,
+					      GFP_ATOMIC));
+			if (ret)
 				goto out;
-			}
 		}
+
+		seqno = g2h_fence->seqno;
+	} else {
+		seqno = next_ct_seqno(ct, false);
 	}
 
 	if (g2h_len)
@@ -523,8 +809,7 @@ retry:
 	if (unlikely(ret))
 		goto out_unlock;
 
-	ret = h2g_write(ct, action, len, g2h_fence ? g2h_fence->seqno : 0,
-			!!g2h_fence);
+	ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
 	if (unlikely(ret)) {
 		if (ret == -EAGAIN)
 			goto retry;
@@ -551,12 +836,12 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
 			      u32 len, u32 g2h_len, u32 num_g2h,
 			      struct g2h_fence *g2h_fence)
 {
-	struct drm_device *drm = &ct_to_xe(ct)->drm;
-	struct drm_printer p = drm_info_printer(drm->dev);
+	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt = ct_to_gt(ct);
 	unsigned int sleep_period_ms = 1;
 	int ret;
 
-	xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
+	xe_gt_assert(gt, !g2h_len || !g2h_fence);
 	lockdep_assert_held(&ct->lock);
 	xe_device_assert_mem_access(ct_to_xe(ct));
 
@@ -578,7 +863,7 @@ try_again:
 		if (sleep_period_ms == 1024)
 			goto broken;
 
-		trace_xe_guc_ct_h2g_flow_control(h2g->info.head, h2g->info.tail,
+		trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
 						 h2g->info.size,
 						 h2g->info.space,
 						 len + GUC_CTB_HDR_LEN);
@@ -590,7 +875,7 @@ try_again:
 		struct xe_device *xe = ct_to_xe(ct);
 		struct guc_ctb *g2h = &ct->ctbs.g2h;
 
-		trace_xe_guc_ct_g2h_flow_control(g2h->info.head,
+		trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
 						 desc_read(xe, g2h, tail),
 						 g2h->info.size,
 						 g2h->info.space,
@@ -605,8 +890,13 @@ try_again:
 			goto broken;
 #undef g2h_avail
 
-		if (dequeue_one_g2h(ct) < 0)
+		ret = dequeue_one_g2h(ct);
+		if (ret < 0) {
+			if (ret != -ECANCELED)
+				xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
+					  ERR_PTR(ret));
 			goto broken;
+		}
 
 		goto try_again;
 	}
@@ -614,9 +904,8 @@ try_again:
 	return ret;
 
 broken:
-	drm_err(drm, "No forward process on H2G, reset required");
-	xe_guc_ct_print(ct, &p, true);
-	ct->ctbs.h2g.info.broken = true;
+	xe_gt_err(gt, "No forward process on H2G, reset required\n");
+	CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
 
 	return -EDEADLK;
 }
@@ -626,7 +915,7 @@ static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
 {
 	int ret;
 
-	xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
+	xe_gt_assert(ct_to_gt(ct), !g2h_len || !g2h_fence);
 
 	mutex_lock(&ct->lock);
 	ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
@@ -682,8 +971,9 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
 		return false;
 
 #define ct_alive(ct)	\
-	(ct->enabled && !ct->ctbs.h2g.info.broken && !ct->ctbs.g2h.info.broken)
-	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
+	(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
+	 !ct->ctbs.g2h.info.broken)
+	if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
 		return false;
 #undef ct_alive
 
@@ -693,7 +983,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 			    u32 *response_buffer, bool no_fail)
 {
-	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt = ct_to_gt(ct);
 	struct g2h_fence g2h_fence;
 	int ret = 0;
 
@@ -710,14 +1000,11 @@ retry:
 retry_same_fence:
 	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
 	if (unlikely(ret == -ENOMEM)) {
-		void *ptr;
-
 		/* Retry allocation /w GFP_KERNEL */
-		ptr = xa_store(&ct->fence_lookup,
-			       g2h_fence.seqno,
-			       &g2h_fence, GFP_KERNEL);
-		if (IS_ERR(ptr))
-			return PTR_ERR(ptr);
+		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
+				      &g2h_fence, GFP_KERNEL));
+		if (ret)
+			return ret;
 
 		goto retry_same_fence;
 	} else if (unlikely(ret)) {
@@ -728,38 +1015,81 @@ retry_same_fence:
 			goto retry_same_fence;
 
 		if (!g2h_fence_needs_alloc(&g2h_fence))
-			xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+			xa_erase(&ct->fence_lookup, g2h_fence.seqno);
 
 		return ret;
 	}
 
 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
 	if (!ret) {
-		drm_err(&xe->drm, "Timed out wait for G2H, fence %u, action %04x",
-			g2h_fence.seqno, action[0]);
-		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+		LNL_FLUSH_WORK(&ct->g2h_worker);
+		if (g2h_fence.done) {
+			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+				   g2h_fence.seqno, action[0]);
+			ret = 1;
+		}
+	}
+
+	/*
+	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
+	 * the stack, since we have no clue if it will fire after the timeout before we can erase
+	 * from the xa. Also we have some dependent loads and stores below for which we need the
+	 * correct ordering, and we lack the needed barriers.
+	 */
+	mutex_lock(&ct->lock);
+	if (!ret) {
+		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
+			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
+		xa_erase(&ct->fence_lookup, g2h_fence.seqno);
+		mutex_unlock(&ct->lock);
+
 		return -ETIME;
 	}
 
 	if (g2h_fence.retry) {
-		drm_warn(&xe->drm, "Send retry, action 0x%04x, reason %d",
-			 action[0], g2h_fence.reason);
+		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
+			  action[0], g2h_fence.reason);
+		mutex_unlock(&ct->lock);
+
 		goto retry;
 	}
 
 	if (g2h_fence.fail) {
-		drm_err(&xe->drm, "Send failed, action 0x%04x, error %d, hint %d",
-			action[0], g2h_fence.error, g2h_fence.hint);
+		xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n",
+			  action[0], g2h_fence.error, g2h_fence.hint);
 		ret = -EIO;
 	}
 
-	return ret > 0 ? 0 : ret;
+	if (ret > 0)
+		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+
+	mutex_unlock(&ct->lock);
+
+	return ret;
 }
 
+/**
+ * xe_guc_ct_send_recv - Send and receive HXG to the GuC
+ * @ct: the &xe_guc_ct
+ * @action: the dword array with `HXG Request`_ message (can't be NULL)
+ * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
+ * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
+ *
+ * Send a `HXG Request`_ message to the GuC over the CT communication channel
+ * and block until GuC replies with a `HXG Response`_ message.
+ *
+ * For non-blocking communication with GuC use xe_guc_ct_send().
+ *
+ * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
+ *
+ * Return: response length (in dwords) if &response_buffer was not NULL, or
+ *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
+ *         a negative error code on failure.
+ */
 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
 			u32 *response_buffer)
 {
+	KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
 	return guc_ct_send_recv(ct, action, len, response_buffer, false);
 }
+ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
 
 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
 				u32 len, u32 *response_buffer)
@@ -767,9 +1097,20 @@ int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
 	return guc_ct_send_recv(ct, action, len, response_buffer, true);
 }
 
+static u32 *msg_to_hxg(u32 *msg)
+{
+	return msg + GUC_CTB_MSG_MIN_LEN;
+}
+
+static u32 msg_len_to_hxg_len(u32 len)
+{
+	return len - GUC_CTB_MSG_MIN_LEN;
+}
+
 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
 {
-	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+	u32 *hxg = msg_to_hxg(msg);
+	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
 
 	lockdep_assert_held(&ct->lock);
 
@@ -784,40 +1125,80 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
 	return 0;
 }
 
+static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
+{
+	struct xe_gt *gt = ct_to_gt(ct);
+
+	if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
+		xe_gt_err(gt, "GuC Crash dump notification\n");
+	else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
+		xe_gt_err(gt, "GuC Exception notification\n");
+	else
+		xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
+
+	CT_DEAD(ct, NULL, CRASH);
+
+	kick_reset(ct);
+
+	return 0;
+}
+
 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
 {
-	struct xe_device *xe = ct_to_xe(ct);
-	u32 response_len = len - GUC_CTB_MSG_MIN_LEN;
+	struct xe_gt *gt = ct_to_gt(ct);
+	u32 *hxg = msg_to_hxg(msg);
+	u32 hxg_len = msg_len_to_hxg_len(len);
 	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, msg[0]);
-	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]);
+	u32 type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
 	struct g2h_fence *g2h_fence;
 
 	lockdep_assert_held(&ct->lock);
 
+	/*
+	 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
+	 * Those messages should never fail, so if we do get an error back it
+	 * means we're likely doing an illegal operation and the GuC is
+	 * rejecting it. We have no way to inform the code that submitted the
+	 * H2G that the message was rejected, so we need to escalate the
	 * failure to trigger a reset.
+	 */
+	if (fence & CT_SEQNO_UNTRACKED) {
+		if (type == GUC_HXG_TYPE_RESPONSE_FAILURE)
+			xe_gt_err(gt, "FAST_REQ H2G fence 0x%x failed! e=0x%x, h=%u\n",
+				  fence,
+				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]),
+				  FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]));
+		else
+			xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
+				  type, fence);
+		CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
+
+		return -EPROTO;
+	}
+
 	g2h_fence = xa_erase(&ct->fence_lookup, fence);
 	if (unlikely(!g2h_fence)) {
 		/* Don't tear down channel, as send could've timed out */
+		/* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
-		drm_warn(&xe->drm, "G2H fence (%u) not found!\n", fence);
+		xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
 		g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
 		return 0;
 	}
 
-	xe_assert(xe, fence == g2h_fence->seqno);
+	xe_gt_assert(gt, fence == g2h_fence->seqno);
 
 	if (type == GUC_HXG_TYPE_RESPONSE_FAILURE) {
 		g2h_fence->fail = true;
-		g2h_fence->error =
-			FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, msg[1]);
-		g2h_fence->hint =
-			FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, msg[1]);
+		g2h_fence->error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, hxg[0]);
+		g2h_fence->hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, hxg[0]);
 	} else if (type == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
 		g2h_fence->retry = true;
-		g2h_fence->reason =
-			FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, msg[1]);
+		g2h_fence->reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, hxg[0]);
 	} else if (g2h_fence->response_buffer) {
-		g2h_fence->response_len = response_len;
-		memcpy(g2h_fence->response_buffer, msg + GUC_CTB_MSG_MIN_LEN,
-		       response_len * sizeof(u32));
+		g2h_fence->response_len = hxg_len;
+		memcpy(g2h_fence->response_buffer, hxg, hxg_len * sizeof(u32));
+	} else {
+		g2h_fence->response_data = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, hxg[0]);
 	}
 
 	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
@@ -832,25 +1213,23 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
 
 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 {
-	struct xe_device *xe = ct_to_xe(ct);
-	u32 hxg, origin, type;
+	struct xe_gt *gt = ct_to_gt(ct);
+	u32 *hxg = msg_to_hxg(msg);
+	u32 origin, type;
 	int ret;
 
 	lockdep_assert_held(&ct->lock);
 
-	hxg = msg[1];
-
-	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg);
+	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
-		drm_err(&xe->drm,
-			"G2H channel broken on read, origin=%d, reset required\n",
-			origin);
-		ct->ctbs.g2h.info.broken = true;
+		xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
+			  origin);
+		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
 
 		return -EPROTO;
 	}
 
-	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg);
+	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
 	switch (type) {
 	case GUC_HXG_TYPE_EVENT:
 		ret = parse_g2h_event(ct, msg, len);
@@ -861,10 +1240,9 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 		ret = parse_g2h_response(ct, msg, len);
 		break;
 	default:
-		drm_err(&xe->drm,
-			"G2H channel broken on read, type=%d, reset required\n",
-			type);
-		ct->ctbs.g2h.info.broken = true;
+		xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
+			  type);
+		CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
 
 		ret = -EOPNOTSUPP;
 	}
@@ -874,16 +1252,21 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 
 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 {
-	struct xe_device *xe = ct_to_xe(ct);
 	struct xe_guc *guc = ct_to_guc(ct);
-	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
-	u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
-	u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
+	struct xe_gt *gt = ct_to_gt(ct);
+	u32 hxg_len = msg_len_to_hxg_len(len);
+	u32 *hxg = msg_to_hxg(msg);
+	u32 action, adj_len;
+	u32 *payload;
 	int ret = 0;
 
-	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
+	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
 		return 0;
 
+	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+	payload = hxg + GUC_HXG_EVENT_MSG_MIN_LEN;
+	adj_len = hxg_len - GUC_HXG_EVENT_MSG_MIN_LEN;
+
 	switch (action) {
 	case XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
 		ret = xe_guc_sched_done_handler(guc, payload, adj_len);
@@ -902,6 +1285,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 		/* Selftest only at the moment */
 		break;
 	case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+		ret = xe_guc_error_capture_handler(guc, payload, adj_len);
+		break;
 	case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
 		/* FIXME: Handle this */
 		break;
@@ -920,13 +1305,31 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 		ret = xe_guc_access_counter_notify_handler(guc, payload,
 							   adj_len);
 		break;
+	case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
+		ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
+		break;
+	case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
+		ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
+		break;
+	case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
+		ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
+		break;
+	case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
+		ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
+		break;
+	case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+	case XE_GUC_ACTION_NOTIFY_EXCEPTION:
+		ret = guc_crash_process_msg(ct, action);
+		break;
 	default:
-		drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
+		xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
 	}
 
-	if (ret)
-		drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
-			action, ret);
+	if (ret) {
+		xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
+			  action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
+		CT_DEAD(ct, NULL, PROCESS_FAILED);
+	}
 
 	return 0;
 }
@@ -934,19 +1337,84 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 {
 	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt = ct_to_gt(ct);
 	struct guc_ctb *g2h = &ct->ctbs.g2h;
-	u32 tail, head, len;
+	u32 tail, head, len, desc_status;
 	s32 avail;
 	u32 action;
+	u32 *hxg;
 
+	xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
 	lockdep_assert_held(&ct->fast_lock);
 
-	if (!ct->enabled)
+	if (ct->state == XE_GUC_CT_STATE_DISABLED)
 		return -ENODEV;
 
+	if (ct->state == XE_GUC_CT_STATE_STOPPED)
+		return -ECANCELED;
+
 	if (g2h->info.broken)
 		return -EPIPE;
 
+	xe_gt_assert(gt, xe_guc_ct_enabled(ct));
+
+	desc_status = desc_read(xe, g2h, status);
+	if (desc_status) {
+		if (desc_status & GUC_CTB_STATUS_DISABLED) {
+			/*
+			 * Potentially valid if a CLIENT_RESET request resulted in
+			 * contexts/engines being reset. But should never happen as
+			 * no contexts should be active when CLIENT_RESET is sent.
+			 */
+			xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
+			desc_status &= ~GUC_CTB_STATUS_DISABLED;
+		}
+
+		if (desc_status) {
+			xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
+			goto corrupted;
+		}
+	}
+
+	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+		u32 desc_tail = desc_read(xe, g2h, tail);
+		/*
+		u32 desc_head = desc_read(xe, g2h, head);
+
+		 * info.head and desc_head are updated back-to-back at the end of
+		 * this function and nowhere else.
+		 * Hence, they cannot be different
+		 * unless two g2h_read calls are running concurrently. Which is not
+		 * possible because it is guarded by ct->fast_lock. And yet, some
+		 * discrete platforms are regularly hitting this error :(.
+		 *
+		 * desc_head rolling backwards shouldn't cause any noticeable
+		 * problems - just a delay in GuC being allowed to proceed past that
+		 * point in the queue. So for now, just disable the error until it
+		 * can be root caused.
+		 *
+		if (g2h->info.head != desc_head) {
+			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+			xe_gt_err(gt, "CT read: head was modified %u != %u\n",
+				  desc_head, g2h->info.head);
+			goto corrupted;
+		}
+		*/
+
+		if (g2h->info.head > g2h->info.size) {
+			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+			xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
+				  g2h->info.head, g2h->info.size);
+			goto corrupted;
+		}
+
+		if (desc_tail >= g2h->info.size) {
+			desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+			xe_gt_err(gt, "CT read: invalid tail offset %u >= %u)\n",
+				  desc_tail, g2h->info.size);
+			goto corrupted;
+		}
+	}
+
 	/* Calculate DW available to read */
 	tail = desc_read(xe, g2h, tail);
 	avail = tail - g2h->info.head;
@@ -961,12 +1429,9 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 			   sizeof(u32));
 	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
 	if (len > avail) {
-		drm_err(&xe->drm,
-			"G2H channel broken on read, avail=%d, len=%d, reset required\n",
-			avail, len);
-		g2h->info.broken = true;
-
-		return -EPROTO;
+		xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
+			  avail, len);
+		goto corrupted;
 	}
 
 	head = (g2h->info.head + 1) % g2h->info.size;
@@ -988,10 +1453,11 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 				   avail * sizeof(u32));
 	}
 
-	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
+	hxg = msg_to_hxg(msg);
+	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
 
 	if (fast_path) {
-		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[1]) != GUC_HXG_TYPE_EVENT)
+		if (FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT)
 			return 0;
 
 		switch (action) {
@@ -1007,19 +1473,25 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
 	g2h->info.head = (head + avail) % g2h->info.size;
 	desc_write(xe, g2h, head, g2h->info.head);
 
-	trace_xe_guc_ctb_g2h(ct_to_gt(ct)->info.id, action, len,
-			     g2h->info.head, tail);
+	trace_xe_guc_ctb_g2h(xe, ct_to_gt(ct)->info.id,
			     action, len, g2h->info.head, tail);
 
 	return len;
+
+corrupted:
+	CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
+	return -EPROTO;
 }
 
 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
 {
-	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt = ct_to_gt(ct);
 	struct xe_guc *guc = ct_to_guc(ct);
-	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[1]);
-	u32 *payload = msg + GUC_CTB_HXG_MSG_MIN_LEN;
-	u32 adj_len = len - GUC_CTB_HXG_MSG_MIN_LEN;
+	u32 hxg_len = msg_len_to_hxg_len(len);
+	u32 *hxg = msg_to_hxg(msg);
+	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+	u32 *payload = hxg + GUC_HXG_MSG_MIN_LEN;
+	u32 adj_len = hxg_len - GUC_HXG_MSG_MIN_LEN;
 	int ret = 0;
 
 	switch (action) {
@@ -1032,12 +1504,14 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
 					   adj_len);
 		break;
 	default:
-		drm_warn(&xe->drm, "NOT_POSSIBLE");
+		xe_gt_warn(gt, "NOT_POSSIBLE");
 	}
 
-	if (ret)
-		drm_err(&xe->drm, "action 0x%04x failed processing, ret=%d\n",
-			action, ret);
+	if (ret) {
+		xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
+			  action, ERR_PTR(ret));
+		CT_DEAD(ct, NULL, FAST_G2H);
+	}
 }
 
 /**
@@ -1054,7 +1528,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
 	bool ongoing;
 	int len;
 
-	ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
 		return;
 
@@ -1067,7 +1541,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
 	spin_unlock(&ct->fast_lock);
 
 	if (ongoing)
-		xe_device_mem_access_put(xe);
+		xe_pm_runtime_put(xe);
 }
 
 /* Returns less than zero on error, 0 on done, 1 on more available */
@@ -1095,9 +1569,8 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct)
 	return 1;
 }
 
-static void g2h_worker_func(struct work_struct *w)
+static void receive_g2h(struct xe_guc_ct *ct)
 {
-	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
 	bool ongoing;
 	int ret;
 
@@ -1124,7 +1597,7 @@ static void g2h_worker_func(struct work_struct *w)
 	 * responses, if the worker here is blocked on those callbacks
	 * completing, creating a deadlock.
	 */
-	ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+	ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
 	if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
 		return;
 
@@ -1134,61 +1607,51 @@ static void g2h_worker_func(struct work_struct *w)
 		mutex_unlock(&ct->lock);
 
 		if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
-			struct drm_device *drm = &ct_to_xe(ct)->drm;
-			struct drm_printer p = drm_info_printer(drm->dev);
-
-			xe_guc_ct_print(ct, &p, false);
+			xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
+			CT_DEAD(ct, NULL, G2H_RECV);
 			kick_reset(ct);
 		}
 	} while (ret == 1);
 
 	if (ongoing)
-		xe_device_mem_access_put(ct_to_xe(ct));
+		xe_pm_runtime_put(ct_to_xe(ct));
 }
 
-static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
-				     struct guc_ctb_snapshot *snapshot,
-				     bool atomic)
+static void g2h_worker_func(struct work_struct *w)
 {
-	u32 head, tail;
+	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
 
-	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
-			   sizeof(struct guc_ct_buffer_desc));
-	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
+	receive_g2h(ct);
+}
 
-	snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
-				       atomic ? GFP_ATOMIC : GFP_KERNEL);
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
+							bool want_ctb)
+{
+	struct xe_guc_ct_snapshot *snapshot;
 
-	if (!snapshot->cmds) {
-		drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
-		return;
-	}
+	snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
+	if (!snapshot)
+		return NULL;
 
-	head = snapshot->desc.head;
-	tail = snapshot->desc.tail;
-
-	if (head != tail) {
-		struct iosys_map map =
-			IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));
-
-		while (head != tail) {
-			snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
-			++head;
-			if (head == ctb->info.size) {
-				head = 0;
-				map = ctb->cmds;
-			} else {
-				iosys_map_incr(&map, sizeof(u32));
-			}
-		}
+	if (ct->bo && want_ctb) {
+		snapshot->ctb_size = ct->bo->size;
+		snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
 	}
+
+	return snapshot;
+}
+
+static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
+				     struct guc_ctb_snapshot *snapshot)
+{
+	xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
+			   sizeof(struct guc_ct_buffer_desc));
+	memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
 }
 
 static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
 				   struct drm_printer *p)
 {
-	u32 head, tail;
-
 	drm_printf(p, "\tsize: %d\n", snapshot->info.size);
 	drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
 	drm_printf(p, "\thead: %d\n", snapshot->info.head);
@@ -1198,63 +1661,46 @@ static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
 	drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
 	drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
 	drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
+}
 
-	if (!snapshot->cmds)
-		return;
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
+							  bool want_ctb)
+{
+	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_guc_ct_snapshot *snapshot;
 
-	head = snapshot->desc.head;
-	tail = snapshot->desc.tail;
+	snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
+	if (!snapshot) {
+		xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
+		return NULL;
+	}
 
-	while (head != tail) {
-		drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
-			   snapshot->cmds[head]);
-		++head;
-		if (head == snapshot->info.size)
-			head = 0;
+	if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
+		snapshot->ct_enabled = true;
+		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
+		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
+		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
 	}
-}
 
-static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
-{
-	kfree(snapshot->cmds);
+	if (ct->bo && snapshot->ctb)
+		xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
+
+	return snapshot;
 }
 
 /**
  * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
  * @ct: GuC CT object.
- * @atomic: Boolean to indicate if this is called from atomic context like
- * reset or CTB handler or from some regular path like debugfs.
 *
 * This can be printed out in a later stage like during dev_coredump
- * analysis.
+ * analysis. This is safe to be called during atomic context.
 *
 * Returns: a GuC CT snapshot object that must be freed by the caller
 * by using `xe_guc_ct_snapshot_free`.
 */
-struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
-						      bool atomic)
+struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
 {
-	struct xe_device *xe = ct_to_xe(ct);
-	struct xe_guc_ct_snapshot *snapshot;
-
-	snapshot = kzalloc(sizeof(*snapshot),
-			   atomic ? GFP_ATOMIC : GFP_KERNEL);
-
-	if (!snapshot) {
-		drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
-		return NULL;
-	}
-
-	if (ct->enabled) {
-		snapshot->ct_enabled = true;
-		snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
-		guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
-					 &snapshot->h2g, atomic);
-		guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
-					 &snapshot->g2h, atomic);
-	}
-
-	return snapshot;
+	return guc_ct_snapshot_capture(ct, true, true);
 }
 
 /**
@@ -1271,16 +1717,21 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
 		return;
 
 	if (snapshot->ct_enabled) {
-		drm_puts(p, "\nH2G CTB (all sizes in DW):\n");
+		drm_puts(p, "H2G CTB (all sizes in DW):\n");
 		guc_ctb_snapshot_print(&snapshot->h2g, p);
 
-		drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
+		drm_puts(p, "G2H CTB (all sizes in DW):\n");
 		guc_ctb_snapshot_print(&snapshot->g2h, p);
+
 		drm_printf(p, "\tg2h outstanding: %d\n",
			   snapshot->g2h_outstanding);
+
+		if (snapshot->ctb) {
+			drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
+			xe_print_blob_ascii85(p, "[CTB].data", '\n',
+					      snapshot->ctb, 0, snapshot->ctb_size);
+		}
 	} else {
-		drm_puts(p, "\nCT disabled\n");
+		drm_puts(p, "CT disabled\n");
 	}
 }
 
@@ -1296,8 +1747,7 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
 	if (!snapshot)
 		return;
 
-	guc_ctb_snapshot_free(&snapshot->h2g);
-	guc_ctb_snapshot_free(&snapshot->g2h);
+	kfree(snapshot->ctb);
 	kfree(snapshot);
 }
 
@@ -1305,16 +1755,121 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
 * xe_guc_ct_print - GuC CT Print.
 * @ct: GuC CT.
 * @p: drm_printer where it will be printed out.
- * @atomic: Boolean to indicate if this is called from atomic context like
- * reset or CTB handler or from some regular path like debugfs.
+ * @want_ctb: Should the full CTB content be dumped (vs just the headers)
 *
- * This function quickly capture a snapshot and immediately print it out.
+ * This function will quickly capture a snapshot of the CT state
+ * and immediately print it out.
 */
-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
 {
 	struct xe_guc_ct_snapshot *snapshot;
 
-	snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
+	snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
 	xe_guc_ct_snapshot_print(snapshot, p);
 	xe_guc_ct_snapshot_free(snapshot);
 }
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
+{
+	struct xe_guc_log_snapshot *snapshot_log;
+	struct xe_guc_ct_snapshot *snapshot_ct;
+	struct xe_guc *guc = ct_to_guc(ct);
+	unsigned long flags;
+	bool have_capture;
+
+	if (ctb)
+		ctb->info.broken = true;
+
+	/* Ignore further errors after the first dump until a reset */
+	if (ct->dead.reported)
+		return;
+
+	spin_lock_irqsave(&ct->dead.lock, flags);
+
+	/* And only capture one dump at a time */
+	have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
+	ct->dead.reason |= (1 << reason_code) |
+			   (1 << CT_DEAD_STATE_CAPTURE);
+
+	spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+	if (have_capture)
+		return;
+
+	snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
+	snapshot_ct = xe_guc_ct_snapshot_capture((ct));
+
+	spin_lock_irqsave(&ct->dead.lock, flags);
+
+	if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
+		xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
+		xe_guc_log_snapshot_free(snapshot_log);
+		xe_guc_ct_snapshot_free(snapshot_ct);
+	} else {
+		ct->dead.snapshot_log = snapshot_log;
+		ct->dead.snapshot_ct = snapshot_ct;
+	}
+
+	spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+	queue_work(system_unbound_wq, &(ct)->dead.worker);
+}
+
+static void ct_dead_print(struct xe_dead_ct *dead)
+{
+	struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
+	struct xe_device *xe = ct_to_xe(ct);
+	struct xe_gt *gt = ct_to_gt(ct);
+	static int g_count;
+	struct drm_printer ip = xe_gt_info_printer(gt);
+	struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
+
+	if (!dead->reason) {
+		xe_gt_err(gt, "CTB is dead for no reason!?\n");
+		return;
+	}
+
+	/* Can't generate a genuine core dump at this point, so just do the good bits */
+	drm_puts(&lp, "**** Xe Device Coredump ****\n");
+	drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
+	xe_device_snapshot_print(xe, &lp);
+
+	drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
+	drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
+
+	drm_puts(&lp, "**** GuC Log ****\n");
+	xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
+
+	drm_puts(&lp, "**** GuC CT ****\n");
+	xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
+
+	drm_puts(&lp, "Done.\n");
+}
+
+static void ct_dead_worker_func(struct work_struct *w)
+{
+	struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
+
+	if (!ct->dead.reported) {
+		ct->dead.reported = true;
+		ct_dead_print(&ct->dead);
+	}
+
+	spin_lock_irq(&ct->dead.lock);
+
+	xe_guc_log_snapshot_free(ct->dead.snapshot_log);
+	ct->dead.snapshot_log = NULL;
+	xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
+	ct->dead.snapshot_ct = NULL;
+
+	if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
+		/* A reset has occurred so re-arm the error reporting */
+		ct->dead.reason = 0;
+		ct->dead.reported = false;
+	}
+
+	spin_unlock_irq(&ct->dead.lock);
+}
+#endif
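
Editor's note: two short, hedged sketches follow; they are illustrative additions, not part of the patch. First, the new fencing scheme: the patch reserves bit 15 of the 16-bit CT fence (CT_SEQNO_UNTRACKED) to mark FAST_REQUEST messages whose fences are never stored in ct->fence_lookup, while the low 15 bits act as a rolling counter. A minimal user-space C sketch (constants and next_ct_seqno() mirrored from the diff; the driver's per-CT state is mocked by a plain global) shows the counter wrapping while bit 15 keeps the two fence classes distinguishable on the response side:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the driver's fence layout: bit 15 flags FAST_REQUEST fences. */
	#define CT_SEQNO_MASK      0x7fffu /* GENMASK(14, 0) */
	#define CT_SEQNO_UNTRACKED 0x8000u /* BIT(15) */

	static uint32_t fence_seqno; /* stands in for ct->fence_seqno */

	static uint16_t next_ct_seqno(int is_g2h_fence)
	{
		uint32_t seqno = fence_seqno++ & CT_SEQNO_MASK;

		if (!is_g2h_fence)
			seqno |= CT_SEQNO_UNTRACKED;

		return (uint16_t)seqno;
	}

	int main(void)
	{
		fence_seqno = 0x7ffe; /* force a wrap of the 15-bit counter */

		for (int i = 0; i < 4; i++) {
			uint16_t fence = next_ct_seqno(i & 1);

			/* Same test parse_g2h_response() uses to reject untracked fences */
			printf("fence 0x%04x -> %s\n", fence,
			       (fence & CT_SEQNO_UNTRACKED) ?
			       "FAST_REQUEST (untracked)" : "REQUEST (fence_lookup)");
		}
		return 0;
	}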
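Second, the dead-CT report prints the reason only as a raw mask ("Reason: CTB is dead - 0x%X"); note that the CT_DEAD_* enum stores bit positions, which ct_dead_capture() shifts via 1 << reason_code. A hypothetical user-space decoder for such a mask (bit names copied from the enum's order; no such helper exists in the kernel) could look like:

	#include <stdio.h>

	/* Bit positions copied from the patch's CT_DEAD_* enum, in order. */
	static const char * const ct_dead_names[16] = {
		"STATE_REARM", "STATE_CAPTURE", "SETUP", "H2G_WRITE",
		"H2G_HAS_ROOM", "G2H_READ", "G2H_RECV", "G2H_RELEASE",
		"DEADLOCK", "PROCESS_FAILED", "FAST_G2H", "PARSE_G2H_RESPONSE",
		"PARSE_G2H_UNKNOWN", "PARSE_G2H_ORIGIN", "PARSE_G2H_TYPE", "CRASH",
	};

	static void decode_ct_dead_reason(unsigned int reason)
	{
		printf("CTB is dead - 0x%04X:", reason);
		for (unsigned int bit = 0; bit < 16; bit++)
			if (reason & (1u << bit))
				printf(" %s", ct_dead_names[bit]);
		printf("\n");
	}

	int main(void)
	{
		/* e.g. 0x0102: a DEADLOCK hit while a capture was already pending */
		decode_ct_dead_reason(0x0102);
		return 0;
	}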