-rw-r--r--   drivers/gpu/drm/xe/Makefile                    |   1
-rw-r--r--   drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c    | 257
-rw-r--r--   drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h    |  27
-rw-r--r--   drivers/gpu/drm/xe/xe_guc_ct.c                 |   5
4 files changed, 290 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 705c0eaf6e71..21316ee47026 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -156,6 +156,7 @@ xe-y += \
        xe_sriov.o

xe-$(CONFIG_PCI_IOV) += \
+       xe_gt_sriov_pf_control.o \
        xe_lmtt.o \
        xe_lmtt_2l.o \
        xe_lmtt_ml.o
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
new file mode 100644
index 000000000000..40b8f881fe04
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include "abi/guc_actions_sriov_abi.h"
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_ct.h"
+#include "xe_sriov.h"
+
+static const char *control_cmd_to_string(u32 cmd)
+{
+       switch (cmd) {
+       case GUC_PF_TRIGGER_VF_PAUSE:
+               return "PAUSE";
+       case GUC_PF_TRIGGER_VF_RESUME:
+               return "RESUME";
+       case GUC_PF_TRIGGER_VF_STOP:
+               return "STOP";
+       case GUC_PF_TRIGGER_VF_FLR_START:
+               return "FLR_START";
+       case GUC_PF_TRIGGER_VF_FLR_FINISH:
+               return "FLR_FINISH";
+       default:
+               return "<unknown>";
+       }
+}
+
+static int guc_action_vf_control_cmd(struct xe_guc *guc, u32 vfid, u32 cmd)
+{
+       u32 request[PF2GUC_VF_CONTROL_REQUEST_MSG_LEN] = {
+               FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+               FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+               FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_VF_CONTROL),
+               FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_1_VFID, vfid),
+               FIELD_PREP(PF2GUC_VF_CONTROL_REQUEST_MSG_2_COMMAND, cmd),
+       };
+       int ret;
+
+       /* XXX those two commands are now sent from the G2H handler */
+       if (cmd == GUC_PF_TRIGGER_VF_FLR_START || cmd == GUC_PF_TRIGGER_VF_FLR_FINISH)
+               return xe_guc_ct_send_g2h_handler(&guc->ct, request, ARRAY_SIZE(request));
+
+       ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+       return ret > 0 ? -EPROTO : ret;
+}
+
+static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
+{
+       int err;
+
+       xe_gt_assert(gt, vfid != PFID);
+
+       err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
+       if (unlikely(err))
+               xe_gt_sriov_err(gt, "VF%u control command %s failed (%pe)\n",
+                               vfid, control_cmd_to_string(cmd), ERR_PTR(err));
+       return err;
+}
+
+static int pf_send_vf_pause(struct xe_gt *gt, unsigned int vfid)
+{
+       return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_PAUSE);
+}
+
+static int pf_send_vf_resume(struct xe_gt *gt, unsigned int vfid)
+{
+       return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_RESUME);
+}
+
+static int pf_send_vf_stop(struct xe_gt *gt, unsigned int vfid)
+{
+       return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_STOP);
+}
+
+static int pf_send_vf_flr_start(struct xe_gt *gt, unsigned int vfid)
+{
+       return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_START);
+}
+
+static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
+{
+       return pf_send_vf_control_cmd(gt, vfid, GUC_PF_TRIGGER_VF_FLR_FINISH);
+}
+
+/**
+ * xe_gt_sriov_pf_control_pause_vf - Pause a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
+{
+       return pf_send_vf_pause(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_control_resume_vf - Resume a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
+{
+       return pf_send_vf_resume(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_control_stop_vf - Stop a VF.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
+{
+       return pf_send_vf_stop(gt, vfid);
+}
+
+/**
+ * DOC: The VF FLR Flow with GuC
+ *
+ *        PF                        GUC             PCI
+ * ========================================================
+ *         |                         |               |
+ * (1)     |                        [ ] <----- FLR --|
+ *         |                        [ ]              :
+ * (2)    [ ] <------- NOTIFY FLR --[ ]
+ *        [ ]                        |
+ * (3)    [ ]                        |
+ *        [ ]                        |
+ *        [ ]-- START FLR --------> [ ]
+ *         |                        [ ]
+ * (4)     |                        [ ]
+ *         |                        [ ]
+ *        [ ] <-------- FLR DONE -- [ ]
+ *        [ ]                        |
+ * (5)    [ ]                        |
+ *        [ ]                        |
+ *        [ ]-- FINISH FLR -------> [ ]
+ *         |                         |
+ *
+ * Step 1: PCI HW generates interrupt to the GuC about VF FLR
+ * Step 2: GuC FW sends G2H notification to the PF about VF FLR
+ * Step 2a: on some platforms G2H is only received from root GuC
+ * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
+ * Step 3a: on some platforms PF must send H2G to all other GuCs
+ * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
+ * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
+ */
+
+static bool needs_dispatch_flr(struct xe_device *xe)
+{
+       return xe->info.platform == XE_PVC;
+}
+
+static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid)
+{
+       struct xe_device *xe = gt_to_xe(gt);
+       struct xe_gt *gtit;
+       unsigned int gtid;
+
+       xe_gt_sriov_info(gt, "VF%u FLR\n", vfid);
+
+       if (needs_dispatch_flr(xe)) {
+               for_each_gt(gtit, xe, gtid)
+                       pf_send_vf_flr_start(gtit, vfid);
+       } else {
+               pf_send_vf_flr_start(gt, vfid);
+       }
+}
+
+static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid)
+{
+       pf_send_vf_flr_finish(gt, vfid);
+}
+
+static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
+{
+       switch (eventid) {
+       case GUC_PF_NOTIFY_VF_FLR:
+               pf_handle_vf_flr(gt, vfid);
+               break;
+       case GUC_PF_NOTIFY_VF_FLR_DONE:
+               pf_handle_vf_flr_done(gt, vfid);
+               break;
+       case GUC_PF_NOTIFY_VF_PAUSE_DONE:
+               break;
+       case GUC_PF_NOTIFY_VF_FIXUP_DONE:
+               break;
+       default:
+               return -ENOPKG;
+       }
+       return 0;
+}
+
+static int pf_handle_pf_event(struct xe_gt *gt, u32 eventid)
+{
+       switch (eventid) {
+       case GUC_PF_NOTIFY_VF_ENABLE:
+               xe_gt_sriov_dbg_verbose(gt, "VFs %s/%s\n",
+                                       str_enabled_disabled(true),
+                                       str_enabled_disabled(false));
+               break;
+       default:
+               return -ENOPKG;
+       }
+       return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_process_guc2pf - Handle VF state notification from GuC.
+ * @gt: the &xe_gt
+ * @msg: the G2H message
+ * @len: the length of the G2H message
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len)
+{
+       u32 vfid;
+       u32 eventid;
+
+       xe_gt_assert(gt, len);
+       xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, msg[0]) == GUC_HXG_ORIGIN_GUC);
+       xe_gt_assert(gt, FIELD_GET(GUC_HXG_MSG_0_TYPE, msg[0]) == GUC_HXG_TYPE_EVENT);
+       xe_gt_assert(gt, FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, msg[0]) ==
+                        GUC_ACTION_GUC2PF_VF_STATE_NOTIFY);
+
+       if (unlikely(!xe_device_is_sriov_pf(gt_to_xe(gt))))
+               return -EPROTO;
+
+       if (unlikely(FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_0_MBZ, msg[0])))
+               return -EPFNOSUPPORT;
+
+       if (unlikely(len != GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN))
+               return -EPROTO;
+
+       vfid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID, msg[1]);
+       eventid = FIELD_GET(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT, msg[2]);
+
+       return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid);
+}
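
Note (not part of the patch): the handler above only accepts a strictly formed GUC2PF_VF_STATE_NOTIFY event. A minimal sketch of how such an event could be composed, e.g. by a hypothetical selftest, using the same HXG and notify-event field macros the handler decodes with; the helper name sketch_make_vf_flr_event() is invented for illustration:

        /*
         * Illustrative sketch only; assumes the same includes as the new file
         * above (linux/bitfield.h for FIELD_PREP and the GuC SR-IOV ABI header
         * for the message field macros).
         */
        static void sketch_make_vf_flr_event(u32 *msg, u32 vfid)
        {
                /* HXG header: an event originated by the GuC, VF state notify action */
                msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC) |
                         FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_EVENT) |
                         FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION,
                                    GUC_ACTION_GUC2PF_VF_STATE_NOTIFY);
                /* payload: which VF is affected and which event occurred (here: FLR) */
                msg[1] = FIELD_PREP(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_1_VFID, vfid);
                msg[2] = FIELD_PREP(GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_2_EVENT,
                                    GUC_PF_NOTIFY_VF_FLR);
        }

A message built like this, passed with len == GUC2PF_VF_STATE_NOTIFY_EVENT_MSG_LEN, would satisfy the asserts and checks in xe_gt_sriov_pf_control_process_guc2pf() and, for a non-zero vfid, reach pf_handle_vf_flr(), i.e. step (3) of the FLR flow documented above.
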
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
new file mode 100644
index 000000000000..850a3e37661f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONTROL_H_
+#define _XE_GT_SRIOV_PF_CONTROL_H_
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct xe_gt;
+
+int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
+
+#ifdef CONFIG_PCI_IOV
+int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len);
+#else
+static inline int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len)
+{
+       return -EPROTO;
+}
+#endif
+
+#endif
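
For orientation only, a sketch of how a PF-side caller might consume the API declared above; pf_pause_vf_on_all_gts() is an invented name, not something this series adds. It fans the per-GT pause request out to every GT of the device, mirroring the per-GT scope of these declarations:

        /*
         * Hypothetical caller sketch; assumes "xe_device.h", "xe_gt.h" and
         * "xe_gt_sriov_pf_control.h" are included, as in the .c file above.
         */
        static int pf_pause_vf_on_all_gts(struct xe_device *xe, unsigned int vfid)
        {
                struct xe_gt *gt;
                unsigned int id;
                int err;

                /* the control interface is PF-only */
                if (!xe_device_is_sriov_pf(xe))
                        return -EPERM;

                for_each_gt(gt, xe, id) {
                        err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
                        if (err)
                                return err;
                }
                return 0;
        }

Such a caller would itself need to be built only under CONFIG_PCI_IOV, since the Makefile change above compiles xe_gt_sriov_pf_control.o only in that configuration; only the guc2pf handler gets a CONFIG_PCI_IOV=n stub here.
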
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 1d930a8eeeca..f4890e9a1e93 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -21,6 +21,7 @@
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_relay.h"
@@ -1008,6 +1009,7 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
        struct xe_device *xe = ct_to_xe(ct);
        struct xe_guc *guc = ct_to_guc(ct);
+       struct xe_gt *gt = ct_to_gt(ct);
        u32 hxg_len = msg_len_to_hxg_len(len);
        u32 *hxg = msg_to_hxg(msg);
        u32 action, adj_len;
@@ -1063,6 +1065,9 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
        case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
                ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
                break;
+       case GUC_ACTION_GUC2PF_VF_STATE_NOTIFY:
+               ret = xe_gt_sriov_pf_control_process_guc2pf(gt, hxg, hxg_len);
+               break;
        default:
                drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
        }