Diffstat (limited to 'drivers/gpu/drm/xe/xe_pci_sriov.c')
-rw-r--r-- drivers/gpu/drm/xe/xe_pci_sriov.c | 266
 1 file changed, 266 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
new file mode 100644
index 000000000000..9ff69c4843b0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/string_choices.h>
+
+#include "regs/xe_bars.h"
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_engine_activity.h"
+#include "xe_pci_sriov.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_provision.h"
+#include "xe_sriov_pf_sysfs.h"
+#include "xe_sriov_printk.h"
+
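+/*
+ * Note: VF identifiers in the Xe SR-IOV code are 1-based, as VFID 0 is
+ * reserved for the PF itself, hence the 1..num_vfs loops below.
+ */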
+static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ unsigned int n;
+
+ for (n = 1; n <= num_vfs; n++)
+ xe_sriov_pf_control_reset_vf(xe, n);
+}
+
+static void pf_link_vfs(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
+ struct device_link *link;
+ struct pci_dev *pdev_vf;
+ unsigned int n;
+
+ /*
+ * When both PF and VF devices are enabled on the host, they resume in
+ * parallel during system resume.
+ *
+ * But the PF must finish provisioning the VFs before any VF can
+ * successfully resume.
+ *
+ * Create a parent-child device link between the PF and each VF device
+ * to enforce the correct resume order.
+ */
+ for (n = 1; n <= num_vfs; n++) {
+ pdev_vf = xe_pci_sriov_get_vf_pdev(pdev_pf, n);
+
+ /* unlikely, something weird is happening, abort */
+ if (!pdev_vf) {
+ xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
+ n, str_plural(num_vfs));
+ break;
+ }
+
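+ /*
+ * DL_FLAG_AUTOREMOVE_CONSUMER makes the device core drop this link
+ * automatically when the consumer (VF) driver unbinds, so there is no
+ * explicit link cleanup in pf_disable_vfs().
+ */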
+ link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ /* unlikely and harmless, continue with other VFs */
+ if (!link)
+ xe_sriov_notice(xe, "Failed linking VF%u\n", n);
+
+ pci_dev_put(pdev_vf);
+ }
+}
+
+static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
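+ /* Best effort: failures are logged per GT but do not block (dis)abling VFs. */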
+ for_each_gt(gt, xe, id) {
+ ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
+ if (ret)
+ xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
+ }
+}
+
+static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u32 sizes;
+
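+ /*
+ * pci_iov_vf_bar_get_sizes() returns a bitmask of BAR sizes (encoded as
+ * in the PCI Resizable BAR spec) that still let all num_vfs VFs fit, or
+ * 0 if the VF BAR cannot be resized; __fls() picks the largest of them.
+ */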
+ sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
+ if (!sizes)
+ return 0;
+
+ return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
+}
+
+static int pf_prepare_vfs_enabling(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ /* make sure we are not locked down by other components */
+ return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, false, NULL);
+}
+
+static void pf_finish_vfs_enabling(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ /* allow other components to lock down VFs enabling */
+ xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, false, NULL);
+}
+
+static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int total_vfs = xe_sriov_pf_get_totalvfs(xe);
+ int err;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, num_vfs > 0);
+ xe_assert(xe, num_vfs <= total_vfs);
+ xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs));
+
+ err = xe_sriov_pf_wait_ready(xe);
+ if (err)
+ goto out;
+
+ err = pf_prepare_vfs_enabling(xe);
+ if (err)
+ goto out;
+
+ /*
+ * We must hold an additional runtime PM reference to keep the PF in D0
+ * for the VFs' lifetime, as our VFs do not implement the PM capability.
+ *
+ * With the PF in D0, all VFs will also behave as if in D0 state.
+ * This will also keep the GuC alive, with all the VFs' configurations.
+ *
+ * We will release this additional PM reference in pf_disable_vfs().
+ */
+ xe_pm_runtime_get_noresume(xe);
+
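+ /*
+ * Provision the VFs before enabling them in PCI: once pci_enable_sriov()
+ * succeeds, the VF devices may be probed immediately and must find their
+ * GuC configuration already in place.
+ */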
+ err = xe_sriov_pf_provision_vfs(xe, num_vfs);
+ if (err < 0)
+ goto failed;
+
+ if (IS_DGFX(xe)) {
+ err = resize_vf_vram_bar(xe, num_vfs);
+ if (err)
+ xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
+ }
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err < 0)
+ goto failed;
+
+ pf_link_vfs(xe, num_vfs);
+
+ xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
+ num_vfs, total_vfs, str_plural(total_vfs));
+
+ xe_sriov_pf_sysfs_link_vfs(xe, num_vfs);
+
+ pf_engine_activity_stats(xe, num_vfs, true);
+
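+ /* the .sriov_configure contract expects the number of enabled VFs on success */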
+ return num_vfs;
+
+failed:
+ xe_sriov_pf_unprovision_vfs(xe, num_vfs);
+ xe_pm_runtime_put(xe);
+ pf_finish_vfs_enabling(xe);
+out:
+ xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
+ num_vfs, str_plural(num_vfs), ERR_PTR(err));
+ return err;
+}
+
+static int pf_disable_vfs(struct xe_device *xe)
+{
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 num_vfs = pci_num_vf(pdev);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_sriov_dbg(xe, "disabling %u VF%s\n", num_vfs, str_plural(num_vfs));
+
+ if (!num_vfs)
+ return 0;
+
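+ /*
+ * Undo the pf_enable_vfs() steps in reverse order, additionally
+ * resetting each VF so no stale state survives until the next enabling.
+ */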
+ pf_engine_activity_stats(xe, num_vfs, false);
+
+ xe_sriov_pf_sysfs_unlink_vfs(xe, num_vfs);
+
+ pci_disable_sriov(pdev);
+
+ pf_reset_vfs(xe, num_vfs);
+
+ xe_sriov_pf_unprovision_vfs(xe, num_vfs);
+
+ /* not needed anymore - see pf_enable_vfs() */
+ xe_pm_runtime_put(xe);
+
+ pf_finish_vfs_enabling(xe);
+
+ xe_sriov_info(xe, "Disabled %u VF%s\n", num_vfs, str_plural(num_vfs));
+ return 0;
+}
+
+/**
+ * xe_pci_sriov_configure - Configure SR-IOV (enable/disable VFs).
+ * @pdev: the &pci_dev
+ * @num_vfs: number of VFs to enable or zero to disable all VFs
+ *
+ * This is the Xe implementation of the &struct pci_driver.sriov_configure callback.
+ *
+ * This callback will be called by the PCI subsystem to enable or disable SR-IOV
+ * Virtual Functions (VFs) as requested by the user via the PCI sysfs interface.
+ *
+ * Return: number of configured VFs or a negative error code on failure.
+ */
+int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ int ret;
+
+ if (!IS_SRIOV_PF(xe))
+ return -ENODEV;
+
+ if (num_vfs < 0)
+ return -EINVAL;
+
+ if (num_vfs > xe_sriov_pf_get_totalvfs(xe))
+ return -ERANGE;
+
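+ /* changing the number of enabled VFs requires disabling them first */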
+ if (num_vfs && pci_num_vf(pdev))
+ return -EBUSY;
+
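+ /* wake the device, as (un)provisioning the VFs requires GuC communication */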
+ xe_pm_runtime_get(xe);
+ if (num_vfs > 0)
+ ret = pf_enable_vfs(xe, num_vfs);
+ else
+ ret = pf_disable_vfs(xe);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+/**
+ * xe_pci_sriov_get_vf_pdev() - Look up the VF's PCI device using the VF identifier.
+ * @pdev: the PF's &pci_dev
+ * @vfid: VF identifier (1-based)
+ *
+ * The caller must decrement the reference count by calling pci_dev_put().
+ *
+ * Return: the VF's &pci_dev or NULL if the VF device was not found.
+ */
+struct pci_dev *xe_pci_sriov_get_vf_pdev(struct pci_dev *pdev, unsigned int vfid)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+
+ xe_assert(xe, dev_is_pf(&pdev->dev));
+ xe_assert(xe, vfid);
+ xe_assert(xe, vfid <= pci_sriov_get_totalvfs(pdev));
+
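+ /*
+ * pci_iov_virtfn_devfn() takes a zero-based VF index, hence vfid - 1.
+ * pci_get_domain_bus_and_slot() returns the device with an elevated
+ * reference count, which the caller must drop with pci_dev_put().
+ */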
+ return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ pci_iov_virtfn_devfn(pdev, vfid - 1));
+}