Diffstat (limited to 'drivers/cdx')
-rw-r--r--  drivers/cdx/Kconfig                        19
-rw-r--r--  drivers/cdx/Makefile                       14
-rw-r--r--  drivers/cdx/cdx.c                         978
-rw-r--r--  drivers/cdx/cdx.h                          94
-rw-r--r--  drivers/cdx/cdx_msi.c                     193
-rw-r--r--  drivers/cdx/controller/Kconfig             22
-rw-r--r--  drivers/cdx/controller/Makefile             9
-rw-r--r--  drivers/cdx/controller/cdx_controller.c   258
-rw-r--r--  drivers/cdx/controller/cdx_controller.h    30
-rw-r--r--  drivers/cdx/controller/cdx_rpmsg.c        202
-rw-r--r--  drivers/cdx/controller/mc_cdx_pcol.h      708
-rw-r--r--  drivers/cdx/controller/mcdi.c             870
-rw-r--r--  drivers/cdx/controller/mcdi_functions.c   256
-rw-r--r--  drivers/cdx/controller/mcdi_functions.h   126
-rw-r--r--  drivers/cdx/controller/mcdid.h             63
15 files changed, 3842 insertions(+), 0 deletions(-)
diff --git a/drivers/cdx/Kconfig b/drivers/cdx/Kconfig
new file mode 100644
index 000000000000..1f1e360507d7
--- /dev/null
+++ b/drivers/cdx/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# CDX bus configuration
+#
+# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+#
+
+config CDX_BUS
+ bool "CDX Bus driver"
+ depends on OF && ARM64 || COMPILE_TEST
+ help
+ Driver to enable the Composable DMA Transfer (CDX) Bus. The CDX
+ bus exposes fabric devices which use composable DMA IP to the
+ APU. The CDX bus provides a mechanism for scanning and probing
+ CDX devices. CDX devices are memory mapped on the system bus
+ for embedded CPUs. The CDX bus uses the CDX controller and
+ firmware to scan these CDX devices.
+
+source "drivers/cdx/controller/Kconfig"
diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
new file mode 100644
index 000000000000..3ca7068a3052
--- /dev/null
+++ b/drivers/cdx/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for CDX
+#
+# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+#
+
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CDX_BUS"'
+
+obj-$(CONFIG_CDX_BUS) += cdx.o controller/
+
+ifdef CONFIG_GENERIC_MSI_IRQ
+obj-$(CONFIG_CDX_BUS) += cdx_msi.o
+endif
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
new file mode 100644
index 000000000000..b39af2f1937f
--- /dev/null
+++ b/drivers/cdx/cdx.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CDX bus driver.
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+/*
+ * Architecture Overview
+ * =====================
+ * CDX is a Hardware Architecture designed for AMD FPGA devices. It
+ * consists of sophisticated mechanism for interaction between FPGA,
+ * Firmware and the APUs (Application CPUs).
+ *
+ * The firmware resides on the RPU (Realtime CPUs), which interacts with
+ * the FPGA program manager and the APUs. The RPU provides a memory-mapped
+ * interface (RPU if) which is used to communicate with the APUs.
+ *
+ * The diagram below shows an overview of the CDX architecture:
+ *
+ * +--------------------------------------+
+ * | Application CPUs (APU) |
+ * | |
+ * | CDX device drivers|
+ * | Linux OS | |
+ * | CDX bus |
+ * | | |
+ * | CDX controller |
+ * | | |
+ * +-----------------------------|--------+
+ * | (discover, config,
+ * | reset, rescan)
+ * |
+ * +------------------------| RPU if |----+
+ * | | |
+ * | V |
+ * | Realtime CPUs (RPU) |
+ * | |
+ * +--------------------------------------+
+ * |
+ * +---------------------|----------------+
+ * | FPGA | |
+ * | +-----------------------+ |
+ * | | | | |
+ * | +-------+ +-------+ +-------+ |
+ * | | dev 1 | | dev 2 | | dev 3 | |
+ * | +-------+ +-------+ +-------+ |
+ * +--------------------------------------+
+ *
+ * The RPU firmware extracts the device information from the loaded FPGA
+ * image and implements a mechanism that allows the APU drivers to
+ * enumerate such devices (device personality and resource details) via
+ * a dedicated communication channel. The RPU mediates operations such as
+ * discover, reset and rescan of the FPGA devices for the APU. This is
+ * done using the memory-mapped interface provided by the RPU to the APU.
+ */
+
+#include <linux/init.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/idr.h>
+#include <linux/cdx/cdx_bus.h>
+#include <linux/iommu.h>
+#include <linux/dma-map-ops.h>
+#include <linux/debugfs.h>
+#include "cdx.h"
+
+/* Default DMA mask for devices on a CDX bus */
+#define CDX_DEFAULT_DMA_MASK (~0ULL)
+#define MAX_CDX_CONTROLLERS 16
+
+/* IDA for CDX controllers registered with the CDX bus */
+static DEFINE_IDA(cdx_controller_ida);
+/* Lock to protect controller ops */
+static DEFINE_MUTEX(cdx_controller_lock);
+/* Debugfs dir for cdx bus */
+static struct dentry *cdx_debugfs_dir;
+
+static char *compat_node_name = "xlnx,versal-net-cdx";
+
+static void cdx_destroy_res_attr(struct cdx_device *cdx_dev, int num);
+
+/**
+ * cdx_dev_reset - Reset a CDX device
+ * @dev: CDX device
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int cdx_dev_reset(struct device *dev)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config = {0};
+ struct cdx_driver *cdx_drv;
+ int ret;
+
+ cdx_drv = to_cdx_driver(dev->driver);
+ /* Notify driver that device is being reset */
+ if (cdx_drv && cdx_drv->reset_prepare)
+ cdx_drv->reset_prepare(cdx_dev);
+
+ dev_config.type = CDX_DEV_RESET_CONF;
+ ret = cdx->ops->dev_configure(cdx, cdx_dev->bus_num,
+ cdx_dev->dev_num, &dev_config);
+ if (ret)
+ dev_err(dev, "cdx device reset failed\n");
+
+ /* Notify driver that device reset is complete */
+ if (cdx_drv && cdx_drv->reset_done)
+ cdx_drv->reset_done(cdx_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdx_dev_reset);
+
+/**
+ * reset_cdx_device - Reset a CDX device
+ * @dev: CDX device
+ * @data: This is always passed as NULL, and is not used in this API,
+ * but is required here as the device_for_each_child() API expects
+ * the passed function to have this as an argument.
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+static int reset_cdx_device(struct device *dev, void *data)
+{
+ return cdx_dev_reset(dev);
+}
+
+/**
+ * cdx_unregister_device - Unregister a CDX device
+ * @dev: CDX device
+ * @data: This is always passed as NULL, and is not used in this API,
+ * but is required here as the bus_for_each_dev() API expects
+ * the passed function (cdx_unregister_device) to have this
+ * as an argument.
+ *
+ * Return: 0 on success.
+ */
+static int cdx_unregister_device(struct device *dev,
+ void *data)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+
+ if (cdx_dev->is_bus) {
+ device_for_each_child(dev, NULL, cdx_unregister_device);
+ if (cdx_dev->enabled && cdx->ops->bus_disable)
+ cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
+ } else {
+ cdx_destroy_res_attr(cdx_dev, MAX_CDX_DEV_RESOURCES);
+ debugfs_remove_recursive(cdx_dev->debugfs_dir);
+ kfree(cdx_dev->driver_override);
+ cdx_dev->driver_override = NULL;
+ }
+
+ /*
+ * Do not free cdx_dev here as it would be freed in
+ * cdx_device_release() called from within put_device().
+ */
+ device_del(&cdx_dev->dev);
+ put_device(&cdx_dev->dev);
+
+ return 0;
+}
+
+static void cdx_unregister_devices(const struct bus_type *bus)
+{
+ /* Unregister all the devices attached to cdx bus */
+ bus_for_each_dev(bus, NULL, NULL, cdx_unregister_device);
+}
+
+/**
+ * cdx_match_one_device - Tell if a CDX device structure has a matching
+ * CDX device id structure
+ * @id: single CDX device id structure to match
+ * @dev: the CDX device structure to match against
+ *
+ * Return: matching cdx_device_id structure or NULL if there is no match.
+ */
+static inline const struct cdx_device_id *
+cdx_match_one_device(const struct cdx_device_id *id,
+ const struct cdx_device *dev)
+{
+ /* Use vendor ID and device ID for matching */
+ if ((id->vendor == CDX_ANY_ID || id->vendor == dev->vendor) &&
+ (id->device == CDX_ANY_ID || id->device == dev->device) &&
+ (id->subvendor == CDX_ANY_ID || id->subvendor == dev->subsystem_vendor) &&
+ (id->subdevice == CDX_ANY_ID || id->subdevice == dev->subsystem_device) &&
+ !((id->class ^ dev->class) & id->class_mask))
+ return id;
+ return NULL;
+}
+
+/**
+ * cdx_match_id - See if a CDX device matches a given cdx_id table
+ * @ids: array of CDX device ID structures to search in
+ * @dev: the CDX device structure to match against.
+ *
+ * Used by a driver to check whether a CDX device is in its list of
+ * supported devices. Returns the matching cdx_device_id structure or
+ * NULL if there is no match.
+ *
+ * Return: matching cdx_device_id structure or NULL if there is no match.
+ */
+static inline const struct cdx_device_id *
+cdx_match_id(const struct cdx_device_id *ids, struct cdx_device *dev)
+{
+ if (ids) {
+ while (ids->vendor || ids->device) {
+ if (cdx_match_one_device(ids, dev))
+ return ids;
+ ids++;
+ }
+ }
+ return NULL;
+}
+
+int cdx_set_master(struct cdx_device *cdx_dev)
+{
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+ int ret = -EOPNOTSUPP;
+
+ dev_config.type = CDX_DEV_BUS_MASTER_CONF;
+ dev_config.bus_master_enable = true;
+ if (cdx->ops->dev_configure)
+ ret = cdx->ops->dev_configure(cdx, cdx_dev->bus_num,
+ cdx_dev->dev_num, &dev_config);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdx_set_master);
+
+int cdx_clear_master(struct cdx_device *cdx_dev)
+{
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+ int ret = -EOPNOTSUPP;
+
+ dev_config.type = CDX_DEV_BUS_MASTER_CONF;
+ dev_config.bus_master_enable = false;
+ if (cdx->ops->dev_configure)
+ ret = cdx->ops->dev_configure(cdx, cdx_dev->bus_num,
+ cdx_dev->dev_num, &dev_config);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cdx_clear_master);
+
+/**
+ * cdx_bus_match - device to driver matching callback
+ * @dev: the cdx device to match against
+ * @drv: the device driver to search for matching cdx device
+ * structures
+ *
+ * Return: true on success, false otherwise.
+ */
+static int cdx_bus_match(struct device *dev, const struct device_driver *drv)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ const struct cdx_driver *cdx_drv = to_cdx_driver(drv);
+ const struct cdx_device_id *found_id = NULL;
+ const struct cdx_device_id *ids;
+
+ if (cdx_dev->is_bus)
+ return false;
+
+ ids = cdx_drv->match_id_table;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (cdx_dev->driver_override && strcmp(cdx_dev->driver_override, drv->name))
+ return false;
+
+ found_id = cdx_match_id(ids, cdx_dev);
+ if (!found_id)
+ return false;
+
+ do {
+ /*
+ * In case override_only was set, enforce driver_override
+ * matching.
+ */
+ if (!found_id->override_only)
+ return true;
+ if (cdx_dev->driver_override)
+ return true;
+
+ ids = found_id + 1;
+ found_id = cdx_match_id(ids, cdx_dev);
+ } while (found_id);
+
+ return false;
+}
+
+static int cdx_probe(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ int error;
+
+ /*
+ * Setup MSI device data so that generic MSI alloc/free can
+ * be used by the device driver.
+ */
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
+ error = msi_setup_device_data(&cdx_dev->dev);
+ if (error)
+ return error;
+ }
+
+ error = cdx_drv->probe(cdx_dev);
+ if (error) {
+ dev_err_probe(dev, error, "%s failed\n", __func__);
+ return error;
+ }
+
+ return 0;
+}
+
+static void cdx_remove(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ if (cdx_drv && cdx_drv->remove)
+ cdx_drv->remove(cdx_dev);
+}
+
+static void cdx_shutdown(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+
+ if (cdx_dev->is_bus && cdx_dev->enabled && cdx->ops->bus_disable)
+ cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
+ if (cdx_drv && cdx_drv->shutdown)
+ cdx_drv->shutdown(cdx_dev);
+}
+
+static int cdx_dma_configure(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ u32 input_id = cdx_dev->req_id;
+ int ret;
+
+ ret = of_dma_configure_id(dev, cdx->dev->of_node, 0, &input_id);
+ if (ret && ret != -EPROBE_DEFER) {
+ dev_err(dev, "of_dma_configure_id() failed\n");
+ return ret;
+ }
+
+ /* @cdx_drv may not be valid when we're called from the IOMMU layer */
+ if (!ret && dev->driver && !cdx_drv->driver_managed_dma) {
+ ret = iommu_device_use_default_domain(dev);
+ if (ret)
+ arch_teardown_dma_ops(dev);
+ }
+
+ return 0;
+}
+
+static void cdx_dma_cleanup(struct device *dev)
+{
+ struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
+
+ if (!cdx_drv->driver_managed_dma)
+ iommu_device_unuse_default_domain(dev);
+}
+
+/* show configuration fields */
+#define cdx_config_attr(field, format_string) \
+static ssize_t \
+field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct cdx_device *cdx_dev = to_cdx_device(dev); \
+ return sysfs_emit(buf, format_string, cdx_dev->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+cdx_config_attr(vendor, "0x%04x\n");
+cdx_config_attr(device, "0x%04x\n");
+cdx_config_attr(subsystem_vendor, "0x%04x\n");
+cdx_config_attr(subsystem_device, "0x%04x\n");
+cdx_config_attr(revision, "0x%02x\n");
+cdx_config_attr(class, "0x%06x\n");
+
+static ssize_t remove_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool val;
+
+ if (kstrtobool(buf, &val) < 0)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ if (device_remove_file_self(dev, attr)) {
+ int ret;
+
+ ret = cdx_unregister_device(dev, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+static DEVICE_ATTR_WO(remove);
+
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ bool val;
+ int ret;
+
+ if (kstrtobool(buf, &val) < 0)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ if (cdx_dev->is_bus)
+ /* Reset all the devices attached to cdx bus */
+ ret = device_for_each_child(dev, NULL, reset_cdx_device);
+ else
+ ret = cdx_dev_reset(dev);
+
+ return ret < 0 ? ret : count;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ return sprintf(buf, "cdx:v%04Xd%04Xsv%04Xsd%04Xc%06X\n", cdx_dev->vendor,
+ cdx_dev->device, cdx_dev->subsystem_vendor, cdx_dev->subsystem_device,
+ cdx_dev->class);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ int ret;
+
+ if (WARN_ON(dev->bus != &cdx_bus_type))
+ return -EINVAL;
+
+ ret = driver_set_override(dev, &cdx_dev->driver_override, buf, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_unlock(dev);
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ bool enable;
+ int ret;
+
+ if (kstrtobool(buf, &enable) < 0)
+ return -EINVAL;
+
+ if (enable == cdx_dev->enabled)
+ return count;
+
+ if (enable && cdx->ops->bus_enable)
+ ret = cdx->ops->bus_enable(cdx, cdx_dev->bus_num);
+ else if (!enable && cdx->ops->bus_disable)
+ ret = cdx->ops->bus_disable(cdx, cdx_dev->bus_num);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (!ret)
+ cdx_dev->enabled = enable;
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ return sysfs_emit(buf, "%u\n", cdx_dev->enabled);
+}
+static DEVICE_ATTR_RW(enable);
+
+static umode_t cdx_dev_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cdx_device *cdx_dev;
+
+ cdx_dev = to_cdx_device(dev);
+ if (!cdx_dev->is_bus)
+ return a->mode;
+
+ return 0;
+}
+
+static umode_t cdx_bus_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cdx_device *cdx_dev;
+
+ cdx_dev = to_cdx_device(dev);
+ if (cdx_dev->is_bus)
+ return a->mode;
+
+ return 0;
+}
+
+static struct attribute *cdx_dev_attrs[] = {
+ &dev_attr_remove.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_subsystem_device.attr,
+ &dev_attr_class.attr,
+ &dev_attr_revision.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static const struct attribute_group cdx_dev_group = {
+ .attrs = cdx_dev_attrs,
+ .is_visible = cdx_dev_attrs_are_visible,
+};
+
+static struct attribute *cdx_bus_dev_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_reset.attr,
+ NULL,
+};
+
+static const struct attribute_group cdx_bus_dev_group = {
+ .attrs = cdx_bus_dev_attrs,
+ .is_visible = cdx_bus_attrs_are_visible,
+};
+
+static const struct attribute_group *cdx_dev_groups[] = {
+ &cdx_dev_group,
+ &cdx_bus_dev_group,
+ NULL,
+};
+
+static int cdx_debug_resource_show(struct seq_file *s, void *data)
+{
+ struct cdx_device *cdx_dev = s->private;
+ int i;
+
+ for (i = 0; i < MAX_CDX_DEV_RESOURCES; i++) {
+ struct resource *res = &cdx_dev->res[i];
+
+ seq_printf(s, "%pr\n", res);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cdx_debug_resource);
+
+static void cdx_device_debugfs_init(struct cdx_device *cdx_dev)
+{
+ cdx_dev->debugfs_dir = debugfs_create_dir(dev_name(&cdx_dev->dev), cdx_debugfs_dir);
+ if (IS_ERR(cdx_dev->debugfs_dir))
+ return;
+
+ debugfs_create_file("resource", 0444, cdx_dev->debugfs_dir, cdx_dev,
+ &cdx_debug_resource_fops);
+}
+
+static ssize_t rescan_store(const struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ struct cdx_controller *cdx;
+ struct platform_device *pd;
+ struct device_node *np;
+ bool val;
+
+ if (kstrtobool(buf, &val) < 0)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&cdx_controller_lock);
+
+ /* Unregister all the devices on the bus */
+ cdx_unregister_devices(&cdx_bus_type);
+
+ /* Rescan all the devices */
+ for_each_compatible_node(np, NULL, compat_node_name) {
+ pd = of_find_device_by_node(np);
+ if (!pd) {
+ of_node_put(np);
+ count = -EINVAL;
+ goto unlock;
+ }
+
+ cdx = platform_get_drvdata(pd);
+ if (cdx && cdx->controller_registered && cdx->ops->scan)
+ cdx->ops->scan(cdx);
+
+ put_device(&pd->dev);
+ }
+
+unlock:
+ mutex_unlock(&cdx_controller_lock);
+
+ return count;
+}
+static BUS_ATTR_WO(rescan);
+
+static struct attribute *cdx_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(cdx_bus);
+
+const struct bus_type cdx_bus_type = {
+ .name = "cdx",
+ .match = cdx_bus_match,
+ .probe = cdx_probe,
+ .remove = cdx_remove,
+ .shutdown = cdx_shutdown,
+ .dma_configure = cdx_dma_configure,
+ .dma_cleanup = cdx_dma_cleanup,
+ .bus_groups = cdx_bus_groups,
+ .dev_groups = cdx_dev_groups,
+};
+EXPORT_SYMBOL_GPL(cdx_bus_type);
+
+int __cdx_driver_register(struct cdx_driver *cdx_driver,
+ struct module *owner)
+{
+ int error;
+
+ cdx_driver->driver.owner = owner;
+ cdx_driver->driver.bus = &cdx_bus_type;
+
+ error = driver_register(&cdx_driver->driver);
+ if (error) {
+ pr_err("driver_register() failed for %s: %d\n",
+ cdx_driver->driver.name, error);
+ return error;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cdx_driver_register);
+
+void cdx_driver_unregister(struct cdx_driver *cdx_driver)
+{
+ driver_unregister(&cdx_driver->driver);
+}
+EXPORT_SYMBOL_GPL(cdx_driver_unregister);
+
+static void cdx_device_release(struct device *dev)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+
+ kfree(cdx_dev);
+}
+
+static const struct vm_operations_struct cdx_phys_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
+/**
+ * cdx_mmap_resource - map a CDX resource into user memory space
+ * @fp: File pointer. Not used in this function, but required where
+ * this API is registered as a callback.
+ * @kobj: kobject for mapping
+ * @attr: struct bin_attribute for the file being mapped
+ * @vma: struct vm_area_struct passed into the mmap
+ *
+ * Use the regular CDX mapping routines to map a CDX resource into userspace.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int cdx_mmap_resource(struct file *fp, struct kobject *kobj,
+ const struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(kobj_to_dev(kobj));
+ int num = (unsigned long)attr->private;
+ struct resource *res;
+ unsigned long size;
+
+ res = &cdx_dev->res[num];
+ if (iomem_is_exclusive(res->start))
+ return -EINVAL;
+
+ /* Make sure the caller is mapping a valid resource for this device */
+ size = ((cdx_resource_len(cdx_dev, num) - 1) >> PAGE_SHIFT) + 1;
+ if (vma->vm_pgoff + vma_pages(vma) > size)
+ return -EINVAL;
+
+ /*
+ * Map memory region and vm->vm_pgoff is expected to be an
+ * offset within that region.
+ */
+ vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
+ vma->vm_pgoff += (cdx_resource_start(cdx_dev, num) >> PAGE_SHIFT);
+ vma->vm_ops = &cdx_phys_vm_ops;
+ return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+}
+
+static void cdx_destroy_res_attr(struct cdx_device *cdx_dev, int num)
+{
+ int i;
+
+ /* removing the bin attributes */
+ for (i = 0; i < num; i++) {
+ struct bin_attribute *res_attr;
+
+ res_attr = cdx_dev->res_attr[i];
+ if (res_attr) {
+ sysfs_remove_bin_file(&cdx_dev->dev.kobj, res_attr);
+ kfree(res_attr);
+ }
+ }
+}
+
+#define CDX_RES_ATTR_NAME_LEN 10
+static int cdx_create_res_attr(struct cdx_device *cdx_dev, int num)
+{
+ struct bin_attribute *res_attr;
+ char *res_attr_name;
+ int ret;
+
+ res_attr = kzalloc(sizeof(*res_attr) + CDX_RES_ATTR_NAME_LEN, GFP_ATOMIC);
+ if (!res_attr)
+ return -ENOMEM;
+
+ res_attr_name = (char *)(res_attr + 1);
+
+ sysfs_bin_attr_init(res_attr);
+
+ cdx_dev->res_attr[num] = res_attr;
+ sprintf(res_attr_name, "resource%d", num);
+
+ res_attr->mmap = cdx_mmap_resource;
+ res_attr->attr.name = res_attr_name;
+ res_attr->attr.mode = 0600;
+ res_attr->size = cdx_resource_len(cdx_dev, num);
+ res_attr->private = (void *)(unsigned long)num;
+ ret = sysfs_create_bin_file(&cdx_dev->dev.kobj, res_attr);
+ if (ret)
+ kfree(res_attr);
+
+ return ret;
+}
+
+int cdx_device_add(struct cdx_dev_params *dev_params)
+{
+ struct cdx_controller *cdx = dev_params->cdx;
+ struct cdx_device *cdx_dev;
+ int ret, i;
+
+ cdx_dev = kzalloc(sizeof(*cdx_dev), GFP_KERNEL);
+ if (!cdx_dev)
+ return -ENOMEM;
+
+ /* Populate resource */
+ memcpy(cdx_dev->res, dev_params->res, sizeof(struct resource) *
+ dev_params->res_count);
+ cdx_dev->res_count = dev_params->res_count;
+
+ /* Populate CDX dev params */
+ cdx_dev->req_id = dev_params->req_id;
+ cdx_dev->msi_dev_id = dev_params->msi_dev_id;
+ cdx_dev->vendor = dev_params->vendor;
+ cdx_dev->device = dev_params->device;
+ cdx_dev->subsystem_vendor = dev_params->subsys_vendor;
+ cdx_dev->subsystem_device = dev_params->subsys_device;
+ cdx_dev->class = dev_params->class;
+ cdx_dev->revision = dev_params->revision;
+ cdx_dev->bus_num = dev_params->bus_num;
+ cdx_dev->dev_num = dev_params->dev_num;
+ cdx_dev->cdx = dev_params->cdx;
+ cdx_dev->dma_mask = CDX_DEFAULT_DMA_MASK;
+
+ /* Initialize generic device */
+ device_initialize(&cdx_dev->dev);
+ cdx_dev->dev.parent = dev_params->parent;
+ cdx_dev->dev.bus = &cdx_bus_type;
+ cdx_dev->dev.dma_mask = &cdx_dev->dma_mask;
+ cdx_dev->dev.release = cdx_device_release;
+ cdx_dev->msi_write_pending = false;
+ mutex_init(&cdx_dev->irqchip_lock);
+
+ /* Set Name */
+ dev_set_name(&cdx_dev->dev, "cdx-%02x:%02x",
+ ((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
+ cdx_dev->dev_num);
+
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
+ cdx_dev->num_msi = dev_params->num_msi;
+ dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
+ }
+
+ ret = device_add(&cdx_dev->dev);
+ if (ret) {
+ dev_err(&cdx_dev->dev,
+ "cdx device add failed: %d", ret);
+ goto fail;
+ }
+
+ /* Create resource<N> attributes */
+ for (i = 0; i < MAX_CDX_DEV_RESOURCES; i++) {
+ if (cdx_resource_flags(cdx_dev, i) & IORESOURCE_MEM) {
+ /* skip empty resources */
+ if (!cdx_resource_len(cdx_dev, i))
+ continue;
+
+ ret = cdx_create_res_attr(cdx_dev, i);
+ if (ret != 0) {
+ dev_err(&cdx_dev->dev,
+ "cdx device resource<%d> file creation failed: %d", i, ret);
+ goto resource_create_fail;
+ }
+ }
+ }
+
+ cdx_device_debugfs_init(cdx_dev);
+
+ return 0;
+resource_create_fail:
+ cdx_destroy_res_attr(cdx_dev, i);
+ device_del(&cdx_dev->dev);
+fail:
+ /*
+ * Do not free cdx_dev here as it would be freed in
+ * cdx_device_release() called from put_device().
+ */
+ put_device(&cdx_dev->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(cdx_device_add, "CDX_BUS_CONTROLLER");
+
+struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num)
+{
+ struct cdx_device *cdx_dev;
+ int ret;
+
+ cdx_dev = kzalloc(sizeof(*cdx_dev), GFP_KERNEL);
+ if (!cdx_dev)
+ return NULL;
+
+ device_initialize(&cdx_dev->dev);
+ cdx_dev->cdx = cdx;
+
+ cdx_dev->dev.parent = cdx->dev;
+ cdx_dev->dev.bus = &cdx_bus_type;
+ cdx_dev->dev.release = cdx_device_release;
+ cdx_dev->is_bus = true;
+ cdx_dev->bus_num = bus_num;
+
+ dev_set_name(&cdx_dev->dev, "cdx-%02x",
+ ((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (bus_num & CDX_BUS_NUM_MASK)));
+
+ ret = device_add(&cdx_dev->dev);
+ if (ret) {
+ dev_err(&cdx_dev->dev, "cdx bus device add failed: %d\n", ret);
+ goto device_add_fail;
+ }
+
+ if (cdx->ops->bus_enable) {
+ ret = cdx->ops->bus_enable(cdx, bus_num);
+ if (ret && ret != -EALREADY) {
+ dev_err(cdx->dev, "cdx bus enable failed: %d\n", ret);
+ goto bus_enable_fail;
+ }
+ }
+
+ cdx_dev->enabled = true;
+ return &cdx_dev->dev;
+
+bus_enable_fail:
+ device_del(&cdx_dev->dev);
+device_add_fail:
+ put_device(&cdx_dev->dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL_NS_GPL(cdx_bus_add, "CDX_BUS_CONTROLLER");
+
+int cdx_register_controller(struct cdx_controller *cdx)
+{
+ int ret;
+
+ ret = ida_alloc_range(&cdx_controller_ida, 0, MAX_CDX_CONTROLLERS - 1, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(cdx->dev,
+ "No free index available. Maximum controllers already registered\n");
+ cdx->id = (u8)MAX_CDX_CONTROLLERS;
+ return ret;
+ }
+
+ mutex_lock(&cdx_controller_lock);
+ cdx->id = ret;
+
+ /* Scan all the devices */
+ if (cdx->ops->scan)
+ cdx->ops->scan(cdx);
+ cdx->controller_registered = true;
+ mutex_unlock(&cdx_controller_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cdx_register_controller, "CDX_BUS_CONTROLLER");
+
+void cdx_unregister_controller(struct cdx_controller *cdx)
+{
+ if (cdx->id >= MAX_CDX_CONTROLLERS)
+ return;
+
+ mutex_lock(&cdx_controller_lock);
+
+ cdx->controller_registered = false;
+ device_for_each_child(cdx->dev, NULL, cdx_unregister_device);
+ ida_free(&cdx_controller_ida, cdx->id);
+
+ mutex_unlock(&cdx_controller_lock);
+}
+EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, "CDX_BUS_CONTROLLER");
+
+static int __init cdx_bus_init(void)
+{
+ int ret;
+
+ ret = bus_register(&cdx_bus_type);
+ if (!ret)
+ cdx_debugfs_dir = debugfs_create_dir(cdx_bus_type.name, NULL);
+
+ return ret;
+}
+postcore_initcall(cdx_bus_init);
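
The match and probe plumbing in cdx.c above is what a CDX endpoint driver plugs into. The sketch below is illustrative only and is not part of this patch: the vendor/device IDs are placeholders, and it assumes the struct cdx_driver layout and the cdx_driver_register()/cdx_driver_unregister() helpers declared in <linux/cdx/cdx_bus.h>.

/* Minimal CDX endpoint driver sketch (illustrative, placeholder IDs). */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cdx/cdx_bus.h>

static const struct cdx_device_id example_cdx_ids[] = {
	{ .vendor = 0x10ee, .device = 0x8084,		/* placeholders */
	  .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID },
	{ }
};
MODULE_DEVICE_TABLE(cdx, example_cdx_ids);

static int example_cdx_probe(struct cdx_device *cdx_dev)
{
	void __iomem *regs;

	/* res[0] is the first MMIO region enumerated by the controller */
	regs = devm_ioremap_resource(&cdx_dev->dev, &cdx_dev->res[0]);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* Allow the device to master the bus before starting DMA */
	return cdx_set_master(cdx_dev);
}

static int example_cdx_remove(struct cdx_device *cdx_dev)
{
	return cdx_clear_master(cdx_dev);
}

static struct cdx_driver example_cdx_driver = {
	.driver		= { .name = "example-cdx" },
	.match_id_table	= example_cdx_ids,
	.probe		= example_cdx_probe,
	.remove		= example_cdx_remove,
};

static int __init example_cdx_init(void)
{
	return cdx_driver_register(&example_cdx_driver);
}
module_init(example_cdx_init);

static void __exit example_cdx_exit(void)
{
	cdx_driver_unregister(&example_cdx_driver);
}
module_exit(example_cdx_exit);

MODULE_LICENSE("GPL");

The bus core performs DMA and IOMMU configuration through cdx_dma_configure() before probe() runs, so such a driver only needs to map its resources and enable bus mastering.
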
diff --git a/drivers/cdx/cdx.h b/drivers/cdx/cdx.h
new file mode 100644
index 000000000000..9c60c04dcf87
--- /dev/null
+++ b/drivers/cdx/cdx.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Header file for the CDX Bus
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _CDX_H_
+#define _CDX_H_
+
+#include <linux/cdx/cdx_bus.h>
+
+/**
+ * struct cdx_dev_params - CDX device parameters
+ * @cdx: CDX controller associated with the device
+ * @parent: Associated CDX Bus device
+ * @vendor: Vendor ID for CDX device
+ * @device: Device ID for CDX device
+ * @subsys_vendor: Sub vendor ID for CDX device
+ * @subsys_device: Sub device ID for CDX device
+ * @bus_num: Bus number for this CDX device
+ * @dev_num: Device number for this device
+ * @res: array of MMIO region entries
+ * @res_count: number of valid MMIO regions
+ * @req_id: Requestor ID associated with CDX device
+ * @class: Class of the CDX Device
+ * @revision: Revision of the CDX device
+ * @msi_dev_id: MSI device ID associated with CDX device
+ * @num_msi: Number of MSIs supported by the device
+ */
+struct cdx_dev_params {
+ struct cdx_controller *cdx;
+ struct device *parent;
+ u16 vendor;
+ u16 device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 bus_num;
+ u8 dev_num;
+ struct resource res[MAX_CDX_DEV_RESOURCES];
+ u8 res_count;
+ u32 req_id;
+ u32 class;
+ u8 revision;
+ u32 msi_dev_id;
+ u32 num_msi;
+};
+
+/**
+ * cdx_register_controller - Register a CDX controller and its ports
+ * on the CDX bus.
+ * @cdx: The CDX controller to register
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int cdx_register_controller(struct cdx_controller *cdx);
+
+/**
+ * cdx_unregister_controller - Unregister a CDX controller
+ * @cdx: The CDX controller to unregister
+ */
+void cdx_unregister_controller(struct cdx_controller *cdx);
+
+/**
+ * cdx_device_add - Add a CDX device. This function adds a CDX device
+ * on the CDX bus as per the device parameters provided
+ * by caller. It also creates and registers an associated
+ * Linux generic device.
+ * @dev_params: device parameters associated with the device to be created.
+ *
+ * Return: -errno on failure, 0 on success.
+ */
+int cdx_device_add(struct cdx_dev_params *dev_params);
+
+/**
+ * cdx_bus_add - Add a CDX bus. This function adds a bus on the CDX bus
+ * subsystem. It creates a CDX device for the corresponding bus and
+ * also registers an associated Linux generic device.
+ * @cdx: Associated CDX controller
+ * @bus_num: Bus number
+ *
+ * Return: associated Linux generic device pointer on success or NULL on failure.
+ */
+struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num);
+
+/**
+ * cdx_msi_domain_init - Init the CDX bus MSI domain.
+ * @dev: Device of the CDX bus controller
+ *
+ * Return: CDX MSI domain, NULL on failure
+ */
+struct irq_domain *cdx_msi_domain_init(struct device *dev);
+
+#endif /* _CDX_H_ */
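
As a usage illustration of the controller-facing API documented above (not part of this patch), a hypothetical controller back-end could populate struct cdx_dev_params by hand and register a single device as follows; every value shown is a placeholder.

#include <linux/ioport.h>
#include <linux/cdx/cdx_bus.h>
#include "cdx.h"

/* Hypothetical helper: register one discovered device on bus @bus_num. */
static int example_add_one_device(struct cdx_controller *cdx,
				  struct device *bus_dev, u8 bus_num)
{
	struct cdx_dev_params params = { 0 };

	params.cdx = cdx;
	params.parent = bus_dev;	/* device returned by cdx_bus_add() */
	params.vendor = 0x10ee;		/* placeholder vendor ID */
	params.device = 0x8084;		/* placeholder device ID */
	params.bus_num = bus_num;
	params.dev_num = 0;
	params.req_id = 0x250;		/* placeholder requestor ID */

	/* One MMIO region; start and size are placeholders */
	params.res[0].start = 0xe4000000;
	params.res[0].end = 0xe400ffff;
	params.res[0].flags = IORESOURCE_MEM;
	params.res_count = 1;

	return cdx_device_add(&params);
}

cdx_device_add() then creates the generic device along with the sysfs resource<N> attributes and debugfs entries implemented in cdx.c.
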
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
new file mode 100644
index 000000000000..91b95422b263
--- /dev/null
+++ b/drivers/cdx/cdx_msi.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD CDX bus driver MSI support
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/cdx/cdx_bus.h>
+
+#include "cdx.h"
+
+static void cdx_msi_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+
+ /*
+ * Do not write the message to the hardware here; defer that until
+ * irq_bus_sync_unlock() is called from preemptible task context.
+ */
+ msi_desc->msg = *msg;
+ cdx_dev->msi_write_pending = true;
+}
+
+static void cdx_msi_write_irq_lock(struct irq_data *irq_data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+
+ mutex_lock(&cdx_dev->irqchip_lock);
+}
+
+static void cdx_msi_write_irq_unlock(struct irq_data *irq_data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ if (!cdx_dev->msi_write_pending) {
+ mutex_unlock(&cdx_dev->irqchip_lock);
+ return;
+ }
+
+ cdx_dev->msi_write_pending = false;
+ mutex_unlock(&cdx_dev->irqchip_lock);
+
+ dev_config.msi.msi_index = msi_desc->msi_index;
+ dev_config.msi.data = msi_desc->msg.data;
+ dev_config.msi.addr = ((u64)(msi_desc->msg.address_hi) << 32) | msi_desc->msg.address_lo;
+
+ /*
+ * dev_configure() is a controller callback which can interact with
+ * Firmware or other entities, and can sleep, so invoke this function
+ * outside of the mutex held region.
+ */
+ dev_config.type = CDX_DEV_MSI_CONF;
+ if (cdx->ops->dev_configure)
+ cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num, &dev_config);
+}
+
+int cdx_enable_msi(struct cdx_device *cdx_dev)
+{
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ dev_config.type = CDX_DEV_MSI_ENABLE;
+ dev_config.msi_enable = true;
+ if (cdx->ops->dev_configure) {
+ return cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num,
+ &dev_config);
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(cdx_enable_msi);
+
+void cdx_disable_msi(struct cdx_device *cdx_dev)
+{
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ dev_config.type = CDX_DEV_MSI_ENABLE;
+ dev_config.msi_enable = false;
+ if (cdx->ops->dev_configure)
+ cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num, &dev_config);
+}
+EXPORT_SYMBOL_GPL(cdx_disable_msi);
+
+static struct irq_chip cdx_msi_irq_chip = {
+ .name = "CDX-MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = msi_domain_set_affinity,
+ .irq_write_msi_msg = cdx_msi_write_msg,
+ .irq_bus_lock = cdx_msi_write_irq_lock,
+ .irq_bus_sync_unlock = cdx_msi_write_irq_unlock
+};
+
+/* Convert an msi_desc to a unique identifier within the domain. */
+static irq_hw_number_t cdx_domain_calc_hwirq(struct cdx_device *dev,
+ struct msi_desc *desc)
+{
+ return ((irq_hw_number_t)dev->msi_dev_id << 10) | desc->msi_index;
+}
+
+static void cdx_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = cdx_domain_calc_hwirq(to_cdx_device(desc->dev), desc);
+}
+
+static int cdx_msi_prepare(struct irq_domain *msi_domain,
+ struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct device *parent = cdx_dev->cdx->dev;
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int ret;
+
+ /* Retrieve device ID from requestor ID using parent device */
+ ret = of_map_id(parent->of_node, cdx_dev->msi_dev_id, "msi-map", "msi-map-mask",
+ NULL, &dev_id);
+ if (ret) {
+ dev_err(dev, "of_map_id failed for MSI: %d\n", ret);
+ return ret;
+ }
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+ /* Set the device Id to be passed to the GIC-ITS */
+ info->scratchpad[0].ul = dev_id;
+#endif
+
+ msi_info = msi_get_domain_info(msi_domain->parent);
+
+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+}
+
+static struct msi_domain_ops cdx_msi_ops = {
+ .msi_prepare = cdx_msi_prepare,
+ .set_desc = cdx_msi_set_desc
+};
+
+static struct msi_domain_info cdx_msi_domain_info = {
+ .ops = &cdx_msi_ops,
+ .chip = &cdx_msi_irq_chip,
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS
+};
+
+struct irq_domain *cdx_msi_domain_init(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct fwnode_handle *fwnode_handle;
+ struct irq_domain *cdx_msi_domain;
+ struct device_node *parent_node;
+ struct irq_domain *parent;
+
+ fwnode_handle = of_fwnode_handle(np);
+
+ parent_node = of_parse_phandle(np, "msi-map", 1);
+ if (!parent_node) {
+ dev_err(dev, "msi-map not present on cdx controller\n");
+ return NULL;
+ }
+
+ parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
+ of_node_put(parent_node);
+ if (!parent || !msi_get_domain_info(parent)) {
+ dev_err(dev, "unable to locate ITS domain\n");
+ return NULL;
+ }
+
+ cdx_msi_domain = msi_create_irq_domain(fwnode_handle, &cdx_msi_domain_info, parent);
+ if (!cdx_msi_domain) {
+ dev_err(dev, "unable to create CDX-MSI domain\n");
+ return NULL;
+ }
+
+ dev_dbg(dev, "CDX-MSI domain created\n");
+
+ return cdx_msi_domain;
+}
+EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, "CDX_BUS_CONTROLLER");
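
On the consuming side, a CDX device driver reaches this MSI domain through the generic MSI APIs. The sketch below is illustrative only and assumes the msi_domain_alloc_irqs(), msi_get_virq() and msi_domain_free_irqs_all() helpers from <linux/msi.h>; the handler and interrupt name are placeholders.

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/cdx/cdx_bus.h>

/* Hypothetical helper: enable MSIs and request vector 0 for a CDX device. */
static int example_cdx_setup_msi(struct cdx_device *cdx_dev, u32 count,
				 irq_handler_t handler, void *priv)
{
	struct device *dev = &cdx_dev->dev;
	unsigned int virq;
	int ret;

	/* Ask firmware to enable MSI generation for this device */
	ret = cdx_enable_msi(cdx_dev);
	if (ret)
		return ret;

	/* Allocate vectors from the CDX MSI domain created above */
	ret = msi_domain_alloc_irqs(dev, MSI_DEFAULT_DOMAIN, count);
	if (ret)
		goto disable_msi;

	/* Translate MSI index 0 to a Linux IRQ number and request it */
	virq = msi_get_virq(dev, 0);
	ret = request_irq(virq, handler, 0, "example-cdx", priv);
	if (ret)
		goto free_irqs;

	return 0;

free_irqs:
	msi_domain_free_irqs_all(dev, MSI_DEFAULT_DOMAIN);
disable_msi:
	cdx_disable_msi(cdx_dev);
	return ret;
}

The actual address/data programming happens in cdx_msi_write_msg() and cdx_msi_write_irq_unlock() above, which forward the composed message to firmware through the controller's dev_configure() callback.
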
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
new file mode 100644
index 000000000000..a480b62cbd1f
--- /dev/null
+++ b/drivers/cdx/controller/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# CDX controller configuration
+#
+# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+#
+
+if CDX_BUS
+
+config CDX_CONTROLLER
+ tristate "CDX bus controller"
+ depends on HAS_DMA
+ select REMOTEPROC
+ select RPMSG
+ help
+ The CDX controller drives the CDX bus. It interacts with
+ firmware to discover the hardware devices and registers them
+ with the CDX bus. Say Y to enable the CDX hardware driver.
+
+ If unsure, say N.
+
+endif
diff --git a/drivers/cdx/controller/Makefile b/drivers/cdx/controller/Makefile
new file mode 100644
index 000000000000..f071be411d96
--- /dev/null
+++ b/drivers/cdx/controller/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for CDX controller drivers
+#
+# Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+#
+
+obj-$(CONFIG_CDX_CONTROLLER) += cdx-controller.o
+cdx-controller-objs := cdx_controller.o cdx_rpmsg.o mcdi.o mcdi_functions.o
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
new file mode 100644
index 000000000000..280f207735da
--- /dev/null
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CDX host controller driver for AMD versal-net platform.
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/cdx/cdx_bus.h>
+#include <linux/irqdomain.h>
+
+#include "cdx_controller.h"
+#include "../cdx.h"
+#include "mcdi_functions.h"
+#include "mcdid.h"
+
+static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ return MCDI_RPC_TIMEOUT;
+}
+
+static void cdx_mcdi_request(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ if (cdx_rpmsg_send(cdx, hdr, hdr_len, sdu, sdu_len))
+ dev_err(&cdx->rpdev->dev, "Failed to send rpmsg data\n");
+}
+
+static const struct cdx_mcdi_ops mcdi_ops = {
+ .mcdi_rpc_timeout = cdx_mcdi_rpc_timeout,
+ .mcdi_request = cdx_mcdi_request,
+};
+
+static int cdx_bus_enable(struct cdx_controller *cdx, u8 bus_num)
+{
+ return cdx_mcdi_bus_enable(cdx->priv, bus_num);
+}
+
+static int cdx_bus_disable(struct cdx_controller *cdx, u8 bus_num)
+{
+ return cdx_mcdi_bus_disable(cdx->priv, bus_num);
+}
+
+void cdx_rpmsg_post_probe(struct cdx_controller *cdx)
+{
+ /* Register CDX controller with CDX bus driver */
+ if (cdx_register_controller(cdx))
+ dev_err(cdx->dev, "Failed to register CDX controller\n");
+}
+
+void cdx_rpmsg_pre_remove(struct cdx_controller *cdx)
+{
+ cdx_unregister_controller(cdx);
+ cdx_mcdi_wait_for_quiescence(cdx->priv, MCDI_RPC_TIMEOUT);
+}
+
+static int cdx_configure_device(struct cdx_controller *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_device_config *dev_config)
+{
+ u16 msi_index;
+ int ret = 0;
+ u32 data;
+ u64 addr;
+
+ switch (dev_config->type) {
+ case CDX_DEV_MSI_CONF:
+ msi_index = dev_config->msi.msi_index;
+ data = dev_config->msi.data;
+ addr = dev_config->msi.addr;
+
+ ret = cdx_mcdi_write_msi(cdx->priv, bus_num, dev_num, msi_index, addr, data);
+ break;
+ case CDX_DEV_RESET_CONF:
+ ret = cdx_mcdi_reset_device(cdx->priv, bus_num, dev_num);
+ break;
+ case CDX_DEV_BUS_MASTER_CONF:
+ ret = cdx_mcdi_bus_master_enable(cdx->priv, bus_num, dev_num,
+ dev_config->bus_master_enable);
+ break;
+ case CDX_DEV_MSI_ENABLE:
+ ret = cdx_mcdi_msi_enable(cdx->priv, bus_num, dev_num, dev_config->msi_enable);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int cdx_scan_devices(struct cdx_controller *cdx)
+{
+ struct cdx_mcdi *cdx_mcdi = cdx->priv;
+ u8 bus_num, dev_num, num_cdx_bus;
+ int ret;
+
+ /* MCDI FW Read: Fetch the number of CDX buses on this controller */
+ ret = cdx_mcdi_get_num_buses(cdx_mcdi);
+ if (ret < 0) {
+ dev_err(cdx->dev,
+ "Get number of CDX buses failed: %d\n", ret);
+ return ret;
+ }
+ num_cdx_bus = (u8)ret;
+
+ for (bus_num = 0; bus_num < num_cdx_bus; bus_num++) {
+ struct device *bus_dev;
+ u8 num_cdx_dev;
+
+ /* Add the bus on cdx subsystem */
+ bus_dev = cdx_bus_add(cdx, bus_num);
+ if (!bus_dev)
+ continue;
+
+ /* MCDI FW Read: Fetch the number of devices present */
+ ret = cdx_mcdi_get_num_devs(cdx_mcdi, bus_num);
+ if (ret < 0) {
+ dev_err(cdx->dev,
+ "Get devices on CDX bus %d failed: %d\n", bus_num, ret);
+ continue;
+ }
+ num_cdx_dev = (u8)ret;
+
+ for (dev_num = 0; dev_num < num_cdx_dev; dev_num++) {
+ struct cdx_dev_params dev_params;
+
+ /* MCDI FW: Get the device config */
+ ret = cdx_mcdi_get_dev_config(cdx_mcdi, bus_num,
+ dev_num, &dev_params);
+ if (ret) {
+ dev_err(cdx->dev,
+ "CDX device config get failed for %d(bus):%d(dev), %d\n",
+ bus_num, dev_num, ret);
+ continue;
+ }
+ dev_params.cdx = cdx;
+ dev_params.parent = bus_dev;
+
+ /* Add the device to the cdx bus */
+ ret = cdx_device_add(&dev_params);
+ if (ret) {
+ dev_err(cdx->dev, "registering cdx dev: %d failed: %d\n",
+ dev_num, ret);
+ continue;
+ }
+
+ dev_dbg(cdx->dev, "CDX dev: %d on cdx bus: %d created\n",
+ dev_num, bus_num);
+ }
+ }
+
+ return 0;
+}
+
+static struct cdx_ops cdx_ops = {
+ .bus_enable = cdx_bus_enable,
+ .bus_disable = cdx_bus_disable,
+ .scan = cdx_scan_devices,
+ .dev_configure = cdx_configure_device,
+};
+
+static int xlnx_cdx_probe(struct platform_device *pdev)
+{
+ struct cdx_controller *cdx;
+ struct cdx_mcdi *cdx_mcdi;
+ int ret;
+
+ cdx_mcdi = kzalloc(sizeof(*cdx_mcdi), GFP_KERNEL);
+ if (!cdx_mcdi)
+ return -ENOMEM;
+
+ /* Store the MCDI ops */
+ cdx_mcdi->mcdi_ops = &mcdi_ops;
+ /* MCDI FW: Initialize the FW path */
+ ret = cdx_mcdi_init(cdx_mcdi);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "MCDI Initialization failed\n");
+ goto mcdi_init_fail;
+ }
+
+ cdx = kzalloc(sizeof(*cdx), GFP_KERNEL);
+ if (!cdx) {
+ ret = -ENOMEM;
+ goto cdx_alloc_fail;
+ }
+ platform_set_drvdata(pdev, cdx);
+
+ cdx->dev = &pdev->dev;
+ cdx->priv = cdx_mcdi;
+ cdx->ops = &cdx_ops;
+
+ /* Create MSI domain */
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ))
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (!cdx->msi_domain) {
+ ret = dev_err_probe(&pdev->dev, -ENODEV, "cdx_msi_domain_init() failed");
+ goto cdx_msi_fail;
+ }
+
+ ret = cdx_setup_rpmsg(pdev);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "Failed to register CDX RPMsg transport\n");
+ goto cdx_rpmsg_fail;
+ }
+
+ return 0;
+
+cdx_rpmsg_fail:
+ irq_domain_remove(cdx->msi_domain);
+cdx_msi_fail:
+ kfree(cdx);
+cdx_alloc_fail:
+ cdx_mcdi_finish(cdx_mcdi);
+mcdi_init_fail:
+ kfree(cdx_mcdi);
+
+ return ret;
+}
+
+static void xlnx_cdx_remove(struct platform_device *pdev)
+{
+ struct cdx_controller *cdx = platform_get_drvdata(pdev);
+ struct cdx_mcdi *cdx_mcdi = cdx->priv;
+
+ cdx_destroy_rpmsg(pdev);
+
+ irq_domain_remove(cdx->msi_domain);
+ kfree(cdx);
+
+ cdx_mcdi_finish(cdx_mcdi);
+ kfree(cdx_mcdi);
+}
+
+static const struct of_device_id cdx_match_table[] = {
+ {.compatible = "xlnx,versal-net-cdx",},
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, cdx_match_table);
+
+static struct platform_driver cdx_pdriver = {
+ .driver = {
+ .name = "cdx-controller",
+ .of_match_table = cdx_match_table,
+ },
+ .probe = xlnx_cdx_probe,
+ .remove = xlnx_cdx_remove,
+};
+
+module_platform_driver(cdx_pdriver);
+
+MODULE_AUTHOR("AMD Inc.");
+MODULE_DESCRIPTION("CDX controller for AMD devices");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("CDX_BUS_CONTROLLER");
diff --git a/drivers/cdx/controller/cdx_controller.h b/drivers/cdx/controller/cdx_controller.h
new file mode 100644
index 000000000000..43b7c742df87
--- /dev/null
+++ b/drivers/cdx/controller/cdx_controller.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Header file for the CDX Controller
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _CDX_CONTROLLER_H_
+#define _CDX_CONTROLLER_H_
+
+#include <linux/cdx/cdx_bus.h>
+#include "mcdi_functions.h"
+
+void cdx_rpmsg_post_probe(struct cdx_controller *cdx);
+
+void cdx_rpmsg_pre_remove(struct cdx_controller *cdx);
+
+int cdx_rpmsg_send(struct cdx_mcdi *cdx_mcdi,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len);
+
+void cdx_rpmsg_read_resp(struct cdx_mcdi *cdx_mcdi,
+ struct cdx_dword *outbuf, size_t offset,
+ size_t outlen);
+
+int cdx_setup_rpmsg(struct platform_device *pdev);
+
+void cdx_destroy_rpmsg(struct platform_device *pdev);
+
+#endif /* _CDX_CONTROLLER_H_ */
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
new file mode 100644
index 000000000000..59aabd99fa8f
--- /dev/null
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform driver for CDX bus.
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/rpmsg.h>
+#include <linux/remoteproc.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/cdx/cdx_bus.h>
+#include <linux/module.h>
+
+#include "../cdx.h"
+#include "cdx_controller.h"
+#include "mcdi_functions.h"
+#include "mcdid.h"
+
+static struct rpmsg_device_id cdx_rpmsg_id_table[] = {
+ { .name = "mcdi_ipc" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, cdx_rpmsg_id_table);
+
+int cdx_rpmsg_send(struct cdx_mcdi *cdx_mcdi,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ unsigned char *send_buf;
+ int ret;
+
+ send_buf = kzalloc(hdr_len + sdu_len, GFP_KERNEL);
+ if (!send_buf)
+ return -ENOMEM;
+
+ memcpy(send_buf, hdr, hdr_len);
+ memcpy(send_buf + hdr_len, sdu, sdu_len);
+
+ ret = rpmsg_send(cdx_mcdi->ept, send_buf, hdr_len + sdu_len);
+ kfree(send_buf);
+
+ return ret;
+}
+
+static int cdx_attach_to_rproc(struct platform_device *pdev)
+{
+ struct device_node *r5_core_node;
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+ struct device *dev;
+ struct rproc *rp;
+ int ret;
+
+ dev = &pdev->dev;
+ cdx_c = platform_get_drvdata(pdev);
+ cdx_mcdi = cdx_c->priv;
+
+ r5_core_node = of_parse_phandle(dev->of_node, "xlnx,rproc", 0);
+ if (!r5_core_node) {
+ dev_err(&pdev->dev, "xlnx,rproc: invalid phandle\n");
+ return -EINVAL;
+ }
+
+ rp = rproc_get_by_phandle(r5_core_node->phandle);
+ if (!rp) {
+ ret = -EPROBE_DEFER;
+ goto pdev_err;
+ }
+
+ /* Attach to remote processor */
+ ret = rproc_boot(rp);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to attach to remote processor\n");
+ rproc_put(rp);
+ goto pdev_err;
+ }
+
+ cdx_mcdi->r5_rproc = rp;
+pdev_err:
+ of_node_put(r5_core_node);
+ return ret;
+}
+
+static void cdx_detach_to_r5(struct platform_device *pdev)
+{
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+
+ cdx_c = platform_get_drvdata(pdev);
+ cdx_mcdi = cdx_c->priv;
+
+ rproc_detach(cdx_mcdi->r5_rproc);
+ rproc_put(cdx_mcdi->r5_rproc);
+}
+
+static int cdx_rpmsg_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct cdx_controller *cdx_c = dev_get_drvdata(&rpdev->dev);
+ struct cdx_mcdi *cdx_mcdi = cdx_c->priv;
+
+ if (len > MCDI_BUF_LEN)
+ return -EINVAL;
+
+ cdx_mcdi_process_cmd(cdx_mcdi, (struct cdx_dword *)data, len);
+
+ return 0;
+}
+
+static void cdx_rpmsg_post_probe_work(struct work_struct *work)
+{
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+
+ cdx_mcdi = container_of(work, struct cdx_mcdi, work);
+ cdx_c = dev_get_drvdata(&cdx_mcdi->rpdev->dev);
+ cdx_rpmsg_post_probe(cdx_c);
+}
+
+static int cdx_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_channel_info chinfo = {0};
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+
+ cdx_c = (struct cdx_controller *)cdx_rpmsg_id_table[0].driver_data;
+ cdx_mcdi = cdx_c->priv;
+
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = rpdev->dst;
+ strscpy(chinfo.name, cdx_rpmsg_id_table[0].name, sizeof(chinfo.name));
+
+ cdx_mcdi->ept = rpmsg_create_ept(rpdev, cdx_rpmsg_cb, NULL, chinfo);
+ if (!cdx_mcdi->ept) {
+ dev_err_probe(&rpdev->dev, -ENXIO,
+ "Failed to create ept for channel %s\n",
+ chinfo.name);
+ return -EINVAL;
+ }
+
+ cdx_mcdi->rpdev = rpdev;
+ dev_set_drvdata(&rpdev->dev, cdx_c);
+
+ schedule_work(&cdx_mcdi->work);
+ return 0;
+}
+
+static void cdx_rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct cdx_controller *cdx_c = dev_get_drvdata(&rpdev->dev);
+ struct cdx_mcdi *cdx_mcdi = cdx_c->priv;
+
+ flush_work(&cdx_mcdi->work);
+ cdx_rpmsg_pre_remove(cdx_c);
+
+ rpmsg_destroy_ept(cdx_mcdi->ept);
+ dev_set_drvdata(&rpdev->dev, NULL);
+}
+
+static struct rpmsg_driver cdx_rpmsg_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .id_table = cdx_rpmsg_id_table,
+ .probe = cdx_rpmsg_probe,
+ .remove = cdx_rpmsg_remove,
+ .callback = cdx_rpmsg_cb,
+};
+
+int cdx_setup_rpmsg(struct platform_device *pdev)
+{
+ struct cdx_controller *cdx_c;
+ struct cdx_mcdi *cdx_mcdi;
+ int ret;
+
+ /* Attach to remote processor */
+ ret = cdx_attach_to_rproc(pdev);
+ if (ret)
+ return ret;
+
+ cdx_c = platform_get_drvdata(pdev);
+ cdx_mcdi = cdx_c->priv;
+
+ /* Register RPMsg driver */
+ cdx_rpmsg_id_table[0].driver_data = (kernel_ulong_t)cdx_c;
+
+ INIT_WORK(&cdx_mcdi->work, cdx_rpmsg_post_probe_work);
+ ret = register_rpmsg_driver(&cdx_rpmsg_driver);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register cdx RPMsg driver: %d\n", ret);
+ cdx_detach_to_r5(pdev);
+ }
+
+ return ret;
+}
+
+void cdx_destroy_rpmsg(struct platform_device *pdev)
+{
+ unregister_rpmsg_driver(&cdx_rpmsg_driver);
+
+ cdx_detach_to_r5(pdev);
+}
diff --git a/drivers/cdx/controller/mc_cdx_pcol.h b/drivers/cdx/controller/mc_cdx_pcol.h
new file mode 100644
index 000000000000..832a44af963e
--- /dev/null
+++ b/drivers/cdx/controller/mc_cdx_pcol.h
@@ -0,0 +1,708 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Driver for AMD network controllers and boards
+ *
+ * Copyright (C) 2021, Xilinx, Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef MC_CDX_PCOL_H
+#define MC_CDX_PCOL_H
+
+/* The current version of the MCDI protocol. */
+#define MCDI_PCOL_VERSION 2
+
+/*
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
+/*
+ * The MC can generate events for two reasons:
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without a
+ * MCDI header. An event can be distinguished from a MCDI response by
+ * examining the first byte which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+
+/*
+ * the errno value may be followed by the (0-based) number of the
+ * first argument that could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* MC_CMD_ERR MCDI error codes. */
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 0x1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 0x2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 0x4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 0x5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 0x6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 0xb
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 0xc
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 0xd
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 0x10
+/* No such device */
+#define MC_CMD_ERR_ENODEV 0x13
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 0x16
+/* No space */
+#define MC_CMD_ERR_ENOSPC 0x1c
+/* Read-only */
+#define MC_CMD_ERR_EROFS 0x1e
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 0x20
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 0x22
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 0x23
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 0x26
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 0x3e
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 0x43
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 0x47
+/* Bad message */
+#define MC_CMD_ERR_EBADMSG 0x4a
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 0x5f
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 0x63
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 0x6b
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 0x72
+/* Stale handle. The handle references resource that no longer exists */
+#define MC_CMD_ERR_ESTALE 0x74
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/*
+ * The requested operation might require the command to be passed between
+ * MCs, and the transport doesn't support that. This should only ever be seen
+ * over the UART.
+ */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/*
+ * Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. May also be returned for other operations such as
+ * sub-variant switching.
+ */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set doesn't exist */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/*
+ * Returned by MC_CMD_TESTASSERT if the action that should have caused an
+ * assertion failed to do so.
+ */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/*
+ * This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed.
+ */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/*
+ * The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport.
+ */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/*
+ * The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary.
+ */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/*
+ * The operation could not complete because some PIO buffers are
+ * allocated
+ */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_ENUM_BUSES
+ * CDX bus hosts devices (functions) that are implemented using the Composable
+ * DMA subsystem and directly mapped into the memory space of the FPGA PSX
+ * Application Processors (APUs). As such, they only apply to the PSX APU side,
+ * not the host (PCIe). Unlike PCIe, these devices have no native configuration
+ * space or enumeration mechanism, so this message set provides a minimal
+ * interface for discovery and management (bus reset, FLR, BME) of such
+ * devices. This command returns the number of CDX buses present in the system.
+ */
+#define MC_CMD_CDX_BUS_ENUM_BUSES 0x1
+#define MC_CMD_CDX_BUS_ENUM_BUSES_MSGSET 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_BUS_ENUM_BUSES_IN msgrequest */
+#define MC_CMD_CDX_BUS_ENUM_BUSES_IN_LEN 0
+
+/* MC_CMD_CDX_BUS_ENUM_BUSES_OUT msgresponse */
+#define MC_CMD_CDX_BUS_ENUM_BUSES_OUT_LEN 4
+/*
+ * Number of CDX buses present in the system. Buses are numbered 0 to
+ * BUS_COUNT-1
+ */
+#define MC_CMD_CDX_BUS_ENUM_BUSES_OUT_BUS_COUNT_OFST 0
+#define MC_CMD_CDX_BUS_ENUM_BUSES_OUT_BUS_COUNT_LEN 4
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_ENUM_DEVICES
+ * Enumerate CDX bus devices on a given bus
+ */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES 0x2
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_MSGSET 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_BUS_ENUM_DEVICES_IN msgrequest */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_IN_LEN 4
+/*
+ * Bus number to enumerate, in range 0 to BUS_COUNT-1, as returned by
+ * MC_CMD_CDX_BUS_ENUM_BUSES_OUT
+ */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_IN_BUS_OFST 0
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_IN_BUS_LEN 4
+
+/* MC_CMD_CDX_BUS_ENUM_DEVICES_OUT msgresponse */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_LEN 4
+/*
+ * Number of devices present on the bus. Devices on the bus are numbered 0 to
+ * DEVICE_COUNT-1. Returns EAGAIN if the number of devices is unknown or if
+ * the target devices are not ready (e.g. undergoing a bus reset).
+ */
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_DEVICE_COUNT_OFST 0
+#define MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_DEVICE_COUNT_LEN 4
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_GET_DEVICE_CONFIG
+ * Returns device identification and MMIO/MSI resource data for a CDX device.
+ * The expected usage is for the caller to first retrieve the number of devices
+ * on the bus using MC_CMD_CDX_BUS_ENUM_DEVICES, then loop through the range
+ * (0, DEVICE_COUNT - 1), retrieving device resource data. May return EAGAIN
+ * if the number of exposed devices or device resources changes during enumeration (due
+ * to e.g. a PL reload / bus reset), in which case the caller is expected to
+ * restart the enumeration loop. MMIO addresses are specified in terms of bus
+ * addresses (prior to any potential IOMMU translation). For versal-net, these
+ * are equivalent to APU physical addresses. Implementation note - for this to
+ * work, the implementation needs to keep state (generation count) per client.
+ */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG 0x3
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_MSGSET 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN msgrequest */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_LEN 8
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_BUS_OFST 0
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_DEVICE_LEN 4
+
+/* MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT msgresponse */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_LEN 88
+/* 16-bit Vendor identifier, compliant with PCI-SIG VendorID assignment. */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_VENDOR_ID_OFST 0
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_VENDOR_ID_LEN 2
+/* 16-bit Device ID assigned by the vendor */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_ID_OFST 2
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_ID_LEN 2
+/*
+ * 16-bit Subsystem Vendor ID, compliant with PCI-SIG VendorID assignment.
+ * For further device differentiation, as required. 0 if unused.
+ */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_VENDOR_ID_OFST 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_VENDOR_ID_LEN 2
+/*
+ * 16-bit Subsystem Device ID assigned by the vendor. For further device
+ * differentiation, as required. 0 if unused.
+ */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_DEVICE_ID_OFST 6
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_DEVICE_ID_LEN 2
+/* 24-bit Device Class code, compliant with PCI-SIG Device Class codes */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_CLASS_OFST 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_CLASS_LEN 3
+/* 8-bit vendor-assigned revision */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_REVISION_OFST 11
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_REVISION_LEN 1
+/* Reserved (alignment) */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_RESERVED_OFST 12
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_RESERVED_LEN 4
+/* MMIO region 0 base address (bus address), 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_OFST 16
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LO_OFST 16
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LO_LBN 128
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_HI_OFST 20
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_HI_LBN 160
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE_HI_WIDTH 32
+/* MMIO region 0 size, 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_OFST 24
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LO_OFST 24
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LO_LBN 192
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_HI_OFST 28
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_HI_LBN 224
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE_HI_WIDTH 32
+/* MMIO region 1 base address (bus address), 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_OFST 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LO_OFST 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LO_LBN 256
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_HI_OFST 36
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_HI_LBN 288
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE_HI_WIDTH 32
+/* MMIO region 1 size, 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_OFST 40
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LO_OFST 40
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LO_LBN 320
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_HI_OFST 44
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_HI_LBN 352
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE_HI_WIDTH 32
+/* MMIO region 2 base address (bus address), 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_OFST 48
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LO_OFST 48
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LO_LBN 384
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_HI_OFST 52
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_HI_LBN 416
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE_HI_WIDTH 32
+/* MMIO region 2 size, 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_OFST 56
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LO_OFST 56
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LO_LBN 448
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_HI_OFST 60
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_HI_LBN 480
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE_HI_WIDTH 32
+/* MMIO region 3 base address (bus address), 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_OFST 64
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LO_OFST 64
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LO_LBN 512
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_HI_OFST 68
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_HI_LBN 544
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE_HI_WIDTH 32
+/* MMIO region 3 size, 0 if unused */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_OFST 72
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LEN 8
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LO_OFST 72
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LO_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LO_LBN 576
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_LO_WIDTH 32
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_HI_OFST 76
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_HI_LEN 4
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_HI_LBN 608
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE_HI_WIDTH 32
+/* MSI vector count */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MSI_COUNT_OFST 80
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_MSI_COUNT_LEN 4
+/* Requester ID used by device (SMMU StreamID, GIC ITS DeviceID) */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID_OFST 84
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID_LEN 4
+
+/* MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2 msgresponse */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN 92
+/* Requester ID used by device for GIC ITS DeviceID */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID_OFST 88
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID_LEN 4
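+
+/*
+ * Illustrative usage sketch (not part of the firmware interface itself): a
+ * bus driver is expected to enumerate devices roughly as follows, restarting
+ * whenever the firmware reports EAGAIN because the device set changed:
+ *
+ *	retry:
+ *	ndevs = MC_CMD_CDX_BUS_ENUM_DEVICES(bus);
+ *	for (dev = 0; dev < ndevs; dev++)
+ *		if (MC_CMD_CDX_BUS_GET_DEVICE_CONFIG(bus, dev) == EAGAIN)
+ *			goto retry;
+ */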
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_DOWN
+ * Asserting reset on the CDX bus causes all devices on the bus to be quiesced.
+ * DMA bus mastering is disabled and any pending DMA requests are flushed. Once
+ * the response is returned, the devices are guaranteed to no longer issue DMA
+ * requests or raise MSI interrupts. Further device MMIO accesses may have
+ * undefined results. While the bus reset is asserted, any of the enumeration
+ * or device configuration MCDIs will fail with EAGAIN. It is only legal to
+ * reload the relevant PL region containing CDX devices if the corresponding CDX
+ * bus is in reset. Depending on the implementation, the firmware may or may
+ * not enforce this restriction and it is up to the caller to make sure this
+ * requirement is satisfied.
+ */
+#define MC_CMD_CDX_BUS_DOWN 0x4
+#define MC_CMD_CDX_BUS_DOWN_MSGSET 0x4
+
+/* MC_CMD_CDX_BUS_DOWN_IN msgrequest */
+#define MC_CMD_CDX_BUS_DOWN_IN_LEN 4
+/* Bus number to put in reset, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_BUS_DOWN_IN_BUS_OFST 0
+#define MC_CMD_CDX_BUS_DOWN_IN_BUS_LEN 4
+
+/*
+ * MC_CMD_CDX_BUS_DOWN_OUT msgresponse: The bus is quiesced, no further
+ * upstream traffic for devices on this bus.
+ */
+#define MC_CMD_CDX_BUS_DOWN_OUT_LEN 0
+
+/***********************************/
+/*
+ * MC_CMD_CDX_BUS_UP
+ * After bus reset is de-asserted, devices are in a state which is functionally
+ * equivalent to each device having been reset with MC_CMD_CDX_DEVICE_RESET. In
+ * other words, device logic is reset in a hardware-specific way, MMIO accesses
+ * are forwarded to the device, DMA bus mastering is disabled and needs to be
+ * re-enabled with MC_CMD_CDX_DEVICE_DMA_ENABLE once the driver is ready to
+ * start servicing DMA. If the underlying number of devices or device resources
+ * changed (e.g. if PL was reloaded) while the bus was in reset, the bus driver
+ * is expected to re-enumerate the bus. Returns EALREADY if the bus was already
+ * up before the call.
+ */
+#define MC_CMD_CDX_BUS_UP 0x5
+#define MC_CMD_CDX_BUS_UP_MSGSET 0x5
+
+/* MC_CMD_CDX_BUS_UP_IN msgrequest */
+#define MC_CMD_CDX_BUS_UP_IN_LEN 4
+/* Bus number to take out of reset, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_BUS_UP_IN_BUS_OFST 0
+#define MC_CMD_CDX_BUS_UP_IN_BUS_LEN 4
+
+/* MC_CMD_CDX_BUS_UP_OUT msgresponse: The bus can now be enumerated. */
+#define MC_CMD_CDX_BUS_UP_OUT_LEN 0
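+
+/*
+ * Illustrative reset sequence (a sketch, not an additional requirement):
+ * to reload a PL region containing CDX devices, a caller would typically
+ *
+ *	MC_CMD_CDX_BUS_DOWN(bus);
+ *	... reprogram the PL ...
+ *	MC_CMD_CDX_BUS_UP(bus);
+ *	... re-enumerate with MC_CMD_CDX_BUS_ENUM_DEVICES /
+ *	    MC_CMD_CDX_BUS_GET_DEVICE_CONFIG ...
+ *
+ * since the set of devices and their resources may have changed.
+ */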
+
+/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_RESET
+ * After this call completes, device DMA and interrupts are quiesced, device
+ * logic is reset in a hardware-specific way and DMA bus mastering is disabled.
+ */
+#define MC_CMD_CDX_DEVICE_RESET 0x6
+#define MC_CMD_CDX_DEVICE_RESET_MSGSET 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_RESET_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_RESET_IN_LEN 8
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_RESET_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_RESET_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_RESET_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_RESET_IN_DEVICE_LEN 4
+
+/*
+ * MC_CMD_CDX_DEVICE_RESET_OUT msgresponse: The device is quiesced and all
+ * pending device initiated DMA has completed.
+ */
+#define MC_CMD_CDX_DEVICE_RESET_OUT_LEN 0
+
+/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_CONTROL_SET
+ * If BUS_MASTER is set to disabled, device DMA and interrupts are quiesced.
+ * Pending DMA requests and MSI interrupts are flushed and no further DMA or
+ * interrupts are issued after this command returns. If BUS_MASTER is set to
+ * enabled, the device is allowed to initiate DMA. Whether interrupts are
+ * enabled also depends on the value of the MSI_ENABLE bit. Note that, in this
+ * case, the device may start DMA before the host receives and processes the
+ * MCDI response. MSI_ENABLE masks or unmasks device interrupts only. Note that
+ * for interrupts to be delivered to the host, both BUS_MASTER and MSI_ENABLE
+ * need to be set. MMIO_REGIONS_ENABLE enables or disables host accesses to device
+ * MMIO regions. Note that an implementation is allowed to permanently set this
+ * bit to 1, in which case MC_CMD_CDX_DEVICE_CONTROL_GET will always return 1
+ * for this bit, regardless of the value set here.
+ */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET 0x7
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_MSGSET 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_CONTROL_SET_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_LEN 12
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_DEVICE_LEN 4
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_FLAGS_OFST 8
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_FLAGS_LEN 4
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_OFST 8
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_LBN 0
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_WIDTH 1
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_OFST 8
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_LBN 1
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_WIDTH 1
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MMIO_REGIONS_ENABLE_OFST 8
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MMIO_REGIONS_ENABLE_LBN 2
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MMIO_REGIONS_ENABLE_WIDTH 1
+
+/* MC_CMD_CDX_DEVICE_CONTROL_SET_OUT msgresponse */
+#define MC_CMD_CDX_DEVICE_CONTROL_SET_OUT_LEN 0
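+
+/*
+ * Worked example (illustrative only): FLAGS = 0x3 sets BUS_MASTER_ENABLE
+ * (bit 0) and MSI_ENABLE (bit 1) while leaving MMIO_REGIONS_ENABLE (bit 2)
+ * clear; FLAGS = 0x7 enables all three controls.
+ */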
+
+/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_CONTROL_GET
+ * Returns device DMA, interrupt and MMIO region access control bits. See
+ * MC_CMD_CDX_DEVICE_CONTROL_SET for definition of the available control bits.
+ */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET 0x8
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_MSGSET 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_CONTROL_GET_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_LEN 8
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_IN_DEVICE_LEN 4
+
+/* MC_CMD_CDX_DEVICE_CONTROL_GET_OUT msgresponse */
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_LEN 4
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_FLAGS_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_FLAGS_LEN 4
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_BUS_MASTER_ENABLE_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_BUS_MASTER_ENABLE_LBN 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_BUS_MASTER_ENABLE_WIDTH 1
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MSI_ENABLE_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MSI_ENABLE_LBN 1
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MSI_ENABLE_WIDTH 1
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MMIO_REGIONS_ENABLE_OFST 0
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MMIO_REGIONS_ENABLE_LBN 2
+#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MMIO_REGIONS_ENABLE_WIDTH 1
+
+/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_WRITE_MSI_MSG
+ * Populates the MSI message to be used by the hardware to raise the specified
+ * interrupt vector. Versal-net implementation specific limitations are that
+ * only 4 CDX devices with MSI interrupt capability are supported and all
+ * vectors within a device must use the same write address. The command will
+ * return EINVAL if any of these limitations is violated.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG 0x9
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_MSGSET 0x9
+#undef MC_CMD_0x9_PRIVILEGE_CTG
+
+#define MC_CMD_0x9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_LEN 28
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE_LEN 4
+/*
+ * Device-relative MSI vector number. Must be < MSI_COUNT reported for the
+ * device.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR_OFST 8
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR_LEN 4
+/* Reserved (alignment) */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_RESERVED_OFST 12
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_RESERVED_LEN 4
+/*
+ * MSI address to be used by the hardware. Typically, on ARM systems this
+ * address is translated by the IOMMU (if enabled) and it is the responsibility
+ * of the entity managing the IOMMU (APU kernel) to supply the correct IOVA
+ * here.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_OFST 16
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LEN 8
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_OFST 16
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_LEN 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_LBN 128
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_WIDTH 32
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_OFST 20
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_LEN 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_LBN 160
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_WIDTH 32
+/*
+ * MSI data to be used by the hardware. On versal-net, only the lower 16-bits
+ * are used, the remaining bits are ignored and should be set to zero.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA_OFST 24
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA_LEN 4
+
+/* MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_OUT msgresponse */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_OUT_LEN 0
+
+/***********************************/
+/* MC_CMD_V2_EXTN - Encapsulation for a v2 extended command */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/*
+ * enum: MCDI command directed to versal-net. MCDI responses of this type
+ * are not defined.
+ */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2
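+
+/*
+ * Illustrative encoding (derived from the fields above, not an additional
+ * definition): an MC_CMD_CDX_BUS_UP request is sent as a v2 extended
+ * command, i.e. header dword 0 carries CODE=MC_CMD_V2_EXTN while dword 1
+ * carries EXTENDED_CMD=0x5, ACTUAL_LEN=4 and MESSAGE_TYPE=0x2, followed by
+ * the 4-byte MC_CMD_CDX_BUS_UP_IN payload.
+ */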
+
+#endif /* MC_CDX_PCOL_H */
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
new file mode 100644
index 000000000000..2e82ffc18d89
--- /dev/null
+++ b/drivers/cdx/controller/mcdi.c
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Management-Controller-to-Driver Interface
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <net/netevent.h>
+#include <linux/log2.h>
+#include <linux/net_tstamp.h>
+#include <linux/wait.h>
+#include <linux/cdx/bitfield.h>
+
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
+
+static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
+static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
+static int cdx_mcdi_rpc_async_internal(struct cdx_mcdi *cdx,
+ struct cdx_mcdi_cmd *cmd,
+ unsigned int *handle);
+static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
+ bool allow_retry);
+static void cdx_mcdi_cmd_start_or_queue(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd);
+static bool cdx_mcdi_complete_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct cdx_dword *outbuf,
+ int len,
+ struct list_head *cleanup_list);
+static void cdx_mcdi_timeout_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct list_head *cleanup_list);
+static void cdx_mcdi_cmd_work(struct work_struct *context);
+static void cdx_mcdi_mode_fail(struct cdx_mcdi *cdx, struct list_head *cleanup_list);
+static void _cdx_mcdi_display_error(struct cdx_mcdi *cdx, unsigned int cmd,
+ size_t inlen, int raw, int arg, int err_no);
+
+static bool cdx_cmd_cancelled(struct cdx_mcdi_cmd *cmd)
+{
+ return cmd->state == MCDI_STATE_RUNNING_CANCELLED;
+}
+
+static void cdx_mcdi_cmd_release(struct kref *ref)
+{
+ kfree(container_of(ref, struct cdx_mcdi_cmd, ref));
+}
+
+static unsigned int cdx_mcdi_cmd_handle(struct cdx_mcdi_cmd *cmd)
+{
+ return cmd->handle;
+}
+
+static void _cdx_mcdi_remove_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct list_head *cleanup_list)
+{
+ /* if cancelled, the completers have already been called */
+ if (cdx_cmd_cancelled(cmd))
+ return;
+
+ if (cmd->completer) {
+ list_add_tail(&cmd->cleanup_list, cleanup_list);
+ ++mcdi->outstanding_cleanups;
+ kref_get(&cmd->ref);
+ }
+}
+
+static void cdx_mcdi_remove_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct list_head *cleanup_list)
+{
+ list_del(&cmd->list);
+ _cdx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+ cmd->state = MCDI_STATE_FINISHED;
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ if (list_empty(&mcdi->cmd_list))
+ wake_up(&mcdi->cmd_complete_wq);
+}
+
+static unsigned long cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ if (!cdx->mcdi_ops->mcdi_rpc_timeout)
+ return MCDI_RPC_TIMEOUT;
+ else
+ return cdx->mcdi_ops->mcdi_rpc_timeout(cdx, cmd);
+}
+
+/**
+ * cdx_mcdi_init - Initialize MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function allocates and initializes internal MCDI structures and resources
+ * for the CDX device, including the workqueue, locking primitives, and command
+ * tracking mechanisms. It sets the initial operating mode and prepares the device
+ * for MCDI operations.
+ *
+ * Return:
+ * * 0 - on success
+ * * -ENOMEM - if memory allocation or workqueue creation fails
+ */
+int cdx_mcdi_init(struct cdx_mcdi *cdx)
+{
+ struct cdx_mcdi_iface *mcdi;
+ int rc = -ENOMEM;
+
+ cdx->mcdi = kzalloc(sizeof(*cdx->mcdi), GFP_KERNEL);
+ if (!cdx->mcdi)
+ goto fail;
+
+ mcdi = cdx_mcdi_if(cdx);
+ mcdi->cdx = cdx;
+
+ mcdi->workqueue = alloc_ordered_workqueue("mcdi_wq", 0);
+ if (!mcdi->workqueue)
+ goto fail2;
+ mutex_init(&mcdi->iface_lock);
+ mcdi->mode = MCDI_MODE_EVENTS;
+ INIT_LIST_HEAD(&mcdi->cmd_list);
+ init_waitqueue_head(&mcdi->cmd_complete_wq);
+
+ mcdi->new_epoch = true;
+
+ return 0;
+fail2:
+ kfree(cdx->mcdi);
+ cdx->mcdi = NULL;
+fail:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(cdx_mcdi_init);
+
+/**
+ * cdx_mcdi_finish - Cleanup MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function cleans up the MCDI resources associated with a cdx_mcdi
+ * structure, including destroying the MCDI workqueue.
+ */
+void cdx_mcdi_finish(struct cdx_mcdi *cdx)
+{
+ struct cdx_mcdi_iface *mcdi;
+
+ mcdi = cdx_mcdi_if(cdx);
+ if (!mcdi)
+ return;
+
+ cdx_mcdi_wait_for_cleanup(cdx);
+
+ destroy_workqueue(mcdi->workqueue);
+ kfree(cdx->mcdi);
+ cdx->mcdi = NULL;
+}
+EXPORT_SYMBOL_GPL(cdx_mcdi_finish);
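+
+/*
+ * Typical usage (an illustrative sketch, not a complete driver): the
+ * controller driver points cdx->mcdi_ops at a transport implementation
+ * providing at least the mcdi_request() callback, then:
+ *
+ *	cdx_mcdi_init(cdx);
+ *	...
+ *	cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_ENUM_BUSES, NULL, 0,
+ *		     outbuf, sizeof(outbuf), &outlen);
+ *	...
+ *	cdx_mcdi_finish(cdx);
+ *
+ * Responses arriving from the firmware are handed back to this module via
+ * cdx_mcdi_process_cmd().
+ */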
+
+static bool cdx_mcdi_flushed(struct cdx_mcdi_iface *mcdi, bool ignore_cleanups)
+{
+ bool flushed;
+
+ mutex_lock(&mcdi->iface_lock);
+ flushed = list_empty(&mcdi->cmd_list) &&
+ (ignore_cleanups || !mcdi->outstanding_cleanups);
+ mutex_unlock(&mcdi->iface_lock);
+ return flushed;
+}
+
+/* Wait for outstanding MCDI commands to complete. */
+static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+
+ if (!mcdi)
+ return;
+
+ wait_event(mcdi->cmd_complete_wq,
+ cdx_mcdi_flushed(mcdi, false));
+}
+
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ int rc = 0;
+
+ if (!mcdi)
+ return -EINVAL;
+
+ flush_workqueue(mcdi->workqueue);
+
+ add_wait_queue(&mcdi->cmd_complete_wq, &wait);
+
+ while (!cdx_mcdi_flushed(mcdi, true)) {
+ rc = wait_woken(&wait, TASK_IDLE, timeout_jiffies);
+ if (rc)
+ continue;
+ break;
+ }
+
+ remove_wait_queue(&mcdi->cmd_complete_wq, &wait);
+
+ if (rc > 0)
+ rc = 0;
+ else if (rc == 0)
+ rc = -ETIMEDOUT;
+
+ return rc;
+}
+
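+/*
+ * 8-bit checksum over the MCDI header and payload: the byte-wise sum is
+ * inverted, so the sum of all bytes including the checksum itself is 0xff
+ * modulo 256.
+ */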
+static u8 cdx_mcdi_payload_csum(const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ u8 *p = (u8 *)hdr;
+ u8 csum = 0;
+ int i;
+
+ for (i = 0; i < hdr_len; i++)
+ csum += p[i];
+
+ p = (u8 *)sdu;
+ for (i = 0; i < sdu_len; i++)
+ csum += p[i];
+
+ return ~csum & 0xff;
+}
+
+static void cdx_mcdi_send_request(struct cdx_mcdi *cdx,
+ struct cdx_mcdi_cmd *cmd)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ const struct cdx_dword *inbuf = cmd->inbuf;
+ size_t inlen = cmd->inlen;
+ struct cdx_dword hdr[2];
+ size_t hdr_len;
+ bool not_epoch;
+ u32 xflags;
+
+ if (!mcdi)
+ return;
+
+ mcdi->prev_seq = cmd->seq;
+ mcdi->seq_held_by[cmd->seq] = cmd;
+ mcdi->db_held_by = cmd;
+ cmd->started = jiffies;
+
+ not_epoch = !mcdi->new_epoch;
+ xflags = 0;
+
+ /* MCDI v2 */
+ WARN_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+ CDX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, cmd->seq,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, not_epoch);
+ CDX_POPULATE_DWORD_3(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd->cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen,
+ MC_CMD_V2_EXTN_IN_MESSAGE_TYPE,
+ MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM);
+ hdr_len = 8;
+
+ hdr[0].cdx_u32 |= (__force __le32)(cdx_mcdi_payload_csum(hdr, hdr_len, inbuf, inlen) <<
+ MCDI_HEADER_XFLAGS_LBN);
+
+ print_hex_dump_debug("MCDI REQ HEADER: ", DUMP_PREFIX_NONE, 32, 4, hdr, hdr_len, false);
+ print_hex_dump_debug("MCDI REQ PAYLOAD: ", DUMP_PREFIX_NONE, 32, 4, inbuf, inlen, false);
+
+ cdx->mcdi_ops->mcdi_request(cdx, hdr, hdr_len, inbuf, inlen);
+
+ mcdi->new_epoch = false;
+}
+
+static int cdx_mcdi_errno(struct cdx_mcdi *cdx, unsigned int mcdi_err)
+{
+ switch (mcdi_err) {
+ case 0:
+ case MC_CMD_ERR_QUEUE_FULL:
+ return mcdi_err;
+ case MC_CMD_ERR_EPERM:
+ return -EPERM;
+ case MC_CMD_ERR_ENOENT:
+ return -ENOENT;
+ case MC_CMD_ERR_EINTR:
+ return -EINTR;
+ case MC_CMD_ERR_EAGAIN:
+ return -EAGAIN;
+ case MC_CMD_ERR_EACCES:
+ return -EACCES;
+ case MC_CMD_ERR_EBUSY:
+ return -EBUSY;
+ case MC_CMD_ERR_EINVAL:
+ return -EINVAL;
+ case MC_CMD_ERR_ERANGE:
+ return -ERANGE;
+ case MC_CMD_ERR_EDEADLK:
+ return -EDEADLK;
+ case MC_CMD_ERR_ENOSYS:
+ return -EOPNOTSUPP;
+ case MC_CMD_ERR_ETIME:
+ return -ETIME;
+ case MC_CMD_ERR_EALREADY:
+ return -EALREADY;
+ case MC_CMD_ERR_ENOSPC:
+ return -ENOSPC;
+ case MC_CMD_ERR_ENOMEM:
+ return -ENOMEM;
+ case MC_CMD_ERR_ENOTSUP:
+ return -EOPNOTSUPP;
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return -ENOBUFS;
+ case MC_CMD_ERR_MAC_EXIST:
+ return -EADDRINUSE;
+ case MC_CMD_ERR_NO_EVB_PORT:
+ return -EAGAIN;
+ default:
+ return -EPROTO;
+ }
+}
+
+static void cdx_mcdi_process_cleanup_list(struct cdx_mcdi *cdx,
+ struct list_head *cleanup_list)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ unsigned int cleanups = 0;
+
+ if (!mcdi)
+ return;
+
+ while (!list_empty(cleanup_list)) {
+ struct cdx_mcdi_cmd *cmd =
+ list_first_entry(cleanup_list,
+ struct cdx_mcdi_cmd, cleanup_list);
+ cmd->completer(cdx, cmd->cookie, cmd->rc,
+ cmd->outbuf, cmd->outlen);
+ list_del(&cmd->cleanup_list);
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ ++cleanups;
+ }
+
+ if (cleanups) {
+ bool all_done;
+
+ mutex_lock(&mcdi->iface_lock);
+ CDX_WARN_ON_PARANOID(cleanups > mcdi->outstanding_cleanups);
+ all_done = (mcdi->outstanding_cleanups -= cleanups) == 0;
+ mutex_unlock(&mcdi->iface_lock);
+ if (all_done)
+ wake_up(&mcdi->cmd_complete_wq);
+ }
+}
+
+static void _cdx_mcdi_cancel_cmd(struct cdx_mcdi_iface *mcdi,
+ unsigned int handle,
+ struct list_head *cleanup_list)
+{
+ struct cdx_mcdi_cmd *cmd;
+
+ list_for_each_entry(cmd, &mcdi->cmd_list, list)
+ if (cdx_mcdi_cmd_handle(cmd) == handle) {
+ switch (cmd->state) {
+ case MCDI_STATE_QUEUED:
+ case MCDI_STATE_RETRY:
+ pr_debug("command %#x inlen %zu cancelled in queue\n",
+ cmd->cmd, cmd->inlen);
+ /* if not yet running, properly cancel it */
+ cmd->rc = -EPIPE;
+ cdx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+ break;
+ case MCDI_STATE_RUNNING:
+ case MCDI_STATE_RUNNING_CANCELLED:
+ case MCDI_STATE_FINISHED:
+ default:
+ /* invalid state? */
+ WARN_ON(1);
+ }
+ break;
+ }
+}
+
+static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ LIST_HEAD(cleanup_list);
+
+ if (!mcdi)
+ return;
+
+ mutex_lock(&mcdi->iface_lock);
+ cdx_mcdi_timeout_cmd(mcdi, cmd, &cleanup_list);
+ mutex_unlock(&mcdi->iface_lock);
+ cdx_mcdi_process_cleanup_list(cdx, &cleanup_list);
+}
+
+struct cdx_mcdi_blocking_data {
+ struct kref ref;
+ bool done;
+ wait_queue_head_t wq;
+ int rc;
+ struct cdx_dword *outbuf;
+ size_t outlen;
+ size_t outlen_actual;
+};
+
+static void cdx_mcdi_blocking_data_release(struct kref *ref)
+{
+ kfree(container_of(ref, struct cdx_mcdi_blocking_data, ref));
+}
+
+static void cdx_mcdi_rpc_completer(struct cdx_mcdi *cdx, unsigned long cookie,
+ int rc, struct cdx_dword *outbuf,
+ size_t outlen_actual)
+{
+ struct cdx_mcdi_blocking_data *wait_data =
+ (struct cdx_mcdi_blocking_data *)cookie;
+
+ wait_data->rc = rc;
+ memcpy(wait_data->outbuf, outbuf,
+ min(outlen_actual, wait_data->outlen));
+ wait_data->outlen_actual = outlen_actual;
+	/* ensure rc/outbuf writes are visible before done is set */
+ smp_wmb();
+ wait_data->done = true;
+ wake_up(&wait_data->wq);
+ kref_put(&wait_data->ref, cdx_mcdi_blocking_data_release);
+}
+
+static int cdx_mcdi_rpc_sync(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen,
+ size_t *outlen_actual, bool quiet)
+{
+ struct cdx_mcdi_blocking_data *wait_data;
+ struct cdx_mcdi_cmd *cmd_item;
+ unsigned int handle;
+ int rc;
+
+ if (outlen_actual)
+ *outlen_actual = 0;
+
+ wait_data = kmalloc(sizeof(*wait_data), GFP_KERNEL);
+ if (!wait_data)
+ return -ENOMEM;
+
+ cmd_item = kmalloc(sizeof(*cmd_item), GFP_KERNEL);
+ if (!cmd_item) {
+ kfree(wait_data);
+ return -ENOMEM;
+ }
+
+ kref_init(&wait_data->ref);
+ wait_data->done = false;
+ init_waitqueue_head(&wait_data->wq);
+ wait_data->outbuf = outbuf;
+ wait_data->outlen = outlen;
+
+ kref_init(&cmd_item->ref);
+ cmd_item->quiet = quiet;
+ cmd_item->cookie = (unsigned long)wait_data;
+ cmd_item->completer = &cdx_mcdi_rpc_completer;
+ cmd_item->cmd = cmd;
+ cmd_item->inlen = inlen;
+ cmd_item->inbuf = inbuf;
+
+ /* Claim an extra reference for the completer to put. */
+ kref_get(&wait_data->ref);
+ rc = cdx_mcdi_rpc_async_internal(cdx, cmd_item, &handle);
+ if (rc) {
+ kref_put(&wait_data->ref, cdx_mcdi_blocking_data_release);
+ goto out;
+ }
+
+ if (!wait_event_timeout(wait_data->wq, wait_data->done,
+ cdx_mcdi_rpc_timeout(cdx, cmd)) &&
+ !wait_data->done) {
+ pr_err("MC command 0x%x inlen %zu timed out (sync)\n",
+ cmd, inlen);
+
+ cdx_mcdi_cancel_cmd(cdx, cmd_item);
+
+ wait_data->rc = -ETIMEDOUT;
+ wait_data->outlen_actual = 0;
+ }
+
+ if (outlen_actual)
+ *outlen_actual = wait_data->outlen_actual;
+ rc = wait_data->rc;
+
+out:
+ kref_put(&wait_data->ref, cdx_mcdi_blocking_data_release);
+
+ return rc;
+}
+
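+/*
+ * Pick the next MCDI sequence number, searching round-robin from the one
+ * used most recently. Returns false if every sequence number is currently
+ * held by an in-flight command.
+ */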
+static bool cdx_mcdi_get_seq(struct cdx_mcdi_iface *mcdi, unsigned char *seq)
+{
+ *seq = mcdi->prev_seq;
+ do {
+ *seq = (*seq + 1) % ARRAY_SIZE(mcdi->seq_held_by);
+ } while (mcdi->seq_held_by[*seq] && *seq != mcdi->prev_seq);
+ return !mcdi->seq_held_by[*seq];
+}
+
+static int cdx_mcdi_rpc_async_internal(struct cdx_mcdi *cdx,
+ struct cdx_mcdi_cmd *cmd,
+ unsigned int *handle)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+ LIST_HEAD(cleanup_list);
+
+ if (!mcdi) {
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ return -ENETDOWN;
+ }
+
+ if (mcdi->mode == MCDI_MODE_FAIL) {
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ return -ENETDOWN;
+ }
+
+ cmd->mcdi = mcdi;
+ INIT_WORK(&cmd->work, cdx_mcdi_cmd_work);
+ INIT_LIST_HEAD(&cmd->list);
+ INIT_LIST_HEAD(&cmd->cleanup_list);
+ cmd->rc = 0;
+ cmd->outbuf = NULL;
+ cmd->outlen = 0;
+
+ queue_work(mcdi->workqueue, &cmd->work);
+ return 0;
+}
+
+static void cdx_mcdi_cmd_start_or_queue(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd)
+{
+ struct cdx_mcdi *cdx = mcdi->cdx;
+ u8 seq;
+
+ if (!mcdi->db_held_by &&
+ cdx_mcdi_get_seq(mcdi, &seq)) {
+ cmd->seq = seq;
+ cmd->reboot_seen = false;
+ cdx_mcdi_send_request(cdx, cmd);
+ cmd->state = MCDI_STATE_RUNNING;
+ } else {
+ cmd->state = MCDI_STATE_QUEUED;
+ }
+}
+
+/* try to advance other commands */
+static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
+ bool allow_retry)
+{
+ struct cdx_mcdi_cmd *cmd, *tmp;
+
+ list_for_each_entry_safe(cmd, tmp, &mcdi->cmd_list, list)
+ if (cmd->state == MCDI_STATE_QUEUED ||
+ (cmd->state == MCDI_STATE_RETRY && allow_retry))
+ cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
+}
+
+/**
+ * cdx_mcdi_process_cmd - Process an incoming MCDI response
+ * @cdx: Handle to the CDX MCDI structure
+ * @outbuf: Pointer to the response buffer received from the management controller
+ * @len: Length of the response buffer in bytes
+ *
+ * This function handles a response from the management controller. It locates the
+ * corresponding command using the sequence number embedded in the header,
+ * completes the command if it is still pending, and initiates any necessary cleanup.
+ *
+ * The function assumes that the response buffer is well-formed and at least one
+ * dword in size.
+ */
+void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
+{
+ struct cdx_mcdi_iface *mcdi;
+ struct cdx_mcdi_cmd *cmd;
+ LIST_HEAD(cleanup_list);
+ unsigned int respseq;
+
+ if (!len || !outbuf) {
+ pr_err("Got empty MC response\n");
+ return;
+ }
+
+ mcdi = cdx_mcdi_if(cdx);
+ if (!mcdi)
+ return;
+
+ respseq = CDX_DWORD_FIELD(outbuf[0], MCDI_HEADER_SEQ);
+
+ mutex_lock(&mcdi->iface_lock);
+ cmd = mcdi->seq_held_by[respseq];
+
+ if (cmd) {
+ if (cmd->state == MCDI_STATE_FINISHED) {
+ mutex_unlock(&mcdi->iface_lock);
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ return;
+ }
+
+ cdx_mcdi_complete_cmd(mcdi, cmd, outbuf, len, &cleanup_list);
+ } else {
+ pr_err("MC response unexpected for seq : %0X\n", respseq);
+ }
+
+ mutex_unlock(&mcdi->iface_lock);
+
+ cdx_mcdi_process_cleanup_list(mcdi->cdx, &cleanup_list);
+}
+EXPORT_SYMBOL_GPL(cdx_mcdi_process_cmd);
+
+static void cdx_mcdi_cmd_work(struct work_struct *context)
+{
+ struct cdx_mcdi_cmd *cmd =
+ container_of(context, struct cdx_mcdi_cmd, work);
+ struct cdx_mcdi_iface *mcdi = cmd->mcdi;
+
+ mutex_lock(&mcdi->iface_lock);
+
+ cmd->handle = mcdi->prev_handle++;
+ list_add_tail(&cmd->list, &mcdi->cmd_list);
+ cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
+
+ mutex_unlock(&mcdi->iface_lock);
+}
+
+/*
+ * Returns true if the MCDI module is finished with the command.
+ * (examples of false would be if the command was proxied, or it was
+ * rejected by the MC due to lack of resources and requeued).
+ */
+static bool cdx_mcdi_complete_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct cdx_dword *outbuf,
+ int len,
+ struct list_head *cleanup_list)
+{
+ size_t resp_hdr_len, resp_data_len;
+ struct cdx_mcdi *cdx = mcdi->cdx;
+ unsigned int respcmd, error;
+ bool completed = false;
+ int rc;
+
+ /* ensure the command can't go away before this function returns */
+ kref_get(&cmd->ref);
+
+ respcmd = CDX_DWORD_FIELD(outbuf[0], MCDI_HEADER_CODE);
+ error = CDX_DWORD_FIELD(outbuf[0], MCDI_HEADER_ERROR);
+
+ if (respcmd != MC_CMD_V2_EXTN) {
+ resp_hdr_len = 4;
+ resp_data_len = CDX_DWORD_FIELD(outbuf[0], MCDI_HEADER_DATALEN);
+ } else {
+ resp_data_len = 0;
+ resp_hdr_len = 8;
+ if (len >= 8)
+ resp_data_len =
+ CDX_DWORD_FIELD(outbuf[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ if ((resp_hdr_len + resp_data_len) > len) {
+ pr_warn("Incomplete MCDI response received %d. Expected %zu\n",
+ len, (resp_hdr_len + resp_data_len));
+ resp_data_len = 0;
+ }
+
+ print_hex_dump_debug("MCDI RESP HEADER: ", DUMP_PREFIX_NONE, 32, 4,
+ outbuf, resp_hdr_len, false);
+ print_hex_dump_debug("MCDI RESP PAYLOAD: ", DUMP_PREFIX_NONE, 32, 4,
+ outbuf + (resp_hdr_len / 4), resp_data_len, false);
+
+ if (error && resp_data_len == 0) {
+ /* MC rebooted during command */
+ rc = -EIO;
+ } else {
+ if (WARN_ON_ONCE(error && resp_data_len < 4))
+ resp_data_len = 4;
+ if (error) {
+ rc = CDX_DWORD_FIELD(outbuf[resp_hdr_len / 4], CDX_DWORD);
+ if (!cmd->quiet) {
+ int err_arg = 0;
+
+ if (resp_data_len >= MC_CMD_ERR_ARG_OFST + 4) {
+ int offset = (resp_hdr_len + MC_CMD_ERR_ARG_OFST) / 4;
+
+ err_arg = CDX_DWORD_VAL(outbuf[offset]);
+ }
+
+ _cdx_mcdi_display_error(cdx, cmd->cmd,
+ cmd->inlen, rc, err_arg,
+ cdx_mcdi_errno(cdx, rc));
+ }
+ rc = cdx_mcdi_errno(cdx, rc);
+ } else {
+ rc = 0;
+ }
+ }
+
+ /* free doorbell */
+ if (mcdi->db_held_by == cmd)
+ mcdi->db_held_by = NULL;
+
+ if (cdx_cmd_cancelled(cmd)) {
+ list_del(&cmd->list);
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+ completed = true;
+ } else if (rc == MC_CMD_ERR_QUEUE_FULL) {
+ cmd->state = MCDI_STATE_RETRY;
+ } else {
+ cmd->rc = rc;
+ cmd->outbuf = outbuf + DIV_ROUND_UP(resp_hdr_len, 4);
+ cmd->outlen = resp_data_len;
+ cdx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+ completed = true;
+ }
+
+ /* free sequence number and buffer */
+ mcdi->seq_held_by[cmd->seq] = NULL;
+
+ cdx_mcdi_start_or_queue(mcdi, rc != MC_CMD_ERR_QUEUE_FULL);
+
+ /* wake up anyone waiting for flush */
+ wake_up(&mcdi->cmd_complete_wq);
+
+ kref_put(&cmd->ref, cdx_mcdi_cmd_release);
+
+ return completed;
+}
+
+static void cdx_mcdi_timeout_cmd(struct cdx_mcdi_iface *mcdi,
+ struct cdx_mcdi_cmd *cmd,
+ struct list_head *cleanup_list)
+{
+ struct cdx_mcdi *cdx = mcdi->cdx;
+
+ pr_err("MC command 0x%x inlen %zu state %d timed out after %u ms\n",
+ cmd->cmd, cmd->inlen, cmd->state,
+ jiffies_to_msecs(jiffies - cmd->started));
+
+ cmd->rc = -ETIMEDOUT;
+ cdx_mcdi_remove_cmd(mcdi, cmd, cleanup_list);
+
+ cdx_mcdi_mode_fail(cdx, cleanup_list);
+}
+
+/**
+ * cdx_mcdi_rpc - Issue an MCDI command and wait for completion
+ * @cdx: Handle to the CDX MCDI structure
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes. Must be a multiple
+ * of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
+ * @outbuf: Response buffer. May be %NULL if @outlen is 0.
+ * @outlen: Length of response buffer, in bytes. If the actual
+ * response is longer than @outlen & ~3, it will be truncated
+ * to that length.
+ * @outlen_actual: Pointer through which to return the actual response
+ * length. May be %NULL if this is not needed.
+ *
+ * This function may sleep and therefore must be called in process
+ * context.
+ *
+ * Return: A negative error code, or zero if successful. The error
+ * code may come from the MCDI response or may indicate a failure
+ * to communicate with the MC. In the former case, the response
+ * will still be copied to @outbuf and *@outlen_actual will be
+ * set accordingly. In the latter case, *@outlen_actual will be
+ * set to zero.
+ */
+int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ struct cdx_dword *outbuf, size_t outlen,
+ size_t *outlen_actual)
+{
+ return cdx_mcdi_rpc_sync(cdx, cmd, inbuf, inlen, outbuf, outlen,
+ outlen_actual, false);
+}
+EXPORT_SYMBOL_GPL(cdx_mcdi_rpc);
+
+/**
+ * cdx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @cdx: Handle to the CDX MCDI structure
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if the MCDI interface has not been initialised or
+ * has been put into the failed state due to an earlier error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in process context, when one of the following occurs:
+ * (a) the response to the command is received and processed
+ * (b) the command is cancelled or times out
+ */
+int
+cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ struct cdx_mcdi_cmd *cmd_item =
+ kmalloc(sizeof(struct cdx_mcdi_cmd) + inlen, GFP_ATOMIC);
+
+ if (!cmd_item)
+ return -ENOMEM;
+
+ kref_init(&cmd_item->ref);
+ cmd_item->quiet = true;
+ cmd_item->cookie = cookie;
+ cmd_item->completer = complete;
+ cmd_item->cmd = cmd;
+ cmd_item->inlen = inlen;
+ /* inbuf is probably not valid after return, so take a copy */
+ cmd_item->inbuf = (struct cdx_dword *)(cmd_item + 1);
+ memcpy(cmd_item + 1, inbuf, inlen);
+
+ return cdx_mcdi_rpc_async_internal(cdx, cmd_item, NULL);
+}
+
+static void _cdx_mcdi_display_error(struct cdx_mcdi *cdx, unsigned int cmd,
+ size_t inlen, int raw, int arg, int err_no)
+{
+ pr_err("MC command 0x%x inlen %d failed err_no=%d (raw=%d) arg=%d\n",
+ cmd, (int)inlen, err_no, raw, arg);
+}
+
+/*
+ * Set MCDI mode to fail to prevent any new commands, then cancel any
+ * outstanding commands.
+ * Caller must hold the mcdi iface_lock.
+ */
+static void cdx_mcdi_mode_fail(struct cdx_mcdi *cdx, struct list_head *cleanup_list)
+{
+ struct cdx_mcdi_iface *mcdi = cdx_mcdi_if(cdx);
+
+ if (!mcdi)
+ return;
+
+ mcdi->mode = MCDI_MODE_FAIL;
+
+ while (!list_empty(&mcdi->cmd_list)) {
+ struct cdx_mcdi_cmd *cmd;
+
+ cmd = list_first_entry(&mcdi->cmd_list, struct cdx_mcdi_cmd,
+ list);
+ _cdx_mcdi_cancel_cmd(mcdi, cdx_mcdi_cmd_handle(cmd), cleanup_list);
+ }
+}
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
new file mode 100644
index 000000000000..8ae2d99be81e
--- /dev/null
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/module.h>
+
+#include "mcdi_functions.h"
+
+int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_ENUM_BUSES_OUT_LEN);
+ size_t outlen;
+ int ret;
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_ENUM_BUSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (ret)
+ return ret;
+
+ if (outlen != MC_CMD_CDX_BUS_ENUM_BUSES_OUT_LEN)
+ return -EIO;
+
+ return MCDI_DWORD(outbuf, CDX_BUS_ENUM_BUSES_OUT_BUS_COUNT);
+}
+
+int cdx_mcdi_get_num_devs(struct cdx_mcdi *cdx, int bus_num)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_ENUM_DEVICES_IN_LEN);
+ size_t outlen;
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_BUS_ENUM_DEVICES_IN_BUS, bus_num);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_ENUM_DEVICES, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (ret)
+ return ret;
+
+ if (outlen != MC_CMD_CDX_BUS_ENUM_DEVICES_OUT_LEN)
+ return -EIO;
+
+ return MCDI_DWORD(outbuf, CDX_BUS_ENUM_DEVICES_OUT_DEVICE_COUNT);
+}
+
+int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_dev_params *dev_params)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_LEN);
+ struct resource *res = &dev_params->res[0];
+ size_t outlen;
+ u32 req_id;
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_BUS_GET_DEVICE_CONFIG_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_BUS_GET_DEVICE_CONFIG_IN_DEVICE, dev_num);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (ret)
+ return ret;
+
+ if (outlen != MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN)
+ return -EIO;
+
+ dev_params->bus_num = bus_num;
+ dev_params->dev_num = dev_num;
+
+ req_id = MCDI_DWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID);
+ dev_params->req_id = req_id;
+
+ dev_params->msi_dev_id = MCDI_DWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID);
+
+ dev_params->res_count = 0;
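+	/*
+	 * Populate one IORESOURCE_MEM entry per MMIO region that the
+	 * firmware reports with a non-zero size; a size of 0 means the
+	 * region is unused.
+	 */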
+ if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE) != 0) {
+ res[dev_params->res_count].start =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE);
+ res[dev_params->res_count].end =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_BASE) +
+ MCDI_QWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE) - 1;
+ res[dev_params->res_count].flags = IORESOURCE_MEM;
+ dev_params->res_count++;
+ }
+
+ if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE) != 0) {
+ res[dev_params->res_count].start =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE);
+ res[dev_params->res_count].end =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_BASE) +
+ MCDI_QWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION1_SIZE) - 1;
+ res[dev_params->res_count].flags = IORESOURCE_MEM;
+ dev_params->res_count++;
+ }
+
+ if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE) != 0) {
+ res[dev_params->res_count].start =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE);
+ res[dev_params->res_count].end =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_BASE) +
+ MCDI_QWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION2_SIZE) - 1;
+ res[dev_params->res_count].flags = IORESOURCE_MEM;
+ dev_params->res_count++;
+ }
+
+ if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE) != 0) {
+ res[dev_params->res_count].start =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE);
+ res[dev_params->res_count].end =
+ MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_BASE) +
+ MCDI_QWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION3_SIZE) - 1;
+ res[dev_params->res_count].flags = IORESOURCE_MEM;
+ dev_params->res_count++;
+ }
+
+ dev_params->vendor = MCDI_WORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_VENDOR_ID);
+ dev_params->device = MCDI_WORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_ID);
+ dev_params->subsys_vendor = MCDI_WORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_VENDOR_ID);
+ dev_params->subsys_device = MCDI_WORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_SUBSYS_DEVICE_ID);
+ dev_params->class = MCDI_DWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_CLASS) & 0xFFFFFF;
+ dev_params->revision = MCDI_BYTE(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_REVISION);
+ dev_params->num_msi = MCDI_DWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MSI_COUNT);
+
+ return 0;
+}
+
+int cdx_mcdi_bus_enable(struct cdx_mcdi *cdx, u8 bus_num)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_UP_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_BUS_UP_IN_BUS, bus_num);
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_UP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
+
+int cdx_mcdi_bus_disable(struct cdx_mcdi *cdx, u8 bus_num)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_DOWN_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_BUS_DOWN_IN_BUS, bus_num);
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_BUS_DOWN, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
+
+int cdx_mcdi_write_msi(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num,
+ u32 msi_vector, u64 msi_address, u32 msi_data)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE, dev_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR, msi_vector);
+ MCDI_SET_QWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS, msi_address);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA, msi_data);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_DEVICE_WRITE_MSI_MSG, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
+
+int cdx_mcdi_reset_device(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_RESET_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_RESET_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_RESET_IN_DEVICE, dev_num);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_DEVICE_RESET, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
+
+static int cdx_mcdi_ctrl_flag_get(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, u32 *flags)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_CONTROL_GET_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_LEN);
+ size_t outlen;
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_CONTROL_GET_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_CONTROL_GET_IN_DEVICE, dev_num);
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_DEVICE_CONTROL_GET, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ if (ret)
+ return ret;
+
+ if (outlen != MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_LEN)
+ return -EIO;
+
+ *flags = MCDI_DWORD(outbuf, CDX_DEVICE_CONTROL_GET_OUT_FLAGS);
+
+ return 0;
+}
+
+static int cdx_mcdi_ctrl_flag_set(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable, int bit_pos)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_CONTROL_SET_IN_LEN);
+ u32 flags;
+ int ret;
+
+ /*
+ * Get flags and then set/reset bit at bit_pos according to
+ * the input params.
+ */
+ ret = cdx_mcdi_ctrl_flag_get(cdx, bus_num, dev_num, &flags);
+ if (ret)
+ return ret;
+
+	flags &= ~BIT(bit_pos);
+	if (enable)
+		flags |= BIT(bit_pos);
+
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_CONTROL_SET_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_CONTROL_SET_IN_DEVICE, dev_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_CONTROL_SET_IN_FLAGS, flags);
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_DEVICE_CONTROL_SET, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+
+ return ret;
+}
+
+int cdx_mcdi_bus_master_enable(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable)
+{
+ return cdx_mcdi_ctrl_flag_set(cdx, bus_num, dev_num, enable,
+ MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_LBN);
+}
+
+int cdx_mcdi_msi_enable(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable)
+{
+ return cdx_mcdi_ctrl_flag_set(cdx, bus_num, dev_num, enable,
+ MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_LBN);
+}
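
The functions above are thin wrappers around individual MCDI commands, so sequencing is left to the caller. A minimal sketch of that sequencing, assuming a hypothetical caller with hard-coded bus/device numbers and an illustrative MSI address/data pair (none of which come from this patch):

static int example_setup_device(struct cdx_mcdi *cdx)
{
	u8 bus_num = 0, dev_num = 0;
	int ret;

	ret = cdx_mcdi_bus_enable(cdx, bus_num);
	if (ret)
		return ret;

	/* Vector 0; the address is expected to be an IOMMU-translated IOVA */
	ret = cdx_mcdi_write_msi(cdx, bus_num, dev_num, 0, 0xfe440000ULL, 0x1);
	if (ret)
		goto bus_down;

	ret = cdx_mcdi_bus_master_enable(cdx, bus_num, dev_num, true);
	if (ret)
		goto bus_down;

	ret = cdx_mcdi_msi_enable(cdx, bus_num, dev_num, true);
	if (ret)
		goto bus_down;

	return 0;

bus_down:
	cdx_mcdi_bus_disable(cdx, bus_num);
	return ret;
}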
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
new file mode 100644
index 000000000000..57fd1bae706b
--- /dev/null
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Header file for MCDI FW interaction for CDX bus.
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDI_FUNCTIONS_H
+#define CDX_MCDI_FUNCTIONS_H
+
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
+#include "../cdx.h"
+
+/**
+ * cdx_mcdi_get_num_buses - Get the total number of buses on
+ * the controller.
+ * @cdx: pointer to MCDI interface.
+ *
+ * Return: total number of buses available on the controller,
+ * <0 on failure
+ */
+int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx);
+
+/**
+ * cdx_mcdi_get_num_devs - Get the total number of devices on
+ * a particular bus of the controller.
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ *
+ * Return: total number of devices available on the bus, <0 on failure
+ */
+int cdx_mcdi_get_num_devs(struct cdx_mcdi *cdx, int bus_num);
+
+/**
+ * cdx_mcdi_get_dev_config - Get configuration for a particular
+ * bus_num:dev_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @dev_params: Pointer to cdx_dev_params, populated by this function
+ *	with the configuration corresponding to the provided
+ *	bus_num:dev_num.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
+ u8 bus_num, u8 dev_num,
+ struct cdx_dev_params *dev_params);
+
+/**
+ * cdx_mcdi_bus_enable - Enable CDX bus represented by bus_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_bus_enable(struct cdx_mcdi *cdx, u8 bus_num);
+
+/**
+ * cdx_mcdi_bus_disable - Disable CDX bus represented by bus_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_bus_disable(struct cdx_mcdi *cdx, u8 bus_num);
+
+/**
+ * cdx_mcdi_write_msi - Write MSI configuration for CDX device
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @msi_vector: Device-relative MSI vector number.
+ * Must be < MSI_COUNT reported for the device.
+ * @msi_address: MSI address to be used by the hardware. Typically, on ARM
+ * systems this address is translated by the IOMMU (if enabled) and
+ * it is the responsibility of the entity managing the IOMMU (APU kernel)
+ * to supply the correct IOVA here.
+ * @msi_data: MSI data to be used by the hardware. On versal-net, only the
+ *	lower 16 bits are used; the remaining bits are ignored and should be
+ *	set to zero.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_write_msi(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num,
+ u32 msi_vector, u64 msi_address, u32 msi_data);
+
+/**
+ * cdx_mcdi_reset_device - Reset cdx device represented by bus_num:dev_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_reset_device(struct cdx_mcdi *cdx,
+ u8 bus_num, u8 dev_num);
+
+/**
+ * cdx_mcdi_bus_master_enable - Set/Reset bus mastering for cdx device
+ * represented by bus_num:dev_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @enable: Enable bus mastering if set, disable otherwise.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_bus_master_enable(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable);
+
+/**
+ * cdx_mcdi_msi_enable - Enable/Disable MSIs for cdx device represented
+ * by bus_num:dev_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @enable: Enable MSIs if set, disable otherwise.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_msi_enable(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable);
+
+#endif /* CDX_MCDI_FUNCTIONS_H */
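
The first three declarations form the enumeration path: count the buses, count the devices on each bus, then fetch each device's configuration. A rough sketch of how a controller could walk the topology, assuming a hypothetical example_scan() helper and skipping the per-device registration the real bus code performs:

static int example_scan(struct cdx_mcdi *cdx)
{
	struct cdx_dev_params params;
	int num_buses, num_devs;
	int bus, dev, ret;

	num_buses = cdx_mcdi_get_num_buses(cdx);
	if (num_buses < 0)
		return num_buses;

	for (bus = 0; bus < num_buses; bus++) {
		num_devs = cdx_mcdi_get_num_devs(cdx, bus);
		if (num_devs < 0)
			return num_devs;

		for (dev = 0; dev < num_devs; dev++) {
			ret = cdx_mcdi_get_dev_config(cdx, bus, dev, &params);
			if (ret)
				return ret;
			/* params now holds the IDs, class, revision, MSI count
			 * and MMIO resources for bus:dev
			 */
		}
	}

	return 0;
}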
diff --git a/drivers/cdx/controller/mcdid.h b/drivers/cdx/controller/mcdid.h
new file mode 100644
index 000000000000..7fc29f099265
--- /dev/null
+++ b/drivers/cdx/controller/mcdid.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDID_H
+#define CDX_MCDID_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "mc_cdx_pcol.h"
+
+#ifdef DEBUG
+#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define CDX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
+{
+ return cdx->mcdi ? &cdx->mcdi->iface : NULL;
+}
+
+int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ CDX_DWORD, (u32)(_value)); \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ CDX_DWORD, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
+ (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
+
+#endif /* CDX_MCDID_H */
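
As the alignment comment above notes, 64-bit MCDI fields are only 32-bit aligned, so MCDI_SET_QWORD stores a value as two consecutive dwords (bits 31:0, then bits 63:32) and MCDI_QWORD reassembles them. A small round-trip sketch, reusing the MSI address field from this patch purely as an example:

static void example_qword_roundtrip(void)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_LEN);
	u64 iova = 0xafe440000ULL;	/* arbitrary example value */

	/* written as two 32-bit-aligned dwords */
	MCDI_SET_QWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS, iova);

	/* read back as a single 64-bit value */
	WARN_ON(MCDI_QWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS) != iova);
}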