Diffstat (limited to 'drivers/cxl/pci.c')
-rw-r--r--	drivers/cxl/pci.c | 2121
1 file changed, 823 insertions, 1298 deletions
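The heart of this rework is the mailbox send path, which implements the CXL 2.0 8.2.8.4 doorbell handshake step by step. As a reading aid for the hunks below, here is a condensed sketch of that sequence; ex_mbox_send() is a hypothetical standalone helper (timeout handling, payload copy, and background commands are elided), while the register offsets and masks are the cxlmem.h/<linux/bitfield.h> definitions the diff itself uses:

static int ex_mbox_send(void __iomem *mbox, u16 opcode)
{
	u64 cmd, status;

	/* #1: the doorbell must be clear before software writes a command */
	if (readl(mbox + CXLDEV_MBOX_CTRL_OFFSET) & CXLDEV_MBOX_CTRL_DOORBELL)
		return -EBUSY;

	/* #2/#3: program the command register (input payload elided) */
	cmd = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK, opcode);
	writeq(cmd, mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4: ring the doorbell to pass ownership to the device */
	writel(CXLDEV_MBOX_CTRL_DOORBELL, mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5: poll until the device clears the doorbell (timeout elided) */
	while (readl(mbox + CXLDEV_MBOX_CTRL_OFFSET) & CXLDEV_MBOX_CTRL_DOORBELL)
		cpu_relax();

	/* #6: the status register carries the device's mailbox return code */
	status = readq(mbox + CXLDEV_MBOX_STATUS_OFFSET);
	return FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status);
}

Everything __cxl_pci_mbox_send_cmd() adds on top (busy and sanitize checks, background completion, output truncation) hangs off these numbered steps.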
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 4cf351a3cf99..0be4e508affe 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -1,275 +1,181 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ -#include <uapi/linux/cxl_mem.h> -#include <linux/security.h> -#include <linux/debugfs.h> +#include <linux/unaligned.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/moduleparam.h> #include <linux/module.h> +#include <linux/delay.h> #include <linux/sizes.h> #include <linux/mutex.h> #include <linux/list.h> -#include <linux/cdev.h> -#include <linux/idr.h> #include <linux/pci.h> +#include <linux/aer.h> #include <linux/io.h> -#include <linux/io-64-nonatomic-lo-hi.h> -#include "pci.h" +#include <cxl/mailbox.h> +#include "cxlmem.h" +#include "cxlpci.h" #include "cxl.h" -#include "mem.h" +#include "pmu.h" /** * DOC: cxl pci * * This implements the PCI exclusive functionality for a CXL device as it is * defined by the Compute Express Link specification. CXL devices may surface - * certain functionality even if it isn't CXL enabled. + * certain functionality even if it isn't CXL enabled. While this driver is + * focused around the PCI specific aspects of a CXL device, it binds to the + * specific CXL memory device class code, and therefore the implementation of + * cxl_pci is focused around CXL memory devices. * * The driver has several responsibilities, mainly: * - Create the memX device and register on the CXL bus. * - Enumerate device's register interface and map them. - * - Probe the device attributes to establish sysfs interface. - * - Provide an IOCTL interface to userspace to communicate with the device for - * things like firmware update. + * - Registers nvdimm bridge device with cxl_core. + * - Registers a CXL mailbox with cxl_core. */ -#define cxl_doorbell_busy(cxlm) \ - (readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \ +#define cxl_doorbell_busy(cxlds) \ + (readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \ CXLDEV_MBOX_CTRL_DOORBELL) /* CXL 2.0 - 8.2.8.4 */ #define CXL_MAILBOX_TIMEOUT_MS (2 * HZ) -enum opcode { - CXL_MBOX_OP_INVALID = 0x0000, - CXL_MBOX_OP_RAW = CXL_MBOX_OP_INVALID, - CXL_MBOX_OP_GET_FW_INFO = 0x0200, - CXL_MBOX_OP_ACTIVATE_FW = 0x0202, - CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400, - CXL_MBOX_OP_GET_LOG = 0x0401, - CXL_MBOX_OP_IDENTIFY = 0x4000, - CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100, - CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101, - CXL_MBOX_OP_GET_LSA = 0x4102, - CXL_MBOX_OP_SET_LSA = 0x4103, - CXL_MBOX_OP_GET_HEALTH_INFO = 0x4200, - CXL_MBOX_OP_GET_ALERT_CONFIG = 0x4201, - CXL_MBOX_OP_SET_ALERT_CONFIG = 0x4202, - CXL_MBOX_OP_GET_SHUTDOWN_STATE = 0x4203, - CXL_MBOX_OP_SET_SHUTDOWN_STATE = 0x4204, - CXL_MBOX_OP_GET_POISON = 0x4300, - CXL_MBOX_OP_INJECT_POISON = 0x4301, - CXL_MBOX_OP_CLEAR_POISON = 0x4302, - CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS = 0x4303, - CXL_MBOX_OP_SCAN_MEDIA = 0x4304, - CXL_MBOX_OP_GET_SCAN_MEDIA = 0x4305, - CXL_MBOX_OP_MAX = 0x10000 -}; - -/** - * struct mbox_cmd - A command to be submitted to hardware. - * @opcode: (input) The command set and command submitted to hardware. - * @payload_in: (input) Pointer to the input payload. - * @payload_out: (output) Pointer to the output payload. Must be allocated by - * the caller. - * @size_in: (input) Number of bytes to load from @payload_in. - * @size_out: (input) Max number of bytes loaded into @payload_out. - * (output) Number of bytes generated by the device. 
For fixed size - * outputs commands this is always expected to be deterministic. For - * variable sized output commands, it tells the exact number of bytes - * written. - * @return_code: (output) Error code returned from hardware. - * - * This is the primary mechanism used to send commands to the hardware. - * All the fields except @payload_* correspond exactly to the fields described in - * Command Register section of the CXL 2.0 8.2.8.4.5. @payload_in and - * @payload_out are written to, and read from the Command Payload Registers - * defined in CXL 2.0 8.2.8.4.8. - */ -struct mbox_cmd { - u16 opcode; - void *payload_in; - void *payload_out; - size_t size_in; - size_t size_out; - u16 return_code; -#define CXL_MBOX_SUCCESS 0 -}; - -static int cxl_mem_major; -static DEFINE_IDA(cxl_memdev_ida); -static DECLARE_RWSEM(cxl_memdev_rwsem); -static struct dentry *cxl_debugfs; -static bool cxl_raw_allow_all; - -enum { - CEL_UUID, - VENDOR_DEBUG_UUID, -}; - -/* See CXL 2.0 Table 170. Get Log Input Payload */ -static const uuid_t log_uuid[] = { - [CEL_UUID] = UUID_INIT(0xda9c0b5, 0xbf41, 0x4b78, 0x8f, 0x79, 0x96, - 0xb1, 0x62, 0x3b, 0x3f, 0x17), - [VENDOR_DEBUG_UUID] = UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, - 0xd6, 0x07, 0x19, 0x40, 0x3d, 0x86), -}; - -/** - * struct cxl_mem_command - Driver representation of a memory device command - * @info: Command information as it exists for the UAPI - * @opcode: The actual bits used for the mailbox protocol - * @flags: Set of flags effecting driver behavior. - * - * * %CXL_CMD_FLAG_FORCE_ENABLE: In cases of error, commands with this flag - * will be enabled by the driver regardless of what hardware may have - * advertised. - * - * The cxl_mem_command is the driver's internal representation of commands that - * are supported by the driver. Some of these commands may not be supported by - * the hardware. The driver will use @info to validate the fields passed in by - * the user then submit the @opcode to the hardware. - * - * See struct cxl_command_info. - */ -struct cxl_mem_command { - struct cxl_command_info info; - enum opcode opcode; - u32 flags; -#define CXL_CMD_FLAG_NONE 0 -#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0) -}; - -#define CXL_CMD(_id, sin, sout, _flags) \ - [CXL_MEM_COMMAND_ID_##_id] = { \ - .info = { \ - .id = CXL_MEM_COMMAND_ID_##_id, \ - .size_in = sin, \ - .size_out = sout, \ - }, \ - .opcode = CXL_MBOX_OP_##_id, \ - .flags = _flags, \ - } - -/* - * This table defines the supported mailbox commands for the driver. This table - * is made up of a UAPI structure. Non-negative values as parameters in the - * table will be validated against the user's input. For example, if size_in is - * 0, and the user passed in 1, it is an error. 
- */ -static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = { - CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), -#ifdef CONFIG_CXL_MEM_RAW_COMMANDS - CXL_CMD(RAW, ~0, ~0, 0), -#endif - CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE), - CXL_CMD(GET_FW_INFO, 0, 0x50, 0), - CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0), - CXL_CMD(GET_LSA, 0x8, ~0, 0), - CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), - CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE), - CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), - CXL_CMD(SET_LSA, ~0, 0, 0), - CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), - CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), - CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), - CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), - CXL_CMD(GET_POISON, 0x10, ~0, 0), - CXL_CMD(INJECT_POISON, 0x8, 0, 0), - CXL_CMD(CLEAR_POISON, 0x48, 0, 0), - CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), - CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), - CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0), -}; - -/* - * Commands that RAW doesn't permit. The rationale for each: - * - * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment / - * coordination of transaction timeout values at the root bridge level. - * - * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live - * and needs to be coordinated with HDM updates. - * - * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the - * driver and any writes from userspace invalidates those contents. - * - * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes - * to the device after it is marked clean, userspace can not make that - * assertion. - * - * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that - * is kept up to date with patrol notifications and error management. - */ -static u16 cxl_disabled_raw_commands[] = { - CXL_MBOX_OP_ACTIVATE_FW, - CXL_MBOX_OP_SET_PARTITION_INFO, - CXL_MBOX_OP_SET_LSA, - CXL_MBOX_OP_SET_SHUTDOWN_STATE, - CXL_MBOX_OP_SCAN_MEDIA, - CXL_MBOX_OP_GET_SCAN_MEDIA, -}; - /* - * Command sets that RAW doesn't permit. All opcodes in this set are - * disabled because they pass plain text security payloads over the - * user/kernel boundary. This functionality is intended to be wrapped - * behind the keys ABI which allows for encrypted payloads in the UAPI + * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to + * dictate how long to wait for the mailbox to become ready. The new + * field allows the device to tell software the amount of time to wait + * before mailbox ready. This field per the spec theoretically allows + * for up to 255 seconds. 255 seconds is unreasonably long, its longer + * than the maximum SATA port link recovery wait. Default to 60 seconds + * until someone builds a CXL device that needs more time in practice. 
*/ -static u8 security_command_sets[] = { - 0x44, /* Sanitize */ - 0x45, /* Persistent Memory Data-at-rest Security */ - 0x46, /* Security Passthrough */ -}; - -#define cxl_for_each_cmd(cmd) \ - for ((cmd) = &mem_commands[0]; \ - ((cmd) - mem_commands) < ARRAY_SIZE(mem_commands); (cmd)++) +static unsigned short mbox_ready_timeout = 60; +module_param(mbox_ready_timeout, ushort, 0644); +MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready"); -#define cxl_cmd_count ARRAY_SIZE(mem_commands) - -static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) +static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds) { const unsigned long start = jiffies; unsigned long end = start; - while (cxl_doorbell_busy(cxlm)) { + while (cxl_doorbell_busy(cxlds)) { end = jiffies; if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { /* Check again in case preempted before timeout test */ - if (!cxl_doorbell_busy(cxlm)) + if (!cxl_doorbell_busy(cxlds)) break; return -ETIMEDOUT; } cpu_relax(); } - dev_dbg(&cxlm->pdev->dev, "Doorbell wait took %dms", + dev_dbg(cxlds->dev, "Doorbell wait took %dms", jiffies_to_msecs(end) - jiffies_to_msecs(start)); return 0; } -static bool cxl_is_security_command(u16 opcode) +#define cxl_err(dev, status, msg) \ + dev_err_ratelimited(dev, msg ", device state %s%s\n", \ + status & CXLMDEV_DEV_FATAL ? " fatal" : "", \ + status & CXLMDEV_FW_HALT ? " firmware-halt" : "") + +#define cxl_cmd_err(dev, cmd, status, msg) \ + dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n", \ + (cmd)->opcode, \ + status & CXLMDEV_DEV_FATAL ? " fatal" : "", \ + status & CXLMDEV_FW_HALT ? " firmware-halt" : "") + +/* + * Threaded irq dev_id's must be globally unique. cxl_dev_id provides a unique + * wrapper object for each irq within the same cxlds. 
+ */ +struct cxl_dev_id { + struct cxl_dev_state *cxlds; +}; + +static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq, + irq_handler_t thread_fn) +{ + struct device *dev = cxlds->dev; + struct cxl_dev_id *dev_id; + + dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL); + if (!dev_id) + return -ENOMEM; + dev_id->cxlds = cxlds; + + return devm_request_threaded_irq(dev, irq, NULL, thread_fn, + IRQF_SHARED | IRQF_ONESHOT, NULL, + dev_id); +} + +static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds) { - int i; + u64 reg; - for (i = 0; i < ARRAY_SIZE(security_command_sets); i++) - if (security_command_sets[i] == (opcode >> 8)) - return true; - return false; + reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); + return FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg) == 100; } -static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, - struct mbox_cmd *mbox_cmd) +static irqreturn_t cxl_pci_mbox_irq(int irq, void *id) { - struct device *dev = &cxlm->pdev->dev; + u64 reg; + u16 opcode; + struct cxl_dev_id *dev_id = id; + struct cxl_dev_state *cxlds = dev_id->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); + + if (!cxl_mbox_background_complete(cxlds)) + return IRQ_NONE; + + reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); + opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); + if (opcode == CXL_MBOX_OP_SANITIZE) { + mutex_lock(&cxl_mbox->mbox_mutex); + if (mds->security.sanitize_node) + mod_delayed_work(system_percpu_wq, &mds->security.poll_dwork, 0); + mutex_unlock(&cxl_mbox->mbox_mutex); + } else { + /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */ + rcuwait_wake_up(&cxl_mbox->mbox_wait); + } - dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n", - mbox_cmd->opcode, mbox_cmd->size_in); + return IRQ_HANDLED; +} + +/* + * Sanitization operation polling mode. + */ +static void cxl_mbox_sanitize_work(struct work_struct *work) +{ + struct cxl_memdev_state *mds = + container_of(work, typeof(*mds), security.poll_dwork.work); + struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; + + mutex_lock(&cxl_mbox->mbox_mutex); + if (cxl_mbox_background_complete(cxlds)) { + mds->security.poll_tmo_secs = 0; + if (mds->security.sanitize_node) + sysfs_notify_dirent(mds->security.sanitize_node); + mds->security.sanitize_active = false; + + dev_dbg(cxlds->dev, "Sanitization operation ended\n"); + } else { + int timeout = mds->security.poll_tmo_secs + 10; + + mds->security.poll_tmo_secs = min(15 * 60, timeout); + schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ); + } + mutex_unlock(&cxl_mbox->mbox_mutex); } /** - * __cxl_mem_mbox_send_cmd() - Execute a mailbox command - * @cxlm: The CXL memory device to communicate with. + * __cxl_pci_mbox_send_cmd() - Execute a mailbox command + * @cxl_mbox: CXL mailbox context * @mbox_cmd: Command to send to the memory device. * * Context: Any context. Expects mbox_mutex to be held. @@ -289,15 +195,18 @@ static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, * not need to coordinate with each other. The driver only uses the primary * mailbox. 
*/ -static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, - struct mbox_cmd *mbox_cmd) +static int __cxl_pci_mbox_send_cmd(struct cxl_mailbox *cxl_mbox, + struct cxl_mbox_cmd *mbox_cmd) { - void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; + struct cxl_dev_state *cxlds = mbox_to_cxlds(cxl_mbox); + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); + void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET; + struct device *dev = cxlds->dev; u64 cmd_reg, status_reg; size_t out_len; int rc; - lockdep_assert_held(&cxlm->mbox_mutex); + lockdep_assert_held(&cxl_mbox->mbox_mutex); /* * Here are the steps from 8.2.8.4 of the CXL 2.0 spec. @@ -317,12 +226,25 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, */ /* #1 */ - if (cxl_doorbell_busy(cxlm)) { - dev_err_ratelimited(&cxlm->pdev->dev, - "Mailbox re-busy after acquiring\n"); + if (cxl_doorbell_busy(cxlds)) { + u64 md_status = + readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); + + cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, + "mailbox queue busy"); return -EBUSY; } + /* + * With sanitize polling, hardware might be done and the poller still + * not be in sync. Ensure no new command comes in until so. Keep the + * hardware semantics and only allow device health status. + */ + if (mds->security.poll_tmo_secs > 0) { + if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO) + return -EBUSY; + } + cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK, mbox_cmd->opcode); if (mbox_cmd->size_in) { @@ -335,32 +257,100 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, } /* #2, #3 */ - writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); + writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); /* #4 */ - dev_dbg(&cxlm->pdev->dev, "Sending command\n"); + dev_dbg(dev, "Sending command: 0x%04x\n", mbox_cmd->opcode); writel(CXLDEV_MBOX_CTRL_DOORBELL, - cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); + cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); /* #5 */ - rc = cxl_mem_wait_for_doorbell(cxlm); + rc = cxl_pci_mbox_wait_for_doorbell(cxlds); if (rc == -ETIMEDOUT) { - cxl_mem_mbox_timeout(cxlm, mbox_cmd); + u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); + + cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout"); return rc; } /* #6 */ - status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET); + status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET); mbox_cmd->return_code = FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg); - if (mbox_cmd->return_code != 0) { - dev_dbg(&cxlm->pdev->dev, "Mailbox operation had an error\n"); - return 0; + /* + * Handle the background command in a synchronous manner. + * + * All other mailbox commands will serialize/queue on the mbox_mutex, + * which we currently hold. Furthermore this also guarantees that + * cxl_mbox_background_complete() checks are safe amongst each other, + * in that no new bg operation can occur in between. + * + * Background operations are timesliced in accordance with the nature + * of the command. In the event of timeout, the mailbox state is + * indeterminate until the next successful command submission and the + * driver can get back in sync with the hardware state. + */ + if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) { + u64 bg_status_reg; + int i, timeout; + + /* + * Sanitization is a special case which monopolizes the device + * and cannot be timesliced. Handle asynchronously instead, + * and allow userspace to poll(2) for completion. 
+ */ + if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) { + if (mds->security.sanitize_active) + return -EBUSY; + + /* give first timeout a second */ + timeout = 1; + mds->security.poll_tmo_secs = timeout; + mds->security.sanitize_active = true; + schedule_delayed_work(&mds->security.poll_dwork, + timeout * HZ); + dev_dbg(dev, "Sanitization operation started\n"); + goto success; + } + + dev_dbg(dev, "Mailbox background operation (0x%04x) started\n", + mbox_cmd->opcode); + + timeout = mbox_cmd->poll_interval_ms; + for (i = 0; i < mbox_cmd->poll_count; i++) { + if (rcuwait_wait_event_timeout(&cxl_mbox->mbox_wait, + cxl_mbox_background_complete(cxlds), + TASK_UNINTERRUPTIBLE, + msecs_to_jiffies(timeout)) > 0) + break; + } + + if (!cxl_mbox_background_complete(cxlds)) { + dev_err(dev, "timeout waiting for background (%d ms)\n", + timeout * mbox_cmd->poll_count); + return -ETIMEDOUT; + } + + bg_status_reg = readq(cxlds->regs.mbox + + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); + mbox_cmd->return_code = + FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK, + bg_status_reg); + dev_dbg(dev, + "Mailbox background operation (0x%04x) completed\n", + mbox_cmd->opcode); + } + + if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) { + dev_dbg(dev, "Mailbox operation had an error: %s\n", + cxl_mbox_cmd_rc2str(mbox_cmd)); + return 0; /* completed but caller must check return_code */ } +success: /* #7 */ - cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); + cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET); out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg); /* #8 */ @@ -372,8 +362,9 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, * have requested less data than the hardware supplied even * within spec. */ - size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len); + size_t n; + n = min3(mbox_cmd->size_out, cxl_mbox->payload_size, out_len); memcpy_fromio(mbox_cmd->payload_out, payload, n); mbox_cmd->size_out = n; } else { @@ -383,1299 +374,833 @@ static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, return 0; } -/** - * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox. - * @cxlm: The memory device to gain access to. - * - * Context: Any context. Takes the mbox_mutex. - * Return: 0 if exclusive access was acquired. - */ -static int cxl_mem_mbox_get(struct cxl_mem *cxlm) +static int cxl_pci_mbox_send(struct cxl_mailbox *cxl_mbox, + struct cxl_mbox_cmd *cmd) { - struct device *dev = &cxlm->pdev->dev; - u64 md_status; int rc; - mutex_lock_io(&cxlm->mbox_mutex); - - /* - * XXX: There is some amount of ambiguity in the 2.0 version of the spec - * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the - * bit is to allow firmware running on the device to notify the driver - * that it's ready to receive commands. It is unclear if the bit needs - * to be read for each transaction mailbox, ie. the firmware can switch - * it on and off as needed. Second, there is no defined timeout for - * mailbox ready, like there is for the doorbell interface. - * - * Assumptions: - * 1. The firmware might toggle the Mailbox Interface Ready bit, check - * it for every command. - * - * 2. If the doorbell is clear, the firmware should have first set the - * Mailbox Interface Ready bit. Therefore, waiting for the doorbell - * to be ready is sufficient. 
- */ - rc = cxl_mem_wait_for_doorbell(cxlm); - if (rc) { - dev_warn(dev, "Mailbox interface not ready\n"); - goto out; - } - - md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET); - if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) { - dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n"); - rc = -EBUSY; - goto out; - } - - /* - * Hardware shouldn't allow a ready status but also have failure bits - * set. Spit out an error, this should be a bug report - */ - rc = -EFAULT; - if (md_status & CXLMDEV_DEV_FATAL) { - dev_err(dev, "mbox: reported ready, but fatal\n"); - goto out; - } - if (md_status & CXLMDEV_FW_HALT) { - dev_err(dev, "mbox: reported ready, but halted\n"); - goto out; - } - if (CXLMDEV_RESET_NEEDED(md_status)) { - dev_err(dev, "mbox: reported ready, but reset needed\n"); - goto out; - } + mutex_lock(&cxl_mbox->mbox_mutex); + rc = __cxl_pci_mbox_send_cmd(cxl_mbox, cmd); + mutex_unlock(&cxl_mbox->mbox_mutex); - /* with lock held */ - return 0; - -out: - mutex_unlock(&cxlm->mbox_mutex); return rc; } -/** - * cxl_mem_mbox_put() - Release exclusive access to the mailbox. - * @cxlm: The CXL memory device to communicate with. - * - * Context: Any context. Expects mbox_mutex to be held. - */ -static void cxl_mem_mbox_put(struct cxl_mem *cxlm) +static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail) { - mutex_unlock(&cxlm->mbox_mutex); -} + struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; + const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); + struct device *dev = cxlds->dev; + unsigned long timeout; + int irq, msgnum; + u64 md_status; + u32 ctrl; -/** - * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. - * @cxlm: The CXL memory device to communicate with. - * @cmd: The validated command. - * @in_payload: Pointer to userspace's input payload. - * @out_payload: Pointer to userspace's output payload. - * @size_out: (Input) Max payload size to copy out. - * (Output) Payload size hardware generated. - * @retval: Hardware generated return code from the operation. - * - * Return: - * * %0 - Mailbox transaction succeeded. This implies the mailbox - * protocol completed successfully not that the operation itself - * was successful. - * * %-ENOMEM - Couldn't allocate a bounce buffer. - * * %-EFAULT - Something happened with copy_to/from_user. - * * %-EINTR - Mailbox acquisition interrupted. - * * %-EXXX - Transaction level failures. - * - * Creates the appropriate mailbox command and dispatches it on behalf of a - * userspace request. The input and output payloads are copied between - * userspace. - * - * See cxl_send_cmd(). 
- */ -static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm, - const struct cxl_mem_command *cmd, - u64 in_payload, u64 out_payload, - s32 *size_out, u32 *retval) -{ - struct device *dev = &cxlm->pdev->dev; - struct mbox_cmd mbox_cmd = { - .opcode = cmd->opcode, - .size_in = cmd->info.size_in, - .size_out = cmd->info.size_out, - }; - int rc; + timeout = jiffies + mbox_ready_timeout * HZ; + do { + md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); + if (md_status & CXLMDEV_MBOX_IF_READY) + break; + if (msleep_interruptible(100)) + break; + } while (!time_after(jiffies, timeout)); - if (cmd->info.size_out) { - mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL); - if (!mbox_cmd.payload_out) - return -ENOMEM; + if (!(md_status & CXLMDEV_MBOX_IF_READY)) { + cxl_err(dev, md_status, "timeout awaiting mailbox ready"); + return -ETIMEDOUT; } - if (cmd->info.size_in) { - mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload), - cmd->info.size_in); - if (IS_ERR(mbox_cmd.payload_in)) { - kvfree(mbox_cmd.payload_out); - return PTR_ERR(mbox_cmd.payload_in); - } + /* + * A command may be in flight from a previous driver instance, + * think kexec, do one doorbell wait so that + * __cxl_pci_mbox_send_cmd() can assume that it is the only + * source for future doorbell busy events. + */ + if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) { + cxl_err(dev, md_status, "timeout awaiting mailbox idle"); + return -ETIMEDOUT; } - rc = cxl_mem_mbox_get(cxlm); - if (rc) - goto out; - - dev_dbg(dev, - "Submitting %s command for user\n" - "\topcode: %x\n" - "\tsize: %ub\n", - cxl_command_names[cmd->info.id].name, mbox_cmd.opcode, - cmd->info.size_in); - - dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW, - "raw command path used\n"); - - rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); - cxl_mem_mbox_put(cxlm); - if (rc) - goto out; + cxl_mbox->mbox_send = cxl_pci_mbox_send; + cxl_mbox->payload_size = + 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); /* - * @size_out contains the max size that's allowed to be written back out - * to userspace. While the payload may have written more output than - * this it will have to be ignored. + * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register + * + * If the size is too small, mandatory commands will not work and so + * there's no point in going forward. If the size is too large, there's + * no harm is soft limiting it. 
*/ - if (mbox_cmd.size_out) { - dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out, - "Invalid return size\n"); - if (copy_to_user(u64_to_user_ptr(out_payload), - mbox_cmd.payload_out, mbox_cmd.size_out)) { - rc = -EFAULT; - goto out; - } + cxl_mbox->payload_size = min_t(size_t, cxl_mbox->payload_size, SZ_1M); + if (cxl_mbox->payload_size < 256) { + dev_err(dev, "Mailbox is too small (%zub)", + cxl_mbox->payload_size); + return -ENXIO; } - *size_out = mbox_cmd.size_out; - *retval = mbox_cmd.return_code; + dev_dbg(dev, "Mailbox payload sized %zu", cxl_mbox->payload_size); -out: - kvfree(mbox_cmd.payload_in); - kvfree(mbox_cmd.payload_out); - return rc; -} - -static bool cxl_mem_raw_command_allowed(u16 opcode) -{ - int i; + INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work); - if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS)) - return false; - - if (security_locked_down(LOCKDOWN_NONE)) - return false; + /* background command interrupts are optional */ + if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail) + return 0; - if (cxl_raw_allow_all) - return true; + msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap); + irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum); + if (irq < 0) + return 0; - if (cxl_is_security_command(opcode)) - return false; + if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq)) + return 0; - for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++) - if (cxl_disabled_raw_commands[i] == opcode) - return false; + dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n"); + /* enable background command mbox irq support */ + ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); + ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ; + writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); - return true; + return 0; } -/** - * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. - * @cxlm: &struct cxl_mem device whose mailbox will be used. - * @send_cmd: &struct cxl_send_command copied in from userspace. - * @out_cmd: Sanitized and populated &struct cxl_mem_command. - * - * Return: - * * %0 - @out_cmd is ready to send. - * * %-ENOTTY - Invalid command specified. - * * %-EINVAL - Reserved fields or invalid values were used. - * * %-ENOMEM - Input or output buffer wasn't sized properly. - * * %-EPERM - Attempted to use a protected command. - * - * The result of this command is a fully validated command in @out_cmd that is - * safe to send to the hardware. - * - * See handle_mailbox_cmd_from_user() +/* + * Assume that any RCIEP that emits the CXL memory expander class code + * is an RCD */ -static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm, - const struct cxl_send_command *send_cmd, - struct cxl_mem_command *out_cmd) +static bool is_cxl_restricted(struct pci_dev *pdev) { - const struct cxl_command_info *info; - struct cxl_mem_command *c; - - if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) - return -ENOTTY; - - /* - * The user can never specify an input payload larger than what hardware - * supports, but output can be arbitrarily large (simply write out as - * much data as the hardware provides). 
- */ - if (send_cmd->in.size > cxlm->payload_size) - return -EINVAL; - - /* - * Checks are bypassed for raw commands but a WARN/taint will occur - * later in the callchain - */ - if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) { - const struct cxl_mem_command temp = { - .info = { - .id = CXL_MEM_COMMAND_ID_RAW, - .flags = 0, - .size_in = send_cmd->in.size, - .size_out = send_cmd->out.size, - }, - .opcode = send_cmd->raw.opcode - }; - - if (send_cmd->raw.rsvd) - return -EINVAL; - - /* - * Unlike supported commands, the output size of RAW commands - * gets passed along without further checking, so it must be - * validated here. - */ - if (send_cmd->out.size > cxlm->payload_size) - return -EINVAL; - - if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) - return -EPERM; - - memcpy(out_cmd, &temp, sizeof(temp)); - - return 0; - } - - if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK) - return -EINVAL; - - if (send_cmd->rsvd) - return -EINVAL; - - if (send_cmd->in.rsvd || send_cmd->out.rsvd) - return -EINVAL; + return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END; +} - /* Convert user's command into the internal representation */ - c = &mem_commands[send_cmd->id]; - info = &c->info; +static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev, + struct cxl_register_map *map, + struct cxl_dport *dport) +{ + resource_size_t component_reg_phys; - /* Check that the command is enabled for hardware */ - if (!test_bit(info->id, cxlm->enabled_cmds)) - return -ENOTTY; + *map = (struct cxl_register_map) { + .host = &pdev->dev, + .resource = CXL_RESOURCE_NONE, + }; - /* Check the input buffer is the expected size */ - if (info->size_in >= 0 && info->size_in != send_cmd->in.size) - return -ENOMEM; + struct cxl_port *port __free(put_cxl_port) = + cxl_pci_find_port(pdev, &dport); + if (!port) + return -EPROBE_DEFER; - /* Check the output buffer is at least large enough */ - if (info->size_out >= 0 && send_cmd->out.size < info->size_out) - return -ENOMEM; + component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport); + if (component_reg_phys == CXL_RESOURCE_NONE) + return -ENXIO; - memcpy(out_cmd, c, sizeof(*c)); - out_cmd->info.size_in = send_cmd->in.size; - /* - * XXX: out_cmd->info.size_out will be controlled by the driver, and the - * specified number of bytes @send_cmd->out.size will be copied back out - * to userspace. - */ + map->resource = component_reg_phys; + map->reg_type = CXL_REGLOC_RBI_COMPONENT; + map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE; return 0; } -static int cxl_query_cmd(struct cxl_memdev *cxlmd, - struct cxl_mem_query_commands __user *q) +static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, + struct cxl_register_map *map) { - struct device *dev = &cxlmd->dev; - struct cxl_mem_command *cmd; - u32 n_commands; - int j = 0; - - dev_dbg(dev, "Query IOCTL\n"); - - if (get_user(n_commands, &q->n_commands)) - return -EFAULT; + int rc; - /* returns the total number if 0 elements are requested. */ - if (n_commands == 0) - return put_user(cxl_cmd_count, &q->n_commands); + rc = cxl_find_regblock(pdev, type, map); /* - * otherwise, return max(n_commands, total commands) cxl_command_info - * structures. + * If the Register Locator DVSEC does not exist, check if it + * is an RCH and try to extract the Component Registers from + * an RCRB. 
*/ - cxl_for_each_cmd(cmd) { - const struct cxl_command_info *info = &cmd->info; + if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) { + struct cxl_dport *dport; + struct cxl_port *port __free(put_cxl_port) = + cxl_pci_find_port(pdev, &dport); + if (!port) + return -EPROBE_DEFER; + + rc = cxl_rcrb_get_comp_regs(pdev, map, dport); + if (rc) + return rc; - if (copy_to_user(&q->commands[j++], info, sizeof(*info))) - return -EFAULT; + rc = cxl_dport_map_rcd_linkcap(pdev, dport); + if (rc) + return rc; - if (j == n_commands) - break; + } else if (rc) { + return rc; } - return 0; + return cxl_setup_regs(map); } -static int cxl_send_cmd(struct cxl_memdev *cxlmd, - struct cxl_send_command __user *s) +static int cxl_pci_ras_unmask(struct pci_dev *pdev) { - struct cxl_mem *cxlm = cxlmd->cxlm; - struct device *dev = &cxlmd->dev; - struct cxl_send_command send; - struct cxl_mem_command c; + struct cxl_dev_state *cxlds = pci_get_drvdata(pdev); + void __iomem *addr; + u32 orig_val, val, mask; + u16 cap; int rc; - dev_dbg(dev, "Send IOCTL\n"); + if (!cxlds->regs.ras) { + dev_dbg(&pdev->dev, "No RAS registers.\n"); + return 0; + } - if (copy_from_user(&send, s, sizeof(send))) - return -EFAULT; + /* BIOS has PCIe AER error control */ + if (!pcie_aer_is_native(pdev)) + return 0; - rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c); + rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap); if (rc) return rc; - /* Prepare to handle a full payload for variable sized output */ - if (c.info.size_out < 0) - c.info.size_out = cxlm->payload_size; - - rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload, - send.out.payload, &send.out.size, - &send.retval); - if (rc) - return rc; + if (cap & PCI_EXP_DEVCTL_URRE) { + addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET; + orig_val = readl(addr); + + mask = CXL_RAS_UNCORRECTABLE_MASK_MASK | + CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK; + val = orig_val & ~mask; + writel(val, addr); + dev_dbg(&pdev->dev, + "Uncorrectable RAS Errors Mask: %#x -> %#x\n", + orig_val, val); + } - if (copy_to_user(s, &send, sizeof(send))) - return -EFAULT; + if (cap & PCI_EXP_DEVCTL_CERE) { + addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_MASK_OFFSET; + orig_val = readl(addr); + val = orig_val & ~CXL_RAS_CORRECTABLE_MASK_MASK; + writel(val, addr); + dev_dbg(&pdev->dev, "Correctable RAS Errors Mask: %#x -> %#x\n", + orig_val, val); + } return 0; } -static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, - unsigned long arg) +static void free_event_buf(void *buf) { - switch (cmd) { - case CXL_MEM_QUERY_COMMANDS: - return cxl_query_cmd(cxlmd, (void __user *)arg); - case CXL_MEM_SEND_COMMAND: - return cxl_send_cmd(cxlmd, (void __user *)arg); - default: - return -ENOTTY; - } + kvfree(buf); } -static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +/* + * There is a single buffer for reading event logs from the mailbox. All logs + * share this buffer protected by the mds->event_log_lock. 
+ */ +static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds) { - struct cxl_memdev *cxlmd = file->private_data; - int rc = -ENXIO; + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + struct cxl_get_event_payload *buf; - down_read(&cxl_memdev_rwsem); - if (cxlmd->cxlm) - rc = __cxl_memdev_ioctl(cxlmd, cmd, arg); - up_read(&cxl_memdev_rwsem); + buf = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + mds->event.buf = buf; - return rc; + return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf); } -static int cxl_memdev_open(struct inode *inode, struct file *file) +static bool cxl_alloc_irq_vectors(struct pci_dev *pdev) { - struct cxl_memdev *cxlmd = - container_of(inode->i_cdev, typeof(*cxlmd), cdev); - - get_device(&cxlmd->dev); - file->private_data = cxlmd; + int nvecs; - return 0; + /* + * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must + * not generate INTx messages if that function participates in + * CXL.cache or CXL.mem. + * + * Additionally pci_alloc_irq_vectors() handles calling + * pci_free_irq_vectors() automatically despite not being called + * pcim_*. See pci_setup_msi_context(). + */ + nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS, + PCI_IRQ_MSIX | PCI_IRQ_MSI); + if (nvecs < 1) { + dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs); + return false; + } + return true; } -static int cxl_memdev_release_file(struct inode *inode, struct file *file) +static irqreturn_t cxl_event_thread(int irq, void *id) { - struct cxl_memdev *cxlmd = - container_of(inode->i_cdev, typeof(*cxlmd), cdev); + struct cxl_dev_id *dev_id = id; + struct cxl_dev_state *cxlds = dev_id->cxlds; + struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); + u32 status; - put_device(&cxlmd->dev); + do { + /* + * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status; + * ignore the reserved upper 32 bits + */ + status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET); + /* Ignore logs unknown to the driver */ + status &= CXLDEV_EVENT_STATUS_ALL; + if (!status) + break; + cxl_mem_get_event_records(mds, status); + cond_resched(); + } while (status); - return 0; + return IRQ_HANDLED; } -static const struct file_operations cxl_memdev_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = cxl_memdev_ioctl, - .open = cxl_memdev_open, - .release = cxl_memdev_release_file, - .compat_ioctl = compat_ptr_ioctl, - .llseek = noop_llseek, -}; - -static inline struct cxl_mem_command *cxl_mem_find_command(u16 opcode) +static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting) { - struct cxl_mem_command *c; + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int irq; - cxl_for_each_cmd(c) - if (c->opcode == opcode) - return c; + if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX) + return -ENXIO; + + irq = pci_irq_vector(pdev, + FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting)); + if (irq < 0) + return irq; - return NULL; + return cxl_request_irq(cxlds, irq, cxl_event_thread); } -/** - * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device. - * @cxlm: The CXL memory device to communicate with. - * @opcode: Opcode for the mailbox command. - * @in: The input payload for the mailbox command. - * @in_size: The length of the input payload - * @out: Caller allocated buffer for the output. - * @out_size: Expected size of output. - * - * Context: Any context. Will acquire and release mbox_mutex. - * Return: - * * %>=0 - Number of bytes returned in @out. 
- * * %-E2BIG - Payload is too large for hardware. - * * %-EBUSY - Couldn't acquire exclusive mailbox access. - * * %-EFAULT - Hardware error occurred. - * * %-ENXIO - Command completed, but device reported an error. - * * %-EIO - Unexpected output size. - * - * Mailbox commands may execute successfully yet the device itself reported an - * error. While this distinction can be useful for commands from userspace, the - * kernel will only be able to use results when both are successful. - * - * See __cxl_mem_mbox_send_cmd() - */ -static int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, - void *in, size_t in_size, - void *out, size_t out_size) +static int cxl_event_get_int_policy(struct cxl_memdev_state *mds, + struct cxl_event_interrupt_policy *policy) { - const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); - struct mbox_cmd mbox_cmd = { - .opcode = opcode, - .payload_in = in, - .size_in = in_size, - .size_out = out_size, - .payload_out = out, + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + struct cxl_mbox_cmd mbox_cmd = { + .opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY, + .payload_out = policy, + .size_out = sizeof(*policy), }; int rc; - if (out_size > cxlm->payload_size) - return -E2BIG; - - rc = cxl_mem_mbox_get(cxlm); - if (rc) - return rc; - - rc = __cxl_mem_mbox_send_cmd(cxlm, &mbox_cmd); - cxl_mem_mbox_put(cxlm); - if (rc) - return rc; - - /* TODO: Map return code to proper kernel style errno */ - if (mbox_cmd.return_code != CXL_MBOX_SUCCESS) - return -ENXIO; - - /* - * Variable sized commands can't be validated and so it's up to the - * caller to do that if they wish. - */ - if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size) - return -EIO; + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); + if (rc < 0) + dev_err(mds->cxlds.dev, + "Failed to get event interrupt policy : %d", rc); - return 0; + return rc; } -static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) +static int cxl_event_config_msgnums(struct cxl_memdev_state *mds, + struct cxl_event_interrupt_policy *policy) { - const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); + struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; + struct cxl_mbox_cmd mbox_cmd; + int rc; - cxlm->payload_size = - 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); + *policy = (struct cxl_event_interrupt_policy) { + .info_settings = CXL_INT_MSI_MSIX, + .warn_settings = CXL_INT_MSI_MSIX, + .failure_settings = CXL_INT_MSI_MSIX, + .fatal_settings = CXL_INT_MSI_MSIX, + }; - /* - * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register - * - * If the size is too small, mandatory commands will not work and so - * there's no point in going forward. If the size is too large, there's - * no harm is soft limiting it. 
- */ - cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M); - if (cxlm->payload_size < 256) { - dev_err(&cxlm->pdev->dev, "Mailbox is too small (%zub)", - cxlm->payload_size); - return -ENXIO; - } + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY, + .payload_in = policy, + .size_in = sizeof(*policy), + }; - dev_dbg(&cxlm->pdev->dev, "Mailbox payload sized %zu", - cxlm->payload_size); + rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); + if (rc < 0) { + dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d", + rc); + return rc; + } - return 0; + /* Retrieve final interrupt settings */ + return cxl_event_get_int_policy(mds, policy); } -static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev) +static int cxl_event_irqsetup(struct cxl_memdev_state *mds) { - struct device *dev = &pdev->dev; - struct cxl_mem *cxlm; + struct cxl_dev_state *cxlds = &mds->cxlds; + struct cxl_event_interrupt_policy policy; + int rc; - cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL); - if (!cxlm) { - dev_err(dev, "No memory available\n"); - return ERR_PTR(-ENOMEM); - } + rc = cxl_event_config_msgnums(mds, &policy); + if (rc) + return rc; - mutex_init(&cxlm->mbox_mutex); - cxlm->pdev = pdev; - cxlm->enabled_cmds = - devm_kmalloc_array(dev, BITS_TO_LONGS(cxl_cmd_count), - sizeof(unsigned long), - GFP_KERNEL | __GFP_ZERO); - if (!cxlm->enabled_cmds) { - dev_err(dev, "No memory available for bitmap\n"); - return ERR_PTR(-ENOMEM); + rc = cxl_event_req_irq(cxlds, policy.info_settings); + if (rc) { + dev_err(cxlds->dev, "Failed to get interrupt for event Info log\n"); + return rc; } - return cxlm; -} - -static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, - u8 bar, u64 offset) -{ - struct pci_dev *pdev = cxlm->pdev; - struct device *dev = &pdev->dev; - void __iomem *addr; - - /* Basic sanity check that BAR is big enough */ - if (pci_resource_len(pdev, bar) < offset) { - dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar, - &pdev->resource[bar], (unsigned long long)offset); - return IOMEM_ERR_PTR(-ENXIO); + rc = cxl_event_req_irq(cxlds, policy.warn_settings); + if (rc) { + dev_err(cxlds->dev, "Failed to get interrupt for event Warn log\n"); + return rc; } - addr = pci_iomap(pdev, bar, 0); - if (!addr) { - dev_err(dev, "failed to map registers\n"); - return addr; + rc = cxl_event_req_irq(cxlds, policy.failure_settings); + if (rc) { + dev_err(cxlds->dev, "Failed to get interrupt for event Failure log\n"); + return rc; } - dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n", - bar, offset); + rc = cxl_event_req_irq(cxlds, policy.fatal_settings); + if (rc) { + dev_err(cxlds->dev, "Failed to get interrupt for event Fatal log\n"); + return rc; + } - return addr; + return 0; } -static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base) +static bool cxl_event_int_is_fw(u8 setting) { - pci_iounmap(cxlm->pdev, base); + u8 mode = FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting); + + return mode == CXL_INT_FW; } -static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec) +static int cxl_event_config(struct pci_host_bridge *host_bridge, + struct cxl_memdev_state *mds, bool irq_avail) { - int pos; + struct cxl_event_interrupt_policy policy; + int rc; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC); - if (!pos) + /* + * When BIOS maintains CXL error reporting control, it will process + * event records. Only one agent can do so. 
+ */ + if (!host_bridge->native_cxl_error) return 0; - while (pos) { - u16 vendor, id; + if (!irq_avail) { + dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n"); + return 0; + } - pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor); - pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id); - if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id) - return pos; + rc = cxl_event_get_int_policy(mds, &policy); + if (rc) + return rc; - pos = pci_find_next_ext_capability(pdev, pos, - PCI_EXT_CAP_ID_DVSEC); + if (cxl_event_int_is_fw(policy.info_settings) || + cxl_event_int_is_fw(policy.warn_settings) || + cxl_event_int_is_fw(policy.failure_settings) || + cxl_event_int_is_fw(policy.fatal_settings)) { + dev_err(mds->cxlds.dev, + "FW still in control of Event Logs despite _OSC settings\n"); + return -EBUSY; } - return 0; -} - -static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, - struct cxl_register_map *map) -{ - struct pci_dev *pdev = cxlm->pdev; - struct device *dev = &pdev->dev; - struct cxl_component_reg_map *comp_map; - struct cxl_device_reg_map *dev_map; - - switch (map->reg_type) { - case CXL_REGLOC_RBI_COMPONENT: - comp_map = &map->component_map; - cxl_probe_component_regs(dev, base, comp_map); - if (!comp_map->hdm_decoder.valid) { - dev_err(dev, "HDM decoder registers not found\n"); - return -ENXIO; - } + rc = cxl_mem_alloc_event_buf(mds); + if (rc) + return rc; - dev_dbg(dev, "Set up component registers\n"); - break; - case CXL_REGLOC_RBI_MEMDEV: - dev_map = &map->device_map; - cxl_probe_device_regs(dev, base, dev_map); - if (!dev_map->status.valid || !dev_map->mbox.valid || - !dev_map->memdev.valid) { - dev_err(dev, "registers not found: %s%s%s\n", - !dev_map->status.valid ? "status " : "", - !dev_map->mbox.valid ? "status " : "", - !dev_map->memdev.valid ? "status " : ""); - return -ENXIO; - } + rc = cxl_event_irqsetup(mds); + if (rc) + return rc; - dev_dbg(dev, "Probing device registers...\n"); - break; - default: - break; - } + cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL); return 0; } -static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map) +static int cxl_pci_type3_init_mailbox(struct cxl_dev_state *cxlds) { - struct pci_dev *pdev = cxlm->pdev; - struct device *dev = &pdev->dev; + int rc; - switch (map->reg_type) { - case CXL_REGLOC_RBI_COMPONENT: - cxl_map_component_regs(pdev, &cxlm->regs.component, map); - dev_dbg(dev, "Mapping component registers...\n"); - break; - case CXL_REGLOC_RBI_MEMDEV: - cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map); - dev_dbg(dev, "Probing device registers...\n"); - break; - default: - break; - } + /* + * Fail the init if there's no mailbox. For a type3 this is out of spec. + */ + if (!cxlds->reg_map.device_map.mbox.valid) + return -ENODEV; - return 0; -} + rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev); + if (rc) + return rc; -static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, - u8 *bar, u64 *offset, u8 *reg_type) -{ - *offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); - *bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); - *reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo); + return 0; } -/** - * cxl_mem_setup_regs() - Setup necessary MMIO. - * @cxlm: The CXL memory device to communicate with. - * - * Return: 0 if all necessary registers mapped. - * - * A memory device is required by spec to implement a certain set of MMIO - * regions. The purpose of this function is to enumerate and map those - * registers. 
- */ -static int cxl_mem_setup_regs(struct cxl_mem *cxlm) +static ssize_t rcd_pcie_cap_emit(struct device *dev, u16 offset, char *buf, size_t width) { - struct pci_dev *pdev = cxlm->pdev; - struct device *dev = &pdev->dev; - u32 regloc_size, regblocks; - void __iomem *base; - int regloc, i; - struct cxl_register_map *map, *n; - LIST_HEAD(register_maps); - int ret = 0; - - regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID); - if (!regloc) { - dev_err(dev, "register location dvsec not found\n"); + struct cxl_dev_state *cxlds = dev_get_drvdata(dev); + struct cxl_memdev *cxlmd = cxlds->cxlmd; + struct device *root_dev; + struct cxl_dport *dport; + struct cxl_port *root __free(put_cxl_port) = + cxl_mem_find_port(cxlmd, &dport); + + if (!root) return -ENXIO; - } - - if (pci_request_mem_regions(pdev, pci_name(pdev))) - return -ENODEV; - - /* Get the size of the Register Locator DVSEC */ - pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, ®loc_size); - regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size); - - regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET; - regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8; - - for (i = 0; i < regblocks; i++, regloc += 8) { - u32 reg_lo, reg_hi; - u8 reg_type; - u64 offset; - u8 bar; - - map = kzalloc(sizeof(*map), GFP_KERNEL); - if (!map) { - ret = -ENOMEM; - goto free_maps; - } - - list_add(&map->list, ®ister_maps); - - pci_read_config_dword(pdev, regloc, ®_lo); - pci_read_config_dword(pdev, regloc + 4, ®_hi); - - cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset, - ®_type); - dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n", - bar, offset, reg_type); - - base = cxl_mem_map_regblock(cxlm, bar, offset); - if (!base) { - ret = -ENOMEM; - goto free_maps; - } - - map->barno = bar; - map->block_offset = offset; - map->reg_type = reg_type; - - ret = cxl_probe_regs(cxlm, base + offset, map); - - /* Always unmap the regblock regardless of probe success */ - cxl_mem_unmap_regblock(cxlm, base); - - if (ret) - goto free_maps; - } + root_dev = root->uport_dev; + if (!root_dev) + return -ENXIO; - pci_release_mem_regions(pdev); + if (!dport->regs.rcd_pcie_cap) + return -ENXIO; - list_for_each_entry(map, ®ister_maps, list) { - ret = cxl_map_regs(cxlm, map); - if (ret) - goto free_maps; - } + guard(device)(root_dev); + if (!root_dev->driver) + return -ENXIO; -free_maps: - list_for_each_entry_safe(map, n, ®ister_maps, list) { - list_del(&map->list); - kfree(map); + switch (width) { + case 2: + return sysfs_emit(buf, "%#x\n", + readw(dport->regs.rcd_pcie_cap + offset)); + case 4: + return sysfs_emit(buf, "%#x\n", + readl(dport->regs.rcd_pcie_cap + offset)); + default: + return -EINVAL; } - - return ret; -} - -static struct cxl_memdev *to_cxl_memdev(struct device *dev) -{ - return container_of(dev, struct cxl_memdev, dev); } -static void cxl_memdev_release(struct device *dev) +static ssize_t rcd_link_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - - ida_free(&cxl_memdev_ida, cxlmd->id); - kfree(cxlmd); + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCAP, buf, sizeof(u32)); } +static DEVICE_ATTR_RO(rcd_link_cap); -static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid, - kgid_t *gid) +static ssize_t rcd_link_ctrl_show(struct device *dev, + struct device_attribute *attr, char *buf) { - return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev)); + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCTL, buf, sizeof(u16)); } 
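/*
 * [Illustrative aside, not part of the patch] rcd_pcie_cap_emit() above
 * leans on scope-based cleanup from <linux/cleanup.h>: the
 * __free(put_cxl_port) annotation drops the port reference and
 * guard(device) drops the device lock automatically at scope exit, so
 * each early return unwinds correctly without goto labels. A minimal
 * self-contained instance of the same pattern, with a hypothetical
 * ex_put cleanup class:
 */
#include <linux/cleanup.h>
#include <linux/device.h>

DEFINE_FREE(ex_put, struct device *, if (_T) put_device(_T))

static int ex_check_driver(struct device *parent)
{
	struct device *dev __free(ex_put) = get_device(parent);

	if (!dev)
		return -ENXIO;

	guard(device)(dev);	/* device_lock(), unlocked again at scope exit */
	if (!dev->driver)
		return -ENXIO;	/* lock and reference both released here */

	return 0;
}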
+static DEVICE_ATTR_RO(rcd_link_ctrl); -static ssize_t firmware_version_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t rcd_link_status_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version); + return rcd_pcie_cap_emit(dev, PCI_EXP_LNKSTA, buf, sizeof(u16)); } -static DEVICE_ATTR_RO(firmware_version); +static DEVICE_ATTR_RO(rcd_link_status); -static ssize_t payload_max_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%zu\n", cxlm->payload_size); -} -static DEVICE_ATTR_RO(payload_max); +static struct attribute *cxl_rcd_attrs[] = { + &dev_attr_rcd_link_cap.attr, + &dev_attr_rcd_link_ctrl.attr, + &dev_attr_rcd_link_status.attr, + NULL +}; -static ssize_t label_storage_size_show(struct device *dev, - struct device_attribute *attr, char *buf) +static umode_t cxl_rcd_visible(struct kobject *kobj, struct attribute *a, int n) { - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - - return sysfs_emit(buf, "%zu\n", cxlm->lsa_size); -} -static DEVICE_ATTR_RO(label_storage_size); + struct device *dev = kobj_to_dev(kobj); + struct pci_dev *pdev = to_pci_dev(dev); -static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - unsigned long long len = range_len(&cxlm->ram_range); + if (is_cxl_restricted(pdev)) + return a->mode; - return sysfs_emit(buf, "%#llx\n", len); + return 0; } -static struct device_attribute dev_attr_ram_size = - __ATTR(size, 0444, ram_size_show, NULL); +static struct attribute_group cxl_rcd_group = { + .attrs = cxl_rcd_attrs, + .is_visible = cxl_rcd_visible, +}; +__ATTRIBUTE_GROUPS(cxl_rcd); -static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr, - char *buf) +static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_mem *cxlm = cxlmd->cxlm; - unsigned long long len = range_len(&cxlm->pmem_range); - - return sysfs_emit(buf, "%#llx\n", len); -} + struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus); + struct cxl_dpa_info range_info = { 0 }; + struct cxl_memdev_state *mds; + struct cxl_dev_state *cxlds; + struct cxl_register_map map; + struct cxl_memdev *cxlmd; + int rc, pmu_count; + unsigned int i; + bool irq_avail; -static struct device_attribute dev_attr_pmem_size = - __ATTR(size, 0444, pmem_size_show, NULL); + /* + * Double check the anonymous union trickery in struct cxl_regs + * FIXME switch to struct_group() + */ + BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != + offsetof(struct cxl_regs, device_regs.memdev)); -static struct attribute *cxl_memdev_attributes[] = { - &dev_attr_firmware_version.attr, - &dev_attr_payload_max.attr, - &dev_attr_label_storage_size.attr, - NULL, -}; + rc = pcim_enable_device(pdev); + if (rc) + return rc; + pci_set_master(pdev); + + mds = cxl_memdev_state_create(&pdev->dev); + if (IS_ERR(mds)) + return PTR_ERR(mds); + cxlds = &mds->cxlds; + pci_set_drvdata(pdev, cxlds); + + cxlds->rcd = is_cxl_restricted(pdev); + cxlds->serial = pci_get_dsn(pdev); + cxlds->cxl_dvsec = pci_find_dvsec_capability( + pdev, PCI_VENDOR_ID_CXL, 
CXL_DVSEC_PCIE_DEVICE); + if (!cxlds->cxl_dvsec) + dev_warn(&pdev->dev, + "Device DVSEC not present, skip CXL.mem init\n"); + + rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map); + if (rc) + return rc; -static struct attribute *cxl_memdev_pmem_attributes[] = { - &dev_attr_pmem_size.attr, - NULL, -}; + rc = cxl_map_device_regs(&map, &cxlds->regs.device_regs); + if (rc) + return rc; -static struct attribute *cxl_memdev_ram_attributes[] = { - &dev_attr_ram_size.attr, - NULL, -}; + /* + * If the component registers can't be found, the cxl_pci driver may + * still be useful for management functions so don't return an error. + */ + rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, + &cxlds->reg_map); + if (rc) + dev_warn(&pdev->dev, "No component registers (%d)\n", rc); + else if (!cxlds->reg_map.component_map.ras.valid) + dev_dbg(&pdev->dev, "RAS registers not found\n"); -static struct attribute_group cxl_memdev_attribute_group = { - .attrs = cxl_memdev_attributes, -}; + rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component, + BIT(CXL_CM_CAP_CAP_ID_RAS)); + if (rc) + dev_dbg(&pdev->dev, "Failed to map RAS capability.\n"); -static struct attribute_group cxl_memdev_ram_attribute_group = { - .name = "ram", - .attrs = cxl_memdev_ram_attributes, -}; + rc = cxl_pci_type3_init_mailbox(cxlds); + if (rc) + return rc; -static struct attribute_group cxl_memdev_pmem_attribute_group = { - .name = "pmem", - .attrs = cxl_memdev_pmem_attributes, -}; + rc = cxl_await_media_ready(cxlds); + if (rc == 0) + cxlds->media_ready = true; + else + dev_warn(&pdev->dev, "Media not active (%d)\n", rc); -static const struct attribute_group *cxl_memdev_attribute_groups[] = { - &cxl_memdev_attribute_group, - &cxl_memdev_ram_attribute_group, - &cxl_memdev_pmem_attribute_group, - NULL, -}; + irq_avail = cxl_alloc_irq_vectors(pdev); -static const struct device_type cxl_memdev_type = { - .name = "cxl_memdev", - .release = cxl_memdev_release, - .devnode = cxl_memdev_devnode, - .groups = cxl_memdev_attribute_groups, -}; + rc = cxl_pci_setup_mailbox(mds, irq_avail); + if (rc) + return rc; -static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd) -{ - down_write(&cxl_memdev_rwsem); - cxlmd->cxlm = NULL; - up_write(&cxl_memdev_rwsem); -} + rc = cxl_enumerate_cmds(mds); + if (rc) + return rc; -static void cxl_memdev_unregister(void *_cxlmd) -{ - struct cxl_memdev *cxlmd = _cxlmd; - struct device *dev = &cxlmd->dev; + rc = cxl_set_timestamp(mds); + if (rc) + return rc; - cdev_device_del(&cxlmd->cdev, dev); - cxl_memdev_shutdown(cxlmd); - put_device(dev); -} + rc = cxl_poison_state_init(mds); + if (rc) + return rc; -static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm) -{ - struct pci_dev *pdev = cxlm->pdev; - struct cxl_memdev *cxlmd; - struct device *dev; - struct cdev *cdev; - int rc; + rc = cxl_dev_state_identify(mds); + if (rc) + return rc; - cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL); - if (!cxlmd) - return ERR_PTR(-ENOMEM); + rc = cxl_mem_dpa_fetch(mds, &range_info); + if (rc) + return rc; - rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL); - if (rc < 0) - goto err; - cxlmd->id = rc; - - dev = &cxlmd->dev; - device_initialize(dev); - dev->parent = &pdev->dev; - dev->bus = &cxl_bus_type; - dev->devt = MKDEV(cxl_mem_major, cxlmd->id); - dev->type = &cxl_memdev_type; - device_set_pm_not_required(dev); - - cdev = &cxlmd->cdev; - cdev_init(cdev, &cxl_memdev_fops); - return cxlmd; - -err: - kfree(cxlmd); - return ERR_PTR(rc); -} + rc = cxl_dpa_setup(cxlds, &range_info); + 
+	if (rc)
+		return rc;

-static struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
-					      struct cxl_mem *cxlm)
-{
-	struct cxl_memdev *cxlmd;
-	struct device *dev;
-	struct cdev *cdev;
-	int rc;
+	rc = devm_cxl_setup_features(cxlds);
+	if (rc)
+		dev_dbg(&pdev->dev, "No CXL Features discovered\n");

-	cxlmd = cxl_memdev_alloc(cxlm);
+	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
 	if (IS_ERR(cxlmd))
-		return cxlmd;
+		return PTR_ERR(cxlmd);

-	dev = &cxlmd->dev;
-	rc = dev_set_name(dev, "mem%d", cxlmd->id);
+	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
 	if (rc)
-		goto err;
-
-	/*
-	 * Activate ioctl operations, no cxl_memdev_rwsem manipulation
-	 * needed as this is ordered with cdev_add() publishing the device.
-	 */
-	cxlmd->cxlm = cxlm;
+		return rc;

-	cdev = &cxlmd->cdev;
-	rc = cdev_device_add(cdev, dev);
+	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
 	if (rc)
-		goto err;
+		return rc;

-	rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
+	rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
 	if (rc)
-		return ERR_PTR(rc);
-	return cxlmd;
+		dev_dbg(&pdev->dev, "No CXL FWCTL setup\n");

-err:
-	/*
-	 * The cdev was briefly live, shutdown any ioctl operations that
-	 * saw that state.
-	 */
-	cxl_memdev_shutdown(cxlmd);
-	put_device(dev);
-	return ERR_PTR(rc);
-}
+	pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
+	if (pmu_count < 0)
+		return pmu_count;

-static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
-{
-	u32 remaining = size;
-	u32 offset = 0;
-
-	while (remaining) {
-		u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
-		struct cxl_mbox_get_log {
-			uuid_t uuid;
-			__le32 offset;
-			__le32 length;
-		} __packed log = {
-			.uuid = *uuid,
-			.offset = cpu_to_le32(offset),
-			.length = cpu_to_le32(xfer_size)
-		};
-		int rc;
-
-		rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
-					   sizeof(log), out, xfer_size);
-		if (rc < 0)
-			return rc;
-
-		out += xfer_size;
-		remaining -= xfer_size;
-		offset += xfer_size;
-	}
-
-	return 0;
-}
+	for (i = 0; i < pmu_count; i++) {
+		struct cxl_pmu_regs pmu_regs;

-/**
- * cxl_walk_cel() - Walk through the Command Effects Log.
- * @cxlm: Device.
- * @size: Length of the Command Effects Log.
- * @cel: CEL
- *
- * Iterate over each entry in the CEL and determine if the driver supports the
- * command. If so, the command is enabled for the device and can be used later.
- */
-static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
-{
-	struct cel_entry {
-		__le16 opcode;
-		__le16 effect;
-	} __packed * cel_entry;
-	const int cel_entries = size / sizeof(*cel_entry);
-	int i;
-
-	cel_entry = (struct cel_entry *)cel;
-
-	for (i = 0; i < cel_entries; i++) {
-		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
-		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
-
-		if (!cmd) {
-			dev_dbg(&cxlm->pdev->dev,
-				"Opcode 0x%04x unsupported by driver", opcode);
-			continue;
+		rc = cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_PMU, &map, i);
+		if (rc) {
+			dev_dbg(&pdev->dev, "Could not find PMU regblock\n");
+			break;
 		}
-		set_bit(cmd->info.id, cxlm->enabled_cmds);
-	}
-}
-
-struct cxl_mbox_get_supported_logs {
-	__le16 entries;
-	u8 rsvd[6];
-	struct gsl_entry {
-		uuid_t uuid;
-		__le32 size;
-	} __packed entry[];
-} __packed;
-
-static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
-{
-	struct cxl_mbox_get_supported_logs *ret;
-	int rc;
-
-	ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
-	if (!ret)
-		return ERR_PTR(-ENOMEM);
-
-	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
-				   0, ret, cxlm->payload_size);
-	if (rc < 0) {
-		kvfree(ret);
-		return ERR_PTR(rc);
-	}
-
-	return ret;
-}
-
-/**
- * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
- * @cxlm: The device.
- *
- * Returns 0 if enumerate completed successfully.
- *
- * CXL devices have optional support for certain commands. This function will
- * determine the set of supported commands for the hardware and update the
- * enabled_cmds bitmap in the @cxlm.
- */
-static int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
-{
-	struct cxl_mbox_get_supported_logs *gsl;
-	struct device *dev = &cxlm->pdev->dev;
-	struct cxl_mem_command *cmd;
-	int i, rc;
-
-	gsl = cxl_get_gsl(cxlm);
-	if (IS_ERR(gsl))
-		return PTR_ERR(gsl);
-
-	rc = -ENOENT;
-	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
-		u32 size = le32_to_cpu(gsl->entry[i].size);
-		uuid_t uuid = gsl->entry[i].uuid;
-		u8 *log;
-
-		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);
-
-		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
-			continue;
-
-		log = kvmalloc(size, GFP_KERNEL);
-		if (!log) {
-			rc = -ENOMEM;
-			goto out;
+		rc = cxl_map_pmu_regs(&map, &pmu_regs);
+		if (rc) {
+			dev_dbg(&pdev->dev, "Could not map PMU regs\n");
+			break;
 		}
-
-		rc = cxl_xfer_log(cxlm, &uuid, size, log);
+		rc = devm_cxl_pmu_add(cxlds->dev, &pmu_regs, cxlmd->id, i, CXL_PMU_MEMDEV);
 		if (rc) {
-			kvfree(log);
-			goto out;
+			dev_dbg(&pdev->dev, "Could not add PMU instance\n");
+			break;
 		}
+	}

-		cxl_walk_cel(cxlm, size, log);
-		kvfree(log);
+	rc = cxl_event_config(host_bridge, mds, irq_avail);
+	if (rc)
+		return rc;

-		/* In case CEL was bogus, enable some default commands. */
-		cxl_for_each_cmd(cmd)
-			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
-				set_bit(cmd->info.id, cxlm->enabled_cmds);
+	if (cxl_pci_ras_unmask(pdev))
+		dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");

-		/* Found the required CEL */
-		rc = 0;
-	}
+	pci_save_state(pdev);

-out:
-	kvfree(gsl);
 	return rc;
 }

-/**
- * cxl_mem_identify() - Send the IDENTIFY command to the device.
- * @cxlm: The device to identify.
- *
- * Return: 0 if identify was executed successfully.
- *
- * This will dispatch the identify command to the device and on success populate
- * structures to be exported to sysfs.
- */
-static int cxl_mem_identify(struct cxl_mem *cxlm)
-{
-	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
-	struct cxl_mbox_identify {
-		char fw_revision[0x10];
-		__le64 total_capacity;
-		__le64 volatile_capacity;
-		__le64 persistent_capacity;
-		__le64 partition_align;
-		__le16 info_event_log_size;
-		__le16 warning_event_log_size;
-		__le16 failure_event_log_size;
-		__le16 fatal_event_log_size;
-		__le32 lsa_size;
-		u8 poison_list_max_mer[3];
-		__le16 inject_poison_limit;
-		u8 poison_caps;
-		u8 qos_telemetry_caps;
-	} __packed id;
-	int rc;
-
-	rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
-				   sizeof(id));
-	if (rc < 0)
-		return rc;
-
-	/*
-	 * TODO: enumerate DPA map, as 'ram' and 'pmem' do not alias.
-	 * For now, only the capacity is exported in sysfs
-	 */
-	cxlm->ram_range.start = 0;
-	cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
-
-	cxlm->pmem_range.start = 0;
-	cxlm->pmem_range.end =
-		le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
+static const struct pci_device_id cxl_mem_pci_tbl[] = {
+	/* PCI class code for CXL.mem Type-3 Devices */
+	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
+	{ /* terminate list */ },
+};
+MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

-	cxlm->lsa_size = le32_to_cpu(id.lsa_size);
-	memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
+{
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *dev = &cxlmd->dev;

-	return 0;
+	dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
+		 dev_name(dev));
+	pci_restore_state(pdev);
+	if (device_attach(dev) <= 0)
+		return PCI_ERS_RESULT_DISCONNECT;
+	return PCI_ERS_RESULT_RECOVERED;
 }

-static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static void cxl_error_resume(struct pci_dev *pdev)
 {
-	struct cxl_memdev *cxlmd;
-	struct cxl_mem *cxlm;
-	int rc;
-
-	rc = pcim_enable_device(pdev);
-	if (rc)
-		return rc;
-
-	cxlm = cxl_mem_create(pdev);
-	if (IS_ERR(cxlm))
-		return PTR_ERR(cxlm);
-
-	rc = cxl_mem_setup_regs(cxlm);
-	if (rc)
-		return rc;
-
-	rc = cxl_mem_setup_mailbox(cxlm);
-	if (rc)
-		return rc;
-
-	rc = cxl_mem_enumerate_cmds(cxlm);
-	if (rc)
-		return rc;
-
-	rc = cxl_mem_identify(cxlm);
-	if (rc)
-		return rc;
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *dev = &cxlmd->dev;

-	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlm);
-	if (IS_ERR(cxlmd))
-		return PTR_ERR(cxlmd);
+	dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
+		 dev->driver ? "successful" : "failed");
+}

-	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
-		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
+static void cxl_reset_done(struct pci_dev *pdev)
+{
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *dev = &pdev->dev;

-	return rc;
+	/*
+	 * FLR does not expect to touch the HDM decoders and related
+	 * registers. SBR, however, will wipe all device configurations.
+	 * Issue a warning if there was an active decoder before the reset
+	 * that no longer exists.
+	 */
+	guard(device)(&cxlmd->dev);
+	if (cxlmd->endpoint &&
+	    cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) {
+		dev_crit(dev, "SBR happened without memory regions removal.\n");
+		dev_crit(dev, "System may be unstable if regions hosted system memory.\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+	}
 }

-static const struct pci_device_id cxl_mem_pci_tbl[] = {
-	/* PCI class code for CXL.mem Type-3 Devices */
-	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
-	{ /* terminate list */ },
+static const struct pci_error_handlers cxl_error_handlers = {
+	.error_detected	= cxl_error_detected,
+	.slot_reset	= cxl_slot_reset,
+	.resume		= cxl_error_resume,
+	.cor_error_detected	= cxl_cor_error_detected,
+	.reset_done	= cxl_reset_done,
 };
-MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

-static struct pci_driver cxl_mem_driver = {
+static struct pci_driver cxl_pci_driver = {
 	.name			= KBUILD_MODNAME,
 	.id_table		= cxl_mem_pci_tbl,
-	.probe			= cxl_mem_probe,
+	.probe			= cxl_pci_probe,
+	.err_handler		= &cxl_error_handlers,
+	.dev_groups		= cxl_rcd_groups,
 	.driver = {
 		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
 	},
 };

-static __init int cxl_mem_init(void)
+#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY	GENMASK(1, 0)
+static void cxl_handle_cper_event(enum cxl_event_type ev_type,
+				  struct cxl_cper_event_rec *rec)
 {
-	struct dentry *mbox_debugfs;
-	dev_t devt;
-	int rc;
+	struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
+	struct pci_dev *pdev __free(pci_dev_put) = NULL;
+	enum cxl_event_log_type log_type;
+	struct cxl_dev_state *cxlds;
+	unsigned int devfn;
+	u32 hdr_flags;
+
+	pr_debug("CPER event %d for device %u:%u:%u.%u\n", ev_type,
+		 device_id->segment_num, device_id->bus_num,
+		 device_id->device_num, device_id->func_num);
+
+	devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
+	pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
+					   device_id->bus_num, devfn);
+	if (!pdev)
+		return;
+
+	guard(device)(&pdev->dev);
+	if (pdev->driver != &cxl_pci_driver)
+		return;
+
+	cxlds = pci_get_drvdata(pdev);
+	if (!cxlds)
+		return;
+
+	/* Fabricate a log type */
+	hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
+	log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
+
+	cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
+			       &uuid_null, &rec->event);
+}

-	/* Double check the anonymous union trickery in struct cxl_regs */
-	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
-		     offsetof(struct cxl_regs, device_regs.memdev));
+static void cxl_cper_work_fn(struct work_struct *work)
+{
+	struct cxl_cper_work_data wd;

-	rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
-	if (rc)
-		return rc;
+	while (cxl_cper_kfifo_get(&wd))
+		cxl_handle_cper_event(wd.event_type, &wd.rec);
+}
+static DECLARE_WORK(cxl_cper_work, cxl_cper_work_fn);

-	cxl_mem_major = MAJOR(devt);
+static int __init cxl_pci_driver_init(void)
+{
+	int rc;

-	rc = pci_register_driver(&cxl_mem_driver);
-	if (rc) {
-		unregister_chrdev_region(MKDEV(cxl_mem_major, 0),
-					 CXL_MEM_MAX_DEVS);
+	rc = pci_register_driver(&cxl_pci_driver);
+	if (rc)
 		return rc;
-	}

-	cxl_debugfs = debugfs_create_dir("cxl", NULL);
-	mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
-	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
-			    &cxl_raw_allow_all);
+	rc = cxl_cper_register_work(&cxl_cper_work);
+	if (rc)
+		pci_unregister_driver(&cxl_pci_driver);

-	return 0;
+	return rc;
 }

-static __exit void cxl_mem_exit(void)
+static void __exit cxl_pci_driver_exit(void)
 {
-	debugfs_remove_recursive(cxl_debugfs);
-	pci_unregister_driver(&cxl_mem_driver);
-	unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
+	cxl_cper_unregister_work(&cxl_cper_work);
+	cancel_work_sync(&cxl_cper_work);
+	pci_unregister_driver(&cxl_pci_driver);
 }

+module_init(cxl_pci_driver_init);
+module_exit(cxl_pci_driver_exit);
+MODULE_DESCRIPTION("CXL: PCI manageability");
 MODULE_LICENSE("GPL v2");
-module_init(cxl_mem_init);
-module_exit(cxl_mem_exit);
-MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS("CXL");
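
The BUILD_BUG_ON() in cxl_pci_probe() above pins down the layout invariant behind the "anonymous union trickery" comment, and its FIXME names struct_group() as the eventual replacement. A minimal userspace sketch of the same invariant, assuming nothing about the real struct cxl_regs (demo_regs and its members are illustrative only):

#include <stddef.h>
#include <stdio.h>

struct demo_regs {
	union {
		struct {		/* full device-register view */
			void *status;
			void *mbox;
			void *memdev;
		} device_regs;
		struct {		/* shorthand view, must mirror the above */
			void *status_alias;
			void *mbox_alias;
			void *memdev;	/* aliases device_regs.memdev */
		};
	};
};

int main(void)
{
	/* The compile-time check the probe does with BUILD_BUG_ON() */
	_Static_assert(offsetof(struct demo_regs, memdev) ==
		       offsetof(struct demo_regs, device_regs.memdev),
		       "union views diverged");
	printf("memdev offset: %zu\n", offsetof(struct demo_regs, memdev));
	return 0;
}

If either view gains or reorders a member, the assertion (like the driver's BUILD_BUG_ON()) fails at compile time instead of silently aliasing the wrong register block.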
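The cxl_mem_pci_tbl entry binds by PCI class code rather than vendor/device ID: PCI_DEVICE_CLASS() with a ~0 mask requires an exact match on the full 24-bit class value. Assuming the usual header values (PCI_CLASS_MEMORY_CXL as 0x0502 and CXL_MEMORY_PROGIF as 0x10), the matched value works out as in this standalone check:

#include <stdio.h>

#define PCI_CLASS_MEMORY_CXL	0x0502	/* base class 05, sub-class 02 */
#define CXL_MEMORY_PROGIF	0x10	/* CXL.mem programming interface */

int main(void)
{
	/* The value the PCI core compares dev->class against, mask ~0 */
	unsigned int class = PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF;

	printf("match on class 0x%06x\n", class);	/* prints 0x050210 */
	return 0;
}

Matching the class this way lets one driver bind any spec-compliant CXL.mem Type-3 device, with no per-vendor ID list to maintain.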
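cxl_cper_work_fn() drains every record queued so far each time the work item runs, so a single schedule services a whole burst of firmware notifications. A userspace analogue of that drain-the-fifo pattern, where fifo_put()/fifo_get() are illustrative stand-ins for the kernel's kfifo helpers, not its API:

#include <stdbool.h>
#include <stdio.h>

#define FIFO_DEPTH 8

struct work_data { int event_type; };

static struct work_data fifo[FIFO_DEPTH];
static unsigned int head, tail;

static bool fifo_put(struct work_data wd)
{
	if (head - tail == FIFO_DEPTH)
		return false;	/* full: drop, as a bounded fifo would */
	fifo[head++ % FIFO_DEPTH] = wd;
	return true;
}

static bool fifo_get(struct work_data *wd)
{
	if (head == tail)
		return false;
	*wd = fifo[tail++ % FIFO_DEPTH];
	return true;
}

/* Mirrors the cxl_cper_work_fn() loop: drain everything queued so far */
static void work_fn(void)
{
	struct work_data wd;

	while (fifo_get(&wd))
		printf("handle CPER event type %d\n", wd.event_type);
}

int main(void)
{
	fifo_put((struct work_data){ .event_type = 0 });
	fifo_put((struct work_data){ .event_type = 2 });
	work_fn();	/* one "schedule" consumes both records */
	return 0;
}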
