Diffstat (limited to 'drivers/cxl/core/mbox.c')
-rw-r--r--	drivers/cxl/core/mbox.c	1034
1 file changed, 862 insertions, 172 deletions
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 0c90f13870a4..fa6dd0c94656 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1,13 +1,17 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/security.h>
 #include <linux/debugfs.h>
+#include <linux/ktime.h>
 #include <linux/mutex.h>
+#include <linux/unaligned.h>
+#include <cxlpci.h>
 #include <cxlmem.h>
 #include <cxl.h>
 #include "core.h"
+#include "trace.h"
+#include "mce.h"
 
 static bool cxl_raw_allow_all;
 
@@ -53,18 +57,17 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
 	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
 	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
 	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
+	CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0),
+	CXL_CMD(CLEAR_LOG, 0x10, 0, 0),
+	CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0),
 	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
 	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
 	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
 	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
 	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
 	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
-	CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
-	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
-	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
 	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
-	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
-	CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
+	CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
 };
 
 /*
@@ -85,6 +88,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
  *
  * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
  * is kept up to date with patrol notifications and error management.
+ *
+ * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel
+ * driver orchestration for safety.
  */
 static u16 cxl_disabled_raw_commands[] = {
 	CXL_MBOX_OP_ACTIVATE_FW,
@@ -93,6 +99,9 @@ static u16 cxl_disabled_raw_commands[] = {
 	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
 	CXL_MBOX_OP_SCAN_MEDIA,
 	CXL_MBOX_OP_GET_SCAN_MEDIA,
+	CXL_MBOX_OP_GET_POISON,
+	CXL_MBOX_OP_INJECT_POISON,
+	CXL_MBOX_OP_CLEAR_POISON,
 };
 
 /*
@@ -117,6 +126,82 @@ static bool cxl_is_security_command(u16 opcode)
 	return false;
 }
 
+static void cxl_set_security_cmd_enabled(struct cxl_security_state *security,
+					 u16 opcode)
+{
+	switch (opcode) {
+	case CXL_MBOX_OP_SANITIZE:
+		set_bit(CXL_SEC_ENABLED_SANITIZE, security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_SECURE_ERASE:
+		set_bit(CXL_SEC_ENABLED_SECURE_ERASE,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_GET_SECURITY_STATE:
+		set_bit(CXL_SEC_ENABLED_GET_SECURITY_STATE,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_SET_PASSPHRASE:
+		set_bit(CXL_SEC_ENABLED_SET_PASSPHRASE,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
+		set_bit(CXL_SEC_ENABLED_DISABLE_PASSPHRASE,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_UNLOCK:
+		set_bit(CXL_SEC_ENABLED_UNLOCK, security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_FREEZE_SECURITY:
+		set_bit(CXL_SEC_ENABLED_FREEZE_SECURITY,
+			security->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
+		set_bit(CXL_SEC_ENABLED_PASSPHRASE_SECURE_ERASE,
+			security->enabled_cmds);
+		break;
+	default:
+		break;
+	}
+}
+
+static bool cxl_is_poison_command(u16 opcode)
+{
+#define CXL_MBOX_OP_POISON_CMDS 0x43
+
+	if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS)
+		return true;
+
+	return false;
+}
+
+static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison,
+				       u16 opcode)
+{
+	switch (opcode) {
+	case CXL_MBOX_OP_GET_POISON:
+		set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_INJECT_POISON:
+		set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_CLEAR_POISON:
+		set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS:
+		set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_SCAN_MEDIA:
+		set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds);
+		break;
+	case CXL_MBOX_OP_GET_SCAN_MEDIA:
+		set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds);
+		break;
+	default:
+		break;
+	}
+}
+
 static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
 {
 	struct cxl_mem_command *c;
@@ -140,13 +225,9 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
 }
 
 /**
- * cxl_mbox_send_cmd() - Send a mailbox command to a device.
- * @cxlds: The device data for the operation
- * @opcode: Opcode for the mailbox command.
- * @in: The input payload for the mailbox command.
- * @in_size: The length of the input payload
- * @out: Caller allocated buffer for the output.
- * @out_size: Expected size of output.
+ * cxl_internal_send_cmd() - Kernel internal interface to send a mailbox command
+ * @cxl_mbox: CXL mailbox context
+ * @mbox_cmd: initialized command to execute
  *
  * Context: Any context.
  * Return:
@@ -161,40 +242,47 @@ static const char *cxl_mem_opcode_to_name(u16 opcode)
  * error. While this distinction can be useful for commands from userspace, the
  * kernel will only be able to use results when both are successful.
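To make that calling convention concrete, here is a minimal sketch of a kernel-internal caller (editor's illustration, not part of this patch; the helper name is hypothetical, while the opcode and payload struct appear elsewhere in this diff):

	/* Hypothetical caller of cxl_internal_send_cmd(), illustration only */
	static int example_get_health(struct cxl_mailbox *cxl_mbox,
				      struct cxl_mbox_get_health_info_out *hi)
	{
		struct cxl_mbox_cmd mbox_cmd = {
			.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
			.size_out = sizeof(*hi),	/* full expected size */
			.payload_out = hi,
			/* .min_out left 0: the full size_out is required */
		};

		/*
		 * 0 on success; -E2BIG if a size exceeds payload_size; -EIO
		 * if the device returned fewer bytes than min_out; otherwise
		 * the device status translated by cxl_mbox_cmd_rc2errno().
		 */
		return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
	}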
  */
-int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
-		      size_t in_size, void *out, size_t out_size)
+int cxl_internal_send_cmd(struct cxl_mailbox *cxl_mbox,
+			  struct cxl_mbox_cmd *mbox_cmd)
 {
-	const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
-	struct cxl_mbox_cmd mbox_cmd = {
-		.opcode = opcode,
-		.payload_in = in,
-		.size_in = in_size,
-		.size_out = out_size,
-		.payload_out = out,
-	};
+	size_t out_size, min_out;
 	int rc;
 
-	if (in_size > cxlds->payload_size || out_size > cxlds->payload_size)
+	if (mbox_cmd->size_in > cxl_mbox->payload_size ||
+	    mbox_cmd->size_out > cxl_mbox->payload_size)
 		return -E2BIG;
 
-	rc = cxlds->mbox_send(cxlds, &mbox_cmd);
+	out_size = mbox_cmd->size_out;
+	min_out = mbox_cmd->min_out;
+	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
+	/*
+	 * EIO is reserved for a payload size mismatch and mbox_send()
+	 * may not return this error.
+	 */
+	if (WARN_ONCE(rc == -EIO, "Bad return code: -EIO"))
+		return -ENXIO;
 	if (rc)
 		return rc;
 
-	if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
-		return cxl_mbox_cmd_rc2errno(&mbox_cmd);
+	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS &&
+	    mbox_cmd->return_code != CXL_MBOX_CMD_RC_BACKGROUND)
+		return cxl_mbox_cmd_rc2errno(mbox_cmd);
+
+	if (!out_size)
+		return 0;
 
 	/*
-	 * Variable sized commands can't be validated and so it's up to the
-	 * caller to do that if they wish.
+	 * Variable sized output needs to at least satisfy the caller's
+	 * minimum if not the fully requested size.
 	 */
-	if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) {
-		if (mbox_cmd.size_out != out_size)
-			return -EIO;
-	}
+	if (min_out == 0)
+		min_out = out_size;
+
+	if (mbox_cmd->size_out < min_out)
+		return -EIO;
 
 	return 0;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_internal_send_cmd, "CXL");
 
 static bool cxl_mem_raw_command_allowed(u16 opcode)
 {
@@ -247,45 +335,54 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
 			return false;
 		break;
 	}
+	case CXL_MBOX_OP_CLEAR_LOG: {
+		const uuid_t *uuid = (uuid_t *)payload_in;
+
+		/*
+		 * Restrict the 'Clear log' action to only apply to
+		 * Vendor debug logs.
+		 */
+		return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID);
+	}
 	default:
 		break;
 	}
 
 	return true;
 }
 
-static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
-			     struct cxl_dev_state *cxlds, u16 opcode,
+static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox_cmd,
+			     struct cxl_mailbox *cxl_mbox, u16 opcode,
 			     size_t in_size, size_t out_size, u64 in_payload)
 {
-	*mbox = (struct cxl_mbox_cmd) {
+	*mbox_cmd = (struct cxl_mbox_cmd) {
 		.opcode = opcode,
 		.size_in = in_size,
 	};
 
 	if (in_size) {
-		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
-						in_size);
-		if (IS_ERR(mbox->payload_in))
-			return PTR_ERR(mbox->payload_in);
+		mbox_cmd->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
+						    in_size);
+		if (IS_ERR(mbox_cmd->payload_in))
+			return PTR_ERR(mbox_cmd->payload_in);
 
-		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
-			dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
+		if (!cxl_payload_from_user_allowed(opcode, mbox_cmd->payload_in)) {
+			dev_dbg(cxl_mbox->host, "%s: input payload not allowed\n",
 				cxl_mem_opcode_to_name(opcode));
-			kvfree(mbox->payload_in);
+			kvfree(mbox_cmd->payload_in);
 			return -EBUSY;
 		}
 	}
 
 	/* Prepare to handle a full payload for variable sized output */
 	if (out_size == CXL_VARIABLE_PAYLOAD)
-		mbox->size_out = cxlds->payload_size;
+		mbox_cmd->size_out = cxl_mbox->payload_size;
 	else
-		mbox->size_out = out_size;
+		mbox_cmd->size_out = out_size;
 
-	if (mbox->size_out) {
-		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
-		if (!mbox->payload_out) {
-			kvfree(mbox->payload_in);
+	if (mbox_cmd->size_out) {
+		mbox_cmd->payload_out = kvzalloc(mbox_cmd->size_out, GFP_KERNEL);
+		if (!mbox_cmd->payload_out) {
+			kvfree(mbox_cmd->payload_in);
 			return -ENOMEM;
 		}
 	}
@@ -300,7 +397,7 @@ static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
 
 static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
 			      const struct cxl_send_command *send_cmd,
-			      struct cxl_dev_state *cxlds)
+			      struct cxl_mailbox *cxl_mbox)
 {
 	if (send_cmd->raw.rsvd)
 		return -EINVAL;
@@ -310,13 +407,13 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
 	 * gets passed along without further checking, so it must be
 	 * validated here.
 	 */
-	if (send_cmd->out.size > cxlds->payload_size)
+	if (send_cmd->out.size > cxl_mbox->payload_size)
 		return -EINVAL;
 
 	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
 		return -EPERM;
 
-	dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");
+	dev_WARN_ONCE(cxl_mbox->host, true, "raw command path used\n");
 
 	*mem_cmd = (struct cxl_mem_command) {
 		.info = {
@@ -332,7 +429,7 @@ static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
 
 static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
 			  const struct cxl_send_command *send_cmd,
-			  struct cxl_dev_state *cxlds)
+			  struct cxl_mailbox *cxl_mbox)
 {
 	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
 	const struct cxl_command_info *info = &c->info;
@@ -347,11 +444,11 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
 		return -EINVAL;
 
 	/* Check that the command is enabled for hardware */
-	if (!test_bit(info->id, cxlds->enabled_cmds))
+	if (!test_bit(info->id, cxl_mbox->enabled_cmds))
 		return -ENOTTY;
 
 	/* Check that the command is not claimed for exclusive kernel use */
-	if (test_bit(info->id, cxlds->exclusive_cmds))
+	if (test_bit(info->id, cxl_mbox->exclusive_cmds))
 		return -EBUSY;
 
 	/* Check the input buffer is the expected size */
@@ -380,7 +477,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
 
 /**
  * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
- * @cxlds: The device data for the operation
+ * @cxl_mbox: CXL mailbox context
  * @send_cmd: &struct cxl_send_command copied in from userspace.
  *
  * Return:
@@ -395,7 +492,7 @@ static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
  * safe to send to the hardware.
  */
 static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
-				      struct cxl_dev_state *cxlds,
+				      struct cxl_mailbox *cxl_mbox,
 				      const struct cxl_send_command *send_cmd)
 {
 	struct cxl_mem_command mem_cmd;
@@ -409,28 +506,28 @@ static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
 	 * supports, but output can be arbitrarily large (simply write out as
 	 * much data as the hardware provides).
 	 */
-	if (send_cmd->in.size > cxlds->payload_size)
+	if (send_cmd->in.size > cxl_mbox->payload_size)
 		return -EINVAL;
 
 	/* Sanitize and construct a cxl_mem_command */
 	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
-		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
+		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxl_mbox);
 	else
-		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);
+		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxl_mbox);
 
 	if (rc)
 		return rc;
 
 	/* Sanitize and construct a cxl_mbox_cmd */
-	return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
+	return cxl_mbox_cmd_ctor(mbox_cmd, cxl_mbox, mem_cmd.opcode,
 				 mem_cmd.info.size_in, mem_cmd.info.size_out,
 				 send_cmd->in.payload);
 }
 
-int cxl_query_cmd(struct cxl_memdev *cxlmd,
+int cxl_query_cmd(struct cxl_mailbox *cxl_mbox,
 		  struct cxl_mem_query_commands __user *q)
 {
-	struct device *dev = &cxlmd->dev;
+	struct device *dev = cxl_mbox->host;
 	struct cxl_mem_command *cmd;
 	u32 n_commands;
 	int j = 0;
@@ -445,13 +542,18 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
 		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);
 
 	/*
-	 * otherwise, return max(n_commands, total commands) cxl_command_info
+	 * otherwise, return min(n_commands, total commands) cxl_command_info
 	 * structures.
 	 */
 	cxl_for_each_cmd(cmd) {
-		const struct cxl_command_info *info = &cmd->info;
+		struct cxl_command_info info = cmd->info;
+
+		if (test_bit(info.id, cxl_mbox->enabled_cmds))
+			info.flags |= CXL_MEM_COMMAND_FLAG_ENABLED;
+		if (test_bit(info.id, cxl_mbox->exclusive_cmds))
+			info.flags |= CXL_MEM_COMMAND_FLAG_EXCLUSIVE;
 
-		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
+		if (copy_to_user(&q->commands[j++], &info, sizeof(info)))
 			return -EFAULT;
 
 		if (j == n_commands)
@@ -463,7 +565,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
 
 /**
  * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @cxlds: The device data for the operation
+ * @cxl_mbox: The mailbox context for the operation.
  * @mbox_cmd: The validated mailbox command.
  * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
@@ -484,12 +586,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
 *
 * See cxl_send_cmd().
  */
-static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
+static int handle_mailbox_cmd_from_user(struct cxl_mailbox *cxl_mbox,
 					struct cxl_mbox_cmd *mbox_cmd,
 					u64 out_payload, s32 *size_out,
 					u32 *retval)
 {
-	struct device *dev = cxlds->dev;
+	struct device *dev = cxl_mbox->host;
 	int rc;
 
 	dev_dbg(dev,
@@ -499,7 +601,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
 		cxl_mem_opcode_to_name(mbox_cmd->opcode),
 		mbox_cmd->opcode, mbox_cmd->size_in);
 
-	rc = cxlds->mbox_send(cxlds, mbox_cmd);
+	rc = cxl_mbox->mbox_send(cxl_mbox, mbox_cmd);
 	if (rc)
 		goto out;
 
@@ -526,10 +628,9 @@ out:
 	return rc;
 }
 
-int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
+int cxl_send_cmd(struct cxl_mailbox *cxl_mbox, struct cxl_send_command __user *s)
 {
-	struct cxl_dev_state *cxlds = cxlmd->cxlds;
-	struct device *dev = &cxlmd->dev;
+	struct device *dev = cxl_mbox->host;
 	struct cxl_send_command send;
 	struct cxl_mbox_cmd mbox_cmd;
 	int rc;
@@ -539,11 +640,11 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
 	if (copy_from_user(&send, s, sizeof(send)))
 		return -EFAULT;
 
-	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send);
+	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxl_mbox, &send);
 	if (rc)
 		return rc;
 
-	rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
+	rc = handle_mailbox_cmd_from_user(cxl_mbox, &mbox_cmd, send.out.payload,
 					  &send.out.size, &send.retval);
 	if (rc)
 		return rc;
@@ -554,22 +655,45 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
 	return 0;
 }
 
-static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
+static int cxl_xfer_log(struct cxl_memdev_state *mds, uuid_t *uuid,
+			u32 *size, u8 *out)
 {
-	u32 remaining = size;
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+	u32 remaining = *size;
 	u32 offset = 0;
 
 	while (remaining) {
-		u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
-		struct cxl_mbox_get_log log = {
+		u32 xfer_size = min_t(u32, remaining, cxl_mbox->payload_size);
+		struct cxl_mbox_cmd mbox_cmd;
+		struct cxl_mbox_get_log log;
+		int rc;
+
+		log = (struct cxl_mbox_get_log) {
 			.uuid = *uuid,
 			.offset = cpu_to_le32(offset),
-			.length = cpu_to_le32(xfer_size)
+			.length = cpu_to_le32(xfer_size),
 		};
-		int rc;
 
-		rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
-				       out, xfer_size);
+		mbox_cmd = (struct cxl_mbox_cmd) {
+			.opcode = CXL_MBOX_OP_GET_LOG,
+			.size_in = sizeof(log),
+			.payload_in = &log,
+			.size_out = xfer_size,
+			.payload_out = out,
+		};
+
+		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+
+		/*
+		 * The output payload length that indicates the number
+		 * of valid bytes can be smaller than the Log buffer
+		 * size.
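As a worked example of the transfer loop above (editor's illustration with made-up numbers): with a 1024-byte mailbox payload and a 2500-byte log, cxl_xfer_log() issues Get Log three times:

	xfer 1: offset = 0,    length = 1024
	xfer 2: offset = 1024, length = 1024
	xfer 3: offset = 2048, length = 452

If the device instead reports only, say, 600 valid bytes on the second transfer (rc == -EIO with size_out < xfer_size), the loop stops early and *size is trimmed to the 1624 bytes actually received.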
+		 */
+		if (rc == -EIO && mbox_cmd.size_out < xfer_size) {
+			offset += mbox_cmd.size_out;
+			break;
+		}
+
 		if (rc < 0)
 			return rc;
 
@@ -578,56 +702,114 @@ static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8
 		offset += xfer_size;
 	}
 
+	*size = offset;
+
 	return 0;
 }
 
+static int check_features_opcodes(u16 opcode, int *ro_cmds, int *wr_cmds)
+{
+	switch (opcode) {
+	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
+	case CXL_MBOX_OP_GET_FEATURE:
+		(*ro_cmds)++;
+		return 1;
+	case CXL_MBOX_OP_SET_FEATURE:
+		(*wr_cmds)++;
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* 'Get Supported Features' and 'Get Feature' */
+#define MAX_FEATURES_READ_CMDS 2
+static void set_features_cap(struct cxl_mailbox *cxl_mbox,
+			     int ro_cmds, int wr_cmds)
+{
+	/* Setting up Features capability while walking the CEL */
+	if (ro_cmds == MAX_FEATURES_READ_CMDS) {
+		if (wr_cmds)
+			cxl_mbox->feat_cap = CXL_FEATURES_RW;
+		else
+			cxl_mbox->feat_cap = CXL_FEATURES_RO;
+	}
+}
+
 /**
  * cxl_walk_cel() - Walk through the Command Effects Log.
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
  * @size: Length of the Command Effects Log.
  * @cel: CEL
  *
  * Iterate over each entry in the CEL and determine if the driver supports the
  * command. If so, the command is enabled for the device and can be used later.
  */
-static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
+static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
 {
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 	struct cxl_cel_entry *cel_entry;
 	const int cel_entries = size / sizeof(*cel_entry);
-	int i;
+	struct device *dev = mds->cxlds.dev;
+	int i, ro_cmds = 0, wr_cmds = 0;
 
 	cel_entry = (struct cxl_cel_entry *) cel;
 
 	for (i = 0; i < cel_entries; i++) {
 		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
 		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
+		int enabled = 0;
 
-		if (!cmd) {
-			dev_dbg(cxlds->dev,
-				"Opcode 0x%04x unsupported by driver", opcode);
-			continue;
+		if (cmd) {
+			set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
+			enabled++;
 		}
 
-		set_bit(cmd->info.id, cxlds->enabled_cmds);
+		enabled += check_features_opcodes(opcode, &ro_cmds,
+						  &wr_cmds);
+
+		if (cxl_is_poison_command(opcode)) {
+			cxl_set_poison_cmd_enabled(&mds->poison, opcode);
+			enabled++;
+		}
+
+		if (cxl_is_security_command(opcode)) {
+			cxl_set_security_cmd_enabled(&mds->security, opcode);
+			enabled++;
+		}
+
+		dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
+			enabled ? "enabled" : "unsupported by driver");
 	}
+
+	set_features_cap(cxl_mbox, ro_cmds, wr_cmds);
 }
 
-static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
+static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_memdev_state *mds)
 {
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 	struct cxl_mbox_get_supported_logs *ret;
+	struct cxl_mbox_cmd mbox_cmd;
 	int rc;
 
-	ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
+	ret = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
 	if (!ret)
 		return ERR_PTR(-ENOMEM);
 
-	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
-			       cxlds->payload_size);
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_GET_SUPPORTED_LOGS,
+		.size_out = cxl_mbox->payload_size,
+		.payload_out = ret,
+		/* At least the record number field must be valid */
+		.min_out = 2,
+	};
	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 	if (rc < 0) {
 		kvfree(ret);
 		return ERR_PTR(rc);
 	}
 
+
 	return ret;
 }
 
@@ -644,22 +826,23 @@ static const uuid_t log_uuid[] = {
 
 /**
  * cxl_enumerate_cmds() - Enumerate commands for a device.
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
- * enabled_cmds bitmap in the @cxlds.
+ * enabled_cmds bitmap in the @mds.
 */
-int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
+int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
 {
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 	struct cxl_mbox_get_supported_logs *gsl;
-	struct device *dev = cxlds->dev;
+	struct device *dev = mds->cxlds.dev;
 	struct cxl_mem_command *cmd;
 	int i, rc;
 
-	gsl = cxl_get_gsl(cxlds);
+	gsl = cxl_get_gsl(mds);
 	if (IS_ERR(gsl))
 		return PTR_ERR(gsl);
 
@@ -680,33 +863,278 @@ int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
 			goto out;
 		}
 
-		rc = cxl_xfer_log(cxlds, &uuid, size, log);
+		rc = cxl_xfer_log(mds, &uuid, &size, log);
 		if (rc) {
 			kvfree(log);
 			goto out;
 		}
 
-		cxl_walk_cel(cxlds, size, log);
+		cxl_walk_cel(mds, size, log);
 		kvfree(log);
 
 		/* In case CEL was bogus, enable some default commands. */
 		cxl_for_each_cmd(cmd)
 			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
-				set_bit(cmd->info.id, cxlds->enabled_cmds);
+				set_bit(cmd->info.id, cxl_mbox->enabled_cmds);
 
 		/* Found the required CEL */
 		rc = 0;
 	}
-
 out:
 	kvfree(gsl);
 	return rc;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, "CXL");
+
+void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+			    enum cxl_event_log_type type,
+			    enum cxl_event_type event_type,
+			    const uuid_t *uuid, union cxl_event *evt)
+{
+	if (event_type == CXL_CPER_EVENT_MEM_MODULE) {
+		trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
+		return;
+	}
+	if (event_type == CXL_CPER_EVENT_GENERIC) {
+		trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+		return;
+	}
+	if (event_type == CXL_CPER_EVENT_MEM_SPARING) {
+		trace_cxl_memory_sparing(cxlmd, type, &evt->mem_sparing);
+		return;
+	}
+
+	if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) {
+		u64 dpa, hpa = ULLONG_MAX, hpa_alias = ULLONG_MAX;
+		struct cxl_region *cxlr;
+
+		/*
+		 * These trace points are annotated with HPA and region
+		 * translations. Take topology mutation locks and lookup
+		 * { HPA, REGION } from { DPA, MEMDEV } in the event record.
+		 */
+		guard(rwsem_read)(&cxl_rwsem.region);
+		guard(rwsem_read)(&cxl_rwsem.dpa);
+
+		dpa = le64_to_cpu(evt->media_hdr.phys_addr) & CXL_DPA_MASK;
+		cxlr = cxl_dpa_to_region(cxlmd, dpa);
+		if (cxlr) {
+			u64 cache_size = cxlr->params.cache_size;
+
+			hpa = cxl_dpa_to_hpa(cxlr, cxlmd, dpa);
+			if (cache_size)
+				hpa_alias = hpa - cache_size;
+		}
+
+		if (event_type == CXL_CPER_EVENT_GEN_MEDIA) {
+			if (cxl_store_rec_gen_media((struct cxl_memdev *)cxlmd, evt))
+				dev_dbg(&cxlmd->dev, "CXL store rec_gen_media failed\n");
+
+			if (evt->gen_media.media_hdr.descriptor &
+			    CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+				WARN_ON_ONCE((evt->gen_media.media_hdr.type &
+					      CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+					     !get_unaligned_le24(evt->gen_media.cme_count));
+			else
+				WARN_ON_ONCE(evt->gen_media.media_hdr.type &
+					     CXL_GMER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
+			trace_cxl_general_media(cxlmd, type, cxlr, hpa,
+						hpa_alias, &evt->gen_media);
+		} else if (event_type == CXL_CPER_EVENT_DRAM) {
+			if (cxl_store_rec_dram((struct cxl_memdev *)cxlmd, evt))
+				dev_dbg(&cxlmd->dev, "CXL store rec_dram failed\n");
+
+			if (evt->dram.media_hdr.descriptor &
+			    CXL_GMER_EVT_DESC_THRESHOLD_EVENT)
+				WARN_ON_ONCE((evt->dram.media_hdr.type &
+					      CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE) &&
+					     !get_unaligned_le24(evt->dram.cvme_count));
+			else
+				WARN_ON_ONCE(evt->dram.media_hdr.type &
+					     CXL_DER_MEM_EVT_TYPE_AP_CME_COUNTER_EXPIRE);
+
+			trace_cxl_dram(cxlmd, type, cxlr, hpa, hpa_alias,
+				       &evt->dram);
+		}
+	}
+}
+EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, "CXL");
+
+static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+				     enum cxl_event_log_type type,
+				     struct cxl_event_record_raw *record)
+{
+	enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
+	const uuid_t *uuid = &record->id;
+
+	if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
+		ev_type = CXL_CPER_EVENT_GEN_MEDIA;
+	else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
+		ev_type = CXL_CPER_EVENT_DRAM;
+	else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
+		ev_type = CXL_CPER_EVENT_MEM_MODULE;
+	else if (uuid_equal(uuid, &CXL_EVENT_MEM_SPARING_UUID))
+		ev_type = CXL_CPER_EVENT_MEM_SPARING;
+
+	cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
+}
+
+static int cxl_clear_event_record(struct cxl_memdev_state *mds,
+				  enum cxl_event_log_type log,
+				  struct cxl_get_event_payload *get_pl)
+{
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+	struct cxl_mbox_clear_event_payload *payload;
+	u16 total = le16_to_cpu(get_pl->record_count);
+	u8 max_handles = CXL_CLEAR_EVENT_MAX_HANDLES;
+	size_t pl_size = struct_size(payload, handles, max_handles);
+	struct cxl_mbox_cmd mbox_cmd;
+	u16 cnt;
+	int rc = 0;
+	int i;
+
+	/* Payload size may limit the max handles */
+	if (pl_size > cxl_mbox->payload_size) {
+		max_handles = (cxl_mbox->payload_size - sizeof(*payload)) /
+			      sizeof(__le16);
+		pl_size = struct_size(payload, handles, max_handles);
+	}
+
+	payload = kvzalloc(pl_size, GFP_KERNEL);
+	if (!payload)
+		return -ENOMEM;
+
+	*payload = (struct cxl_mbox_clear_event_payload) {
+		.event_log = log,
+	};
+
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_CLEAR_EVENT_RECORD,
+		.payload_in = payload,
+		.size_in = pl_size,
+	};
+
+	/*
+	 * Clear Event Records uses u8 for the handle cnt while Get Event
+	 * Record can return up to 0xffff records.
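A worked example of the handle budgeting above (editor's illustration; the 8-byte header size is an assumption chosen for round numbers): with a 256-byte mailbox payload, the budget becomes

	max_handles = (256 - 8) / sizeof(__le16) = 124

so clearing 1000 records takes eight full Clear Event Records commands of 124 handles each plus one final command carrying the remaining 8 handles.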
+	 */
+	i = 0;
+	for (cnt = 0; cnt < total; cnt++) {
+		struct cxl_event_record_raw *raw = &get_pl->records[cnt];
+		struct cxl_event_generic *gen = &raw->event.generic;
+
+		payload->handles[i++] = gen->hdr.handle;
+		dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
+			le16_to_cpu(payload->handles[i - 1]));
+
+		if (i == max_handles) {
+			payload->nr_recs = i;
+			rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+			if (rc)
+				goto free_pl;
+			i = 0;
+		}
+	}
+
+	/* Clear what is left if any */
+	if (i) {
+		payload->nr_recs = i;
+		mbox_cmd.size_in = struct_size(payload, handles, i);
+		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+		if (rc)
+			goto free_pl;
+	}
+
+free_pl:
+	kvfree(payload);
+	return rc;
+}
+
+static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
+				    enum cxl_event_log_type type)
+{
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+	struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
+	struct device *dev = mds->cxlds.dev;
+	struct cxl_get_event_payload *payload;
+	u8 log_type = type;
+	u16 nr_rec;
+
+	mutex_lock(&mds->event.log_lock);
+	payload = mds->event.buf;
+
+	do {
+		int rc, i;
+		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+			.opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+			.payload_in = &log_type,
+			.size_in = sizeof(log_type),
+			.payload_out = payload,
+			.size_out = cxl_mbox->payload_size,
+			.min_out = struct_size(payload, records, 0),
+		};
+
+		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+		if (rc) {
+			dev_err_ratelimited(dev,
+				"Event log '%d': Failed to query event records : %d",
+				type, rc);
+			break;
+		}
+
+		nr_rec = le16_to_cpu(payload->record_count);
+		if (!nr_rec)
+			break;
+
+		for (i = 0; i < nr_rec; i++)
+			__cxl_event_trace_record(cxlmd, type,
+						 &payload->records[i]);
+
+		if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
+			trace_cxl_overflow(cxlmd, type, payload);
+
+		rc = cxl_clear_event_record(mds, type, payload);
+		if (rc) {
+			dev_err_ratelimited(dev,
+				"Event log '%d': Failed to clear events : %d",
+				type, rc);
+			break;
+		}
+	} while (nr_rec);
+
+	mutex_unlock(&mds->event.log_lock);
+}
+
+/**
+ * cxl_mem_get_event_records - Get Event Records from the device
+ * @mds: The driver data for the operation
+ * @status: Event Status register value identifying which events are available.
+ *
+ * Retrieve all event records available on the device, report them as trace
+ * events, and clear them.
+ *
+ * See CXL rev 3.0 @8.2.9.2.2 Get Event Records
+ * See CXL rev 3.0 @8.2.9.2.3 Clear Event Records
+ */
+void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status)
+{
+	dev_dbg(mds->cxlds.dev, "Reading event logs: %x\n", status);
+
+	if (status & CXLDEV_EVENT_STATUS_FATAL)
+		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FATAL);
+	if (status & CXLDEV_EVENT_STATUS_FAIL)
+		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_FAIL);
+	if (status & CXLDEV_EVENT_STATUS_WARN)
+		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_WARN);
+	if (status & CXLDEV_EVENT_STATUS_INFO)
+		cxl_mem_get_records_log(mds, CXL_EVENT_TYPE_INFO);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_event_records, "CXL");
 
 /**
  * cxl_mem_get_partition_info - Get partition info
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
  *
  * Retrieve the current partition info for the device specified. The active
  * values are the current capacity in bytes.
 * If not 0, the 'next' values are
@@ -716,141 +1144,403 @@ EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
 *
 * See CXL @8.2.9.5.2.1 Get Partition Info
 */
-static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
+static int cxl_mem_get_partition_info(struct cxl_memdev_state *mds)
 {
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 	struct cxl_mbox_get_partition_info pi;
+	struct cxl_mbox_cmd mbox_cmd;
 	int rc;
 
-	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
-			       &pi, sizeof(pi));
-
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_GET_PARTITION_INFO,
+		.size_out = sizeof(pi),
+		.payload_out = &pi,
+	};
+	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 	if (rc)
 		return rc;
 
-	cxlds->active_volatile_bytes =
+	mds->active_volatile_bytes =
 		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
-	cxlds->active_persistent_bytes =
+	mds->active_persistent_bytes =
 		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
-	cxlds->next_volatile_bytes =
-		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
-	cxlds->next_persistent_bytes =
-		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
 
 	return 0;
 }
 
 /**
  * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
- * @cxlds: The device data for the operation
+ * @mds: The driver data for the operation
 *
- * Return: 0 if identify was executed successfully.
+ * Return: 0 if identify was executed successfully or media not ready.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
-int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
+int cxl_dev_state_identify(struct cxl_memdev_state *mds)
 {
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
 	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
 	struct cxl_mbox_identify id;
+	struct cxl_mbox_cmd mbox_cmd;
+	u32 val;
 	int rc;
 
-	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
-			       sizeof(id));
+	if (!mds->cxlds.media_ready)
+		return 0;
+
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_IDENTIFY,
+		.size_out = sizeof(id),
+		.payload_out = &id,
+	};
+	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
 	if (rc < 0)
 		return rc;
 
-	cxlds->total_bytes =
+	mds->total_bytes =
 		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
-	cxlds->volatile_only_bytes =
+	mds->volatile_only_bytes =
 		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
-	cxlds->persistent_only_bytes =
+	mds->persistent_only_bytes =
 		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
-	cxlds->partition_align_bytes =
+	mds->partition_align_bytes =
 		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
 
-	cxlds->lsa_size = le32_to_cpu(id.lsa_size);
-	memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+	mds->lsa_size = le32_to_cpu(id.lsa_size);
+	memcpy(mds->firmware_version, id.fw_revision,
+	       sizeof(id.fw_revision));
+
+	if (test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds)) {
+		val = get_unaligned_le24(id.poison_list_max_mer);
+		mds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX);
+	}
 
 	return 0;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, "CXL");
 
-static int add_dpa_res(struct device *dev, struct resource *parent,
-		       struct resource *res, resource_size_t start,
-		       resource_size_t size, const char *type)
+static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
 {
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+	int rc;
+	u32 sec_out = 0;
+	struct cxl_get_security_output {
+		__le32 flags;
+	} out;
+	struct cxl_mbox_cmd sec_cmd = {
+		.opcode = CXL_MBOX_OP_GET_SECURITY_STATE,
+		.payload_out = &out,
+		.size_out = sizeof(out),
+	};
+	struct cxl_mbox_cmd mbox_cmd = { .opcode = cmd };
 
-	res->name = type;
-	res->start = start;
-	res->end = start + size - 1;
-	res->flags = IORESOURCE_MEM;
-	if (resource_size(res) == 0) {
-		dev_dbg(dev, "DPA(%s): no capacity\n", res->name);
-		return 0;
-	}
-	rc = request_resource(parent, res);
-	if (rc) {
-		dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name,
-			res, rc);
+	if (cmd != CXL_MBOX_OP_SANITIZE && cmd != CXL_MBOX_OP_SECURE_ERASE)
+		return -EINVAL;
+
+	rc = cxl_internal_send_cmd(cxl_mbox, &sec_cmd);
+	if (rc < 0) {
+		dev_err(cxl_mbox->host, "Failed to get security state : %d", rc);
 		return rc;
 	}
-	dev_dbg(dev, "DPA(%s): %pr\n", res->name, res);
 
+	/*
+	 * Prior to using these commands, any security applied to
+	 * the user data areas of the device shall be DISABLED (or
+	 * UNLOCKED for secure erase case).
+	 */
+	sec_out = le32_to_cpu(out.flags);
+	if (sec_out & CXL_PMEM_SEC_STATE_USER_PASS_SET)
+		return -EINVAL;
+
+	if (cmd == CXL_MBOX_OP_SECURE_ERASE &&
+	    sec_out & CXL_PMEM_SEC_STATE_LOCKED)
+		return -EINVAL;
+
+	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+	if (rc < 0) {
+		dev_err(cxl_mbox->host, "Failed to sanitize device : %d", rc);
+		return rc;
+	}
 
 	return 0;
 }
-int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
+
+/**
+ * cxl_mem_sanitize() - Send a sanitization command to the device.
+ * @cxlmd: The device for the operation
+ * @cmd: The specific sanitization command opcode
+ *
+ * Return: 0 if the command was executed successfully, regardless of
+ * whether or not the actual security operation is done in the background,
+ * such as for the Sanitize case.
+ * Error return values can be the result of the mailbox command, -EINVAL
+ * when security requirements are not met or invalid contexts, or -EBUSY
+ * if the sanitize operation is already in flight.
+ *
+ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
+ */
+int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
+{
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+	struct cxl_port *endpoint;
+
+	/* synchronize with cxl_mem_probe() and decoder write operations */
+	guard(device)(&cxlmd->dev);
+	endpoint = cxlmd->endpoint;
+	guard(rwsem_read)(&cxl_rwsem.region);
+	/*
+	 * Require an endpoint to be safe otherwise the driver can not
+	 * be sure that the device is unmapped.
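The requirement in that comment reduces to a small predicate (editor's sketch, not part of the patch; cxl_num_decoders_committed() is the helper used by the code that follows):

	/* Sanitize may proceed only while no HDM decoders are committed */
	static bool example_sanitize_allowed(struct cxl_port *endpoint)
	{
		return endpoint && cxl_num_decoders_committed(endpoint) == 0;
	}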
+	 */
+	if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
+		return __cxl_mem_sanitize(mds, cmd);
+
+	return -EBUSY;
+}
+
+static void add_part(struct cxl_dpa_info *info, u64 start, u64 size, enum cxl_partition_mode mode)
+{
+	int i = info->nr_partitions;
+
+	if (size == 0)
+		return;
+
+	info->part[i].range = (struct range) {
+		.start = start,
+		.end = start + size - 1,
+	};
+	info->part[i].mode = mode;
+	info->nr_partitions++;
+}
+
+int cxl_mem_dpa_fetch(struct cxl_memdev_state *mds, struct cxl_dpa_info *info)
 {
+	struct cxl_dev_state *cxlds = &mds->cxlds;
 	struct device *dev = cxlds->dev;
 	int rc;
 
-	cxlds->dpa_res =
-		(struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
+	if (!cxlds->media_ready) {
+		info->size = 0;
+		return 0;
+	}
+
+	info->size = mds->total_bytes;
 
-	if (cxlds->partition_align_bytes == 0) {
-		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
-				 cxlds->volatile_only_bytes, "ram");
-		if (rc)
-			return rc;
-		return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
-				   cxlds->volatile_only_bytes,
-				   cxlds->persistent_only_bytes, "pmem");
+	if (mds->partition_align_bytes == 0) {
+		add_part(info, 0, mds->volatile_only_bytes, CXL_PARTMODE_RAM);
+		add_part(info, mds->volatile_only_bytes,
+			 mds->persistent_only_bytes, CXL_PARTMODE_PMEM);
+		return 0;
 	}
 
-	rc = cxl_mem_get_partition_info(cxlds);
+	rc = cxl_mem_get_partition_info(mds);
 	if (rc) {
 		dev_err(dev, "Failed to query partition information\n");
 		return rc;
 	}
 
-	rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
-			 cxlds->active_volatile_bytes, "ram");
-	if (rc)
+	add_part(info, 0, mds->active_volatile_bytes, CXL_PARTMODE_RAM);
+	add_part(info, mds->active_volatile_bytes, mds->active_persistent_bytes,
+		 CXL_PARTMODE_PMEM);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_dpa_fetch, "CXL");
+
+int cxl_get_dirty_count(struct cxl_memdev_state *mds, u32 *count)
+{
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+	struct cxl_mbox_get_health_info_out hi;
+	struct cxl_mbox_cmd mbox_cmd;
+	int rc;
+
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
+		.size_out = sizeof(hi),
+		.payload_out = &hi,
+	};
+
+	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+	if (!rc)
+		*count = le32_to_cpu(hi.dirty_shutdown_cnt);
+
+	return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_get_dirty_count, "CXL");
+
+int cxl_arm_dirty_shutdown(struct cxl_memdev_state *mds)
+{
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+	struct cxl_mbox_cmd mbox_cmd;
+	struct cxl_mbox_set_shutdown_state_in in = {
+		.state = 1
+	};
+
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_SET_SHUTDOWN_STATE,
+		.size_in = sizeof(in),
+		.payload_in = &in,
+	};
+
+	return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_arm_dirty_shutdown, "CXL");
+
+int cxl_set_timestamp(struct cxl_memdev_state *mds)
+{
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+	struct cxl_mbox_cmd mbox_cmd;
+	struct cxl_mbox_set_timestamp_in pi;
+	int rc;
+
+	pi.timestamp = cpu_to_le64(ktime_get_real_ns());
+	mbox_cmd = (struct cxl_mbox_cmd) {
+		.opcode = CXL_MBOX_OP_SET_TIMESTAMP,
+		.size_in = sizeof(pi),
+		.payload_in = &pi,
+	};
+
+	rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+	/*
+	 * Command is optional. Devices may have another way of providing
+	 * a timestamp, or may return all 0s in timestamp fields.
+	 * Don't report an error if this command isn't supported
+	 */
+	if (rc && (mbox_cmd.return_code != CXL_MBOX_CMD_RC_UNSUPPORTED))
+		return rc;
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, "CXL");
+
+int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
+		       struct cxl_region *cxlr)
+{
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+	struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
+	struct cxl_mbox_poison_out *po;
+	struct cxl_mbox_poison_in pi;
+	int nr_records = 0;
+	int rc;
+
+	ACQUIRE(mutex_intr, lock)(&mds->poison.mutex);
+	if ((rc = ACQUIRE_ERR(mutex_intr, &lock)))
+		return rc;
+
+	po = mds->poison.list_out;
+	pi.offset = cpu_to_le64(offset);
+	pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
+
+	do {
+		struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+			.opcode = CXL_MBOX_OP_GET_POISON,
+			.size_in = sizeof(pi),
+			.payload_in = &pi,
+			.size_out = cxl_mbox->payload_size,
+			.payload_out = po,
+			.min_out = struct_size(po, record, 0),
+		};
+
+		rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
+		if (rc)
+			break;
+
+		for (int i = 0; i < le16_to_cpu(po->count); i++)
+			trace_cxl_poison(cxlmd, cxlr, &po->record[i],
+					 po->flags, po->overflow_ts,
+					 CXL_POISON_TRACE_LIST);
+
+		/* Protect against an uncleared _FLAG_MORE */
+		nr_records = nr_records + le16_to_cpu(po->count);
+		if (nr_records >= mds->poison.max_errors) {
+			dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n",
+				nr_records);
+			break;
+		}
+	} while (po->flags & CXL_POISON_FLAG_MORE);
+
+	return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, "CXL");
+
+static void free_poison_buf(void *buf)
+{
+	kvfree(buf);
+}
+
+/* Get Poison List output buffer is protected by mds->poison.lock */
+static int cxl_poison_alloc_buf(struct cxl_memdev_state *mds)
+{
+	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
+
+	mds->poison.list_out = kvmalloc(cxl_mbox->payload_size, GFP_KERNEL);
+	if (!mds->poison.list_out)
+		return -ENOMEM;
+
+	return devm_add_action_or_reset(mds->cxlds.dev, free_poison_buf,
+					mds->poison.list_out);
+}
+
+int cxl_poison_state_init(struct cxl_memdev_state *mds)
+{
+	int rc;
+
+	if (!test_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds))
+		return 0;
+
+	rc = cxl_poison_alloc_buf(mds);
+	if (rc) {
+		clear_bit(CXL_POISON_ENABLED_LIST, mds->poison.enabled_cmds);
 		return rc;
-	return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res,
-			   cxlds->active_volatile_bytes,
-			   cxlds->active_persistent_bytes, "pmem");
+	}
+
+	mutex_init(&mds->poison.mutex);
+	return 0;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, "CXL");
 
-struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
+int cxl_mailbox_init(struct cxl_mailbox *cxl_mbox, struct device *host)
 {
-	struct cxl_dev_state *cxlds;
+	if (!cxl_mbox || !host)
+		return -EINVAL;
 
-	cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
-	if (!cxlds) {
+	cxl_mbox->host = host;
+	mutex_init(&cxl_mbox->mbox_mutex);
+	rcuwait_init(&cxl_mbox->mbox_wait);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_mailbox_init, "CXL");
+
+struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
+{
+	struct cxl_memdev_state *mds;
+	int rc;
+
+	mds = devm_kzalloc(dev, sizeof(*mds), GFP_KERNEL);
+	if (!mds) {
 		dev_err(dev, "No memory available\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
-	mutex_init(&cxlds->mbox_mutex);
-	cxlds->dev = dev;
+	mutex_init(&mds->event.log_lock);
+	mds->cxlds.dev = dev;
+	mds->cxlds.reg_map.host = dev;
+	mds->cxlds.cxl_mbox.host = dev;
+	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
+	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
+
+	rc = devm_cxl_register_mce_notifier(dev, &mds->mce_notifier);
+	if (rc == -EOPNOTSUPP)
+		dev_warn(dev, "CXL MCE unsupported\n");
+	else if (rc)
+		return ERR_PTR(rc);
 
-	return cxlds;
+	return mds;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_state_create, "CXL");
 
 void __init cxl_mbox_init(void)
 {
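For reference, userspace reaches the cxl_query_cmd()/cxl_send_cmd() paths above through the CXL_MEM_QUERY_COMMANDS and CXL_MEM_SEND_COMMAND ioctls on a memdev node. A minimal query sketch (editor's illustration, not part of this patch; assumes a /dev/cxl/mem0 node and the <linux/cxl_mem.h> uapi header):

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/cxl_mem.h>

	int main(void)
	{
		struct cxl_mem_query_commands *query;
		__u32 n, i;
		int fd = open("/dev/cxl/mem0", O_RDWR);

		if (fd < 0)
			return 1;

		/* n_commands == 0 asks the driver for the total command count */
		query = calloc(1, sizeof(*query));
		if (!query || ioctl(fd, CXL_MEM_QUERY_COMMANDS, query) < 0)
			return 1;
		n = query->n_commands;

		/* second call retrieves min(n, total) cxl_command_info entries */
		query = realloc(query, sizeof(*query) +
				n * sizeof(query->commands[0]));
		if (!query)
			return 1;
		query->n_commands = n;
		if (ioctl(fd, CXL_MEM_QUERY_COMMANDS, query) < 0)
			return 1;

		for (i = 0; i < n; i++)
			printf("id %u flags %#x\n", query->commands[i].id,
			       query->commands[i].flags);

		free(query);
		close(fd);
		return 0;
	}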
