Diffstat (limited to 'drivers/crypto/ccp')
 -rw-r--r--  drivers/crypto/ccp/Kconfig       |   1
 -rw-r--r--  drivers/crypto/ccp/Makefile      |   4
 -rw-r--r--  drivers/crypto/ccp/sev-dev-tio.c | 864
 -rw-r--r--  drivers/crypto/ccp/sev-dev-tio.h | 123
 -rw-r--r--  drivers/crypto/ccp/sev-dev-tsm.c | 405
 -rw-r--r--  drivers/crypto/ccp/sev-dev.c     | 103
 -rw-r--r--  drivers/crypto/ccp/sev-dev.h     |  11
7 files changed, 1501 insertions, 10 deletions
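
Editor's note before the patch body: every TIO firmware command in this series passes buffers by 64-bit scatter-list addresses (SLAs), with bit 0 selecting a data vs. scatter page, bit 1 selecting 4K vs. 2M, and bits 12..51 holding the PFN. The stand-alone sketch below is not part of the patch; it packs and unpacks that word to match struct sla_addr_t from sev-dev-tio.h, assuming the usual little-endian bitfield layout, and every helper name in it is hypothetical.

#include <stdint.h>
#include <stdio.h>

enum { SLA_PAGE_TYPE_DATA = 0, SLA_PAGE_TYPE_SCATTER = 1 };
enum { SLA_PAGE_SIZE_4K = 0, SLA_PAGE_SIZE_2M = 1 };

#define SLA_PFN_MASK (((uint64_t)1 << 40) - 1)

/* Pack a 64-bit SLA word: bit 0 = page_type, bit 1 = page_size,
 * bits 12..51 = PFN (mirroring struct sla_addr_t in sev-dev-tio.h;
 * in the kernel the PFN is derived from an SME-tagged physical
 * address, which this user-space sketch ignores). */
static uint64_t sla_encode(uint64_t pfn, unsigned int type, unsigned int size)
{
	return (uint64_t)(type & 1) | ((uint64_t)(size & 1) << 1) |
	       ((pfn & SLA_PFN_MASK) << 12);
}

static uint64_t sla_pfn(uint64_t sla)
{
	return (sla >> 12) & SLA_PFN_MASK;
}

/* SLA_EOL is the all-ones PFN terminating a scatter list; SLA_NULL is 0. */
static int sla_is_eol(uint64_t sla)
{
	return sla_pfn(sla) == SLA_PFN_MASK;
}

int main(void)
{
	uint64_t s = sla_encode(0x123456, SLA_PAGE_TYPE_SCATTER, SLA_PAGE_SIZE_4K);

	printf("sla=%#llx pfn=%#llx scatter=%d eol=%d\n",
	       (unsigned long long)s, (unsigned long long)sla_pfn(s),
	       (int)(s & 1), sla_is_eol(s));
	return 0;
}
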
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index f394e45e11ab..f16a0f611317 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -39,6 +39,7 @@ config CRYPTO_DEV_SP_PSP bool "Platform Security Processor (PSP) device" default y depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU + select PCI_TSM if PCI help Provide support for the AMD Platform Security Processor (PSP). The PSP is a dedicated processor that provides support for key diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index a9626b30044a..0424e08561ef 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -16,6 +16,10 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ hsti.o \ sfs.o +ifeq ($(CONFIG_PCI_TSM),y) +ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o +endif + obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes.o \ diff --git a/drivers/crypto/ccp/sev-dev-tio.c b/drivers/crypto/ccp/sev-dev-tio.c new file mode 100644 index 000000000000..9a98f98c20a7 --- /dev/null +++ b/drivers/crypto/ccp/sev-dev-tio.c @@ -0,0 +1,864 @@ +// SPDX-License-Identifier: GPL-2.0-only + +// Interface to PSP for CCP/SEV-TIO/SNP-VM + +#include <linux/pci.h> +#include <linux/tsm.h> +#include <linux/psp.h> +#include <linux/vmalloc.h> +#include <linux/bitfield.h> +#include <linux/pci-doe.h> +#include <asm/sev-common.h> +#include <asm/sev.h> +#include <asm/page.h> +#include "sev-dev.h" +#include "sev-dev-tio.h" + +#define to_tio_status(dev_data) \ + (container_of((dev_data), struct tio_dsm, data)->sev->tio_status) + +#define SLA_PAGE_TYPE_DATA 0 +#define SLA_PAGE_TYPE_SCATTER 1 +#define SLA_PAGE_SIZE_4K 0 +#define SLA_PAGE_SIZE_2M 1 +#define SLA_SZ(s) ((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K) +#define SLA_SCATTER_LEN(s) (SLA_SZ(s) / sizeof(struct sla_addr_t)) +#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) }) +#define SLA_NULL ((struct sla_addr_t) { 0 }) +#define IS_SLA_NULL(s) ((s).sla == SLA_NULL.sla) +#define IS_SLA_EOL(s) ((s).sla == SLA_EOL.sla) + +static phys_addr_t sla_to_pa(struct sla_addr_t sla) +{ + u64 pfn = sla.pfn; + u64 pa = pfn << PAGE_SHIFT; + + return pa; +} + +static void *sla_to_va(struct sla_addr_t sla) +{ + void *va = __va(__sme_clr(sla_to_pa(sla))); + + return va; +} + +#define sla_to_pfn(sla) (__pa(sla_to_va(sla)) >> PAGE_SHIFT) +#define sla_to_page(sla) virt_to_page(sla_to_va(sla)) + +static struct sla_addr_t make_sla(struct page *pg, bool stp) +{ + u64 pa = __sme_set(page_to_phys(pg)); + struct sla_addr_t ret = { + .pfn = pa >> PAGE_SHIFT, + .page_size = SLA_PAGE_SIZE_4K, /* Do not do SLA_PAGE_SIZE_2M ATM */ + .page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA + }; + + return ret; +} + +/* the BUFFER Structure */ +#define SLA_BUFFER_FLAG_ENCRYPTION BIT(0) + +/* + * struct sla_buffer_hdr - Scatter list address buffer header + * + * @capacity_sz: Total capacity of the buffer in bytes + * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B + * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted) + * @iv: Initialization vector used for encryption + * @authtag: Authentication tag for encrypted buffer + */ +struct sla_buffer_hdr { + u32 capacity_sz; + u32 payload_sz; /* The size of BUFFER_PAYLOAD in bytes. 
Must be multiple of 32B */ + u32 flags; + u8 reserved1[4]; + u8 iv[16]; /* IV used for the encryption of this buffer */ + u8 authtag[16]; /* Authentication tag for this buffer */ + u8 reserved2[16]; +} __packed; + +enum spdm_data_type_t { + DOBJ_DATA_TYPE_SPDM = 0x1, + DOBJ_DATA_TYPE_SECURE_SPDM = 0x2, +}; + +struct spdm_dobj_hdr_req { + struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */ + u8 data_type; /* spdm_data_type_t */ + u8 reserved2[5]; +} __packed; + +struct spdm_dobj_hdr_resp { + struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */ + u8 data_type; /* spdm_data_type_t */ + u8 reserved2[5]; +} __packed; + +/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */ +struct spdm_dobj_hdr_cert; +struct spdm_dobj_hdr_meas; +struct spdm_dobj_hdr_report; + +/* Used in all SPDM-aware TIO commands */ +struct spdm_ctrl { + struct sla_addr_t req; + struct sla_addr_t resp; + struct sla_addr_t scratch; + struct sla_addr_t output; +} __packed; + +static size_t sla_dobj_id_to_size(u8 id) +{ + size_t n; + + BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10); + switch (id) { + case SPDM_DOBJ_ID_REQ: + n = sizeof(struct spdm_dobj_hdr_req); + break; + case SPDM_DOBJ_ID_RESP: + n = sizeof(struct spdm_dobj_hdr_resp); + break; + default: + WARN_ON(1); + n = 0; + break; + } + + return n; +} + +#define SPDM_DOBJ_HDR_SIZE(hdr) sla_dobj_id_to_size((hdr)->id) +#define SPDM_DOBJ_DATA(hdr) ((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr)) +#define SPDM_DOBJ_LEN(hdr) ((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr)) + +#define sla_to_dobj_resp_hdr(buf) ((struct spdm_dobj_hdr_resp *) \ + sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP)) +#define sla_to_dobj_req_hdr(buf) ((struct spdm_dobj_hdr_req *) \ + sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ)) + +static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf) +{ + if (!buf) + return NULL; + + return (struct spdm_dobj_hdr *) &buf[1]; +} + +static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid) +{ + struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf); + + if (WARN_ON_ONCE(!hdr)) + return NULL; + + if (hdr->id != check_dobjid) { + pr_err("! 
ERROR: expected %d, found %d\n", check_dobjid, hdr->id); + return NULL; + } + + return hdr; +} + +static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid) +{ + struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf); + + if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP)) + return NULL; + + if (!hdr) + return NULL; + + return (u8 *) hdr + sla_dobj_id_to_size(dobjid); +} + +/* + * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command + * + * @length: Length of this command buffer in bytes + * @status_paddr: System physical address of the TIO_STATUS structure + */ +struct sev_data_tio_status { + u32 length; + u8 reserved[4]; + u64 status_paddr; +} __packed; + +/* TIO_INIT */ +struct sev_data_tio_init { + u32 length; + u8 reserved[12]; +} __packed; + +/* + * struct sev_data_tio_dev_create - TIO_DEV_CREATE command + * + * @length: Length in bytes of this command buffer + * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer + * @device_id: PCIe Routing Identifier of the device to connect to + * @root_port_id: PCIe Routing Identifier of the root port of the device + * @segment_id: PCIe Segment Identifier of the device to connect to + */ +struct sev_data_tio_dev_create { + u32 length; + u8 reserved1[4]; + struct sla_addr_t dev_ctx_sla; + u16 device_id; + u16 root_port_id; + u8 segment_id; + u8 reserved2[11]; +} __packed; + +/* + * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command + * + * @length: Length in bytes of this command buffer + * @spdm_ctrl: SPDM control structure defined in Section 5.1 + * @dev_ctx_sla: Scatter list address of the device context buffer + * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage. + * Setting the kth bit of the TC_MASK to 1 indicates that the traffic + * class k will be initialized + * @cert_slot: Slot number of the certificate requested for constructing the SPDM session + * @ide_stream_id: IDE stream IDs to be associated with this device. 
+ * Valid only if corresponding bit in TC_MASK is set + */ +struct sev_data_tio_dev_connect { + u32 length; + u8 reserved1[4]; + struct spdm_ctrl spdm_ctrl; + u8 reserved2[8]; + struct sla_addr_t dev_ctx_sla; + u8 tc_mask; + u8 cert_slot; + u8 reserved3[6]; + u8 ide_stream_id[8]; + u8 reserved4[8]; +} __packed; + +/* + * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command + * + * @length: Length in bytes of this command buffer + * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect) + * @spdm_ctrl: SPDM control structure defined in Section 5.1 + * @dev_ctx_sla: Scatter list address of the device context buffer + */ +#define TIO_DEV_DISCONNECT_FLAG_FORCE BIT(0) + +struct sev_data_tio_dev_disconnect { + u32 length; + u32 flags; + struct spdm_ctrl spdm_ctrl; + struct sla_addr_t dev_ctx_sla; +} __packed; + +/* + * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command + * + * @length: Length in bytes of this command buffer + * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements) + * @spdm_ctrl: SPDM control structure defined in Section 5.1 + * @dev_ctx_sla: Scatter list address of the device context buffer + * @meas_nonce: Nonce for measurement freshness verification + */ +#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM BIT(0) + +struct sev_data_tio_dev_meas { + u32 length; + u32 flags; + struct spdm_ctrl spdm_ctrl; + struct sla_addr_t dev_ctx_sla; + u8 meas_nonce[32]; +} __packed; + +/* + * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command + * + * @length: Length in bytes of this command buffer + * @spdm_ctrl: SPDM control structure defined in Section 5.1 + * @dev_ctx_sla: Scatter list address of the device context buffer + */ +struct sev_data_tio_dev_certs { + u32 length; + u8 reserved[4]; + struct spdm_ctrl spdm_ctrl; + struct sla_addr_t dev_ctx_sla; +} __packed; + +/* + * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command + * + * @length: Length in bytes of this command buffer + * @dev_ctx_sla: Scatter list address of the device context buffer + * + * This command reclaims resources associated with a device context. 
+ */ +struct sev_data_tio_dev_reclaim { + u32 length; + u8 reserved[4]; + struct sla_addr_t dev_ctx_sla; +} __packed; + +static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla) +{ + struct sla_buffer_hdr *buf; + + BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40); + if (IS_SLA_NULL(sla)) + return NULL; + + if (sla.page_type == SLA_PAGE_TYPE_SCATTER) { + struct sla_addr_t *scatter = sla_to_va(sla); + unsigned int i, npages = 0; + + for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) { + if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K)) + return NULL; + + if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER)) + return NULL; + + if (IS_SLA_EOL(scatter[i])) { + npages = i; + break; + } + } + if (WARN_ON_ONCE(!npages)) + return NULL; + + struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL); + + if (!pp) + return NULL; + + for (i = 0; i < npages; ++i) + pp[i] = sla_to_page(scatter[i]); + + buf = vm_map_ram(pp, npages, 0); + kfree(pp); + } else { + struct page *pg = sla_to_page(sla); + + buf = vm_map_ram(&pg, 1, 0); + } + + return buf; +} + +static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf) +{ + if (!buf) + return; + + if (sla.page_type == SLA_PAGE_TYPE_SCATTER) { + struct sla_addr_t *scatter = sla_to_va(sla); + unsigned int i, npages = 0; + + for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) { + if (IS_SLA_EOL(scatter[i])) { + npages = i; + break; + } + } + if (!npages) + return; + + vm_unmap_ram(buf, npages); + } else { + vm_unmap_ram(buf, 1); + } +} + +static void dobj_response_init(struct sla_buffer_hdr *buf) +{ + struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf); + + dobj->id = SPDM_DOBJ_ID_RESP; + dobj->version.major = 0x1; + dobj->version.minor = 0; + dobj->length = 0; + buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length; +} + +static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state) +{ + unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT; + struct sla_addr_t *scatter = NULL; + int ret = 0, i; + + if (IS_SLA_NULL(sla)) + return; + + if (firmware_state) { + if (sla.page_type == SLA_PAGE_TYPE_SCATTER) { + scatter = sla_to_va(sla); + + for (i = 0; i < npages; ++i) { + if (IS_SLA_EOL(scatter[i])) + break; + + ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false); + if (ret) + break; + } + } else { + ret = snp_reclaim_pages(sla_to_pa(sla), 1, false); + } + } + + if (WARN_ON(ret)) + return; + + if (scatter) { + for (i = 0; i < npages; ++i) { + if (IS_SLA_EOL(scatter[i])) + break; + free_page((unsigned long)sla_to_va(scatter[i])); + } + } + + free_page((unsigned long)sla_to_va(sla)); +} + +static struct sla_addr_t sla_alloc(size_t len, bool firmware_state) +{ + unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT; + struct sla_addr_t *scatter = NULL; + struct sla_addr_t ret = SLA_NULL; + struct sla_buffer_hdr *buf; + struct page *pg; + + if (npages == 0) + return ret; + + if (WARN_ON_ONCE(npages > ((PAGE_SIZE / sizeof(struct sla_addr_t)) + 1))) + return ret; + + BUILD_BUG_ON(PAGE_SIZE < SZ_4K); + + if (npages > 1) { + pg = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pg) + return SLA_NULL; + + ret = make_sla(pg, true); + scatter = page_to_virt(pg); + for (i = 0; i < npages; ++i) { + pg = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pg) + goto no_reclaim_exit; + + scatter[i] = make_sla(pg, false); + } + scatter[i] = SLA_EOL; + } else { + pg = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pg) + return SLA_NULL; + + ret = make_sla(pg, false); + } + + buf = sla_buffer_map(ret); + if (!buf) + goto no_reclaim_exit; + + 
buf->capacity_sz = (npages << PAGE_SHIFT); + sla_buffer_unmap(ret, buf); + + if (firmware_state) { + if (scatter) { + for (i = 0; i < npages; ++i) { + if (rmp_make_private(sla_to_pfn(scatter[i]), 0, + PG_LEVEL_4K, 0, true)) + goto free_exit; + } + } else { + if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true)) + goto no_reclaim_exit; + } + } + + return ret; + +no_reclaim_exit: + firmware_state = false; +free_exit: + sla_free(ret, len, firmware_state); + return SLA_NULL; +} + +/* Expands a buffer, only firmware owned buffers allowed for now */ +static int sla_expand(struct sla_addr_t *sla, size_t *len) +{ + struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf; + struct sla_addr_t oldsla = *sla, newsla; + size_t oldlen = *len, newlen; + + if (!oldbuf) + return -EFAULT; + + newlen = oldbuf->capacity_sz; + if (oldbuf->capacity_sz == oldlen) { + /* This buffer does not require expansion, must be another buffer */ + sla_buffer_unmap(oldsla, oldbuf); + return 1; + } + + pr_notice("Expanding BUFFER from %ld to %ld bytes\n", oldlen, newlen); + + newsla = sla_alloc(newlen, true); + if (IS_SLA_NULL(newsla)) + return -ENOMEM; + + newbuf = sla_buffer_map(newsla); + if (!newbuf) { + sla_free(newsla, newlen, true); + return -EFAULT; + } + + memcpy(newbuf, oldbuf, oldlen); + + sla_buffer_unmap(newsla, newbuf); + sla_free(oldsla, oldlen, true); + *sla = newsla; + *len = newlen; + + return 0; +} + +static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret, + struct tsm_dsm_tio *dev_data) +{ + int rc; + + *psp_ret = 0; + rc = sev_do_cmd(cmd, data, psp_ret); + + if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST)) + return -EIO; + + if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) { + int rc1, rc2; + + rc1 = sla_expand(&dev_data->output, &dev_data->output_len); + if (rc1 < 0) + return rc1; + + rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len); + if (rc2 < 0) + return rc2; + + if (!rc1 && !rc2) + /* Neither buffer requires expansion, this is wrong */ + return -EFAULT; + + *psp_ret = 0; + rc = sev_do_cmd(cmd, data, psp_ret); + } + + if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) { + struct spdm_dobj_hdr_resp *resp_hdr; + struct spdm_dobj_hdr_req *req_hdr; + struct sev_tio_status *tio_status = to_tio_status(dev_data); + size_t resp_len = tio_status->spdm_req_size_max - + (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr)); + + if (!dev_data->cmd) { + if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data))) + return -EINVAL; + if (WARN_ON(data_len > sizeof(dev_data->cmd_data))) + return -EFAULT; + memcpy(dev_data->cmd_data, data, data_len); + memset(&dev_data->cmd_data[data_len], 0xFF, + sizeof(dev_data->cmd_data) - data_len); + dev_data->cmd = cmd; + } + + req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf); + resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf); + switch (req_hdr->data_type) { + case DOBJ_DATA_TYPE_SPDM: + rc = PCI_DOE_FEATURE_CMA; + break; + case DOBJ_DATA_TYPE_SECURE_SPDM: + rc = PCI_DOE_FEATURE_SSESSION; + break; + default: + return -EINVAL; + } + resp_hdr->data_type = req_hdr->data_type; + dev_data->spdm.req_len = req_hdr->hdr.length - + sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ); + dev_data->spdm.rsp_len = resp_len; + } else if (dev_data && dev_data->cmd) { + /* For either error or success just stop the bouncing */ + memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data)); + dev_data->cmd = 0; + } + + return rc; +} + +int sev_tio_continue(struct tsm_dsm_tio *dev_data) +{ + struct 
spdm_dobj_hdr_resp *resp_hdr; + int ret; + + if (!dev_data || !dev_data->cmd) + return -EINVAL; + + resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf); + resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + + dev_data->spdm.rsp_len, 32); + dev_data->respbuf->payload_sz = resp_hdr->hdr.length; + + ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0, + &dev_data->psp_ret, dev_data); + if (ret) + return ret; + + if (dev_data->psp_ret != SEV_RET_SUCCESS) + return -EINVAL; + + return 0; +} + +static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data) +{ + ctrl->req = dev_data->req; + ctrl->resp = dev_data->resp; + ctrl->scratch = dev_data->scratch; + ctrl->output = dev_data->output; +} + +static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data) +{ + struct sev_tio_status *tio_status = to_tio_status(dev_data); + size_t len = tio_status->spdm_req_size_max - + (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + + sizeof(struct sla_buffer_hdr)); + struct tsm_spdm *spdm = &dev_data->spdm; + + sla_buffer_unmap(dev_data->resp, dev_data->respbuf); + sla_buffer_unmap(dev_data->req, dev_data->reqbuf); + spdm->rsp = NULL; + spdm->req = NULL; + sla_free(dev_data->req, len, true); + sla_free(dev_data->resp, len, false); + sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true); + + dev_data->req.sla = 0; + dev_data->resp.sla = 0; + dev_data->scratch.sla = 0; + dev_data->respbuf = NULL; + dev_data->reqbuf = NULL; + sla_free(dev_data->output, tio_status->spdm_out_size_max, true); +} + +static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data) +{ + struct sev_tio_status *tio_status = to_tio_status(dev_data); + struct tsm_spdm *spdm = &dev_data->spdm; + int ret; + + dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true); + dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false); + dev_data->scratch_len = tio_status->spdm_scratch_size_max; + dev_data->scratch = sla_alloc(dev_data->scratch_len, true); + dev_data->output_len = tio_status->spdm_out_size_max; + dev_data->output = sla_alloc(dev_data->output_len, true); + + if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) || + IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->dev_ctx)) { + ret = -ENOMEM; + goto free_spdm_exit; + } + + dev_data->reqbuf = sla_buffer_map(dev_data->req); + dev_data->respbuf = sla_buffer_map(dev_data->resp); + if (!dev_data->reqbuf || !dev_data->respbuf) { + ret = -EFAULT; + goto free_spdm_exit; + } + + spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ); + spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP); + if (!spdm->req || !spdm->rsp) { + ret = -EFAULT; + goto free_spdm_exit; + } + + dobj_response_init(dev_data->respbuf); + + return 0; + +free_spdm_exit: + spdm_ctrl_free(dev_data); + return ret; +} + +int sev_tio_init_locked(void *tio_status_page) +{ + struct sev_tio_status *tio_status = tio_status_page; + struct sev_data_tio_status data_status = { + .length = sizeof(data_status), + }; + int ret, psp_ret; + + data_status.status_paddr = __psp_pa(tio_status_page); + ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret); + if (ret) + return ret; + + if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) || + tio_status->reserved) + return -EFAULT; + + if (!tio_status->tio_en && !tio_status->tio_init_done) + return -ENOENT; + + if (tio_status->tio_init_done) + return -EBUSY; + + struct sev_data_tio_init ti = { .length = sizeof(ti) }; + + ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret); + if 
(ret) + return ret; + + ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret); + if (ret) + return ret; + + return 0; +} + +int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, + u16 root_port_id, u8 segment_id) +{ + struct sev_tio_status *tio_status = to_tio_status(dev_data); + struct sev_data_tio_dev_create create = { + .length = sizeof(create), + .device_id = device_id, + .root_port_id = root_port_id, + .segment_id = segment_id, + }; + void *data_pg; + int ret; + + dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true); + if (IS_SLA_NULL(dev_data->dev_ctx)) + return -ENOMEM; + + data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT); + if (!data_pg) { + ret = -ENOMEM; + goto free_ctx_exit; + } + + create.dev_ctx_sla = dev_data->dev_ctx; + ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret); + if (ret) + goto free_data_pg_exit; + + dev_data->data_pg = data_pg; + + return 0; + +free_data_pg_exit: + snp_free_firmware_page(data_pg); +free_ctx_exit: + sla_free(create.dev_ctx_sla, tio_status->devctx_size, true); + return ret; +} + +int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data) +{ + struct sev_tio_status *tio_status = to_tio_status(dev_data); + struct sev_data_tio_dev_reclaim r = { + .length = sizeof(r), + .dev_ctx_sla = dev_data->dev_ctx, + }; + int ret; + + if (dev_data->data_pg) { + snp_free_firmware_page(dev_data->data_pg); + dev_data->data_pg = NULL; + } + + if (IS_SLA_NULL(dev_data->dev_ctx)) + return 0; + + ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret); + + sla_free(dev_data->dev_ctx, tio_status->devctx_size, true); + dev_data->dev_ctx = SLA_NULL; + + spdm_ctrl_free(dev_data); + + return ret; +} + +int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot) +{ + struct sev_data_tio_dev_connect connect = { + .length = sizeof(connect), + .tc_mask = tc_mask, + .cert_slot = cert_slot, + .dev_ctx_sla = dev_data->dev_ctx, + .ide_stream_id = { + ids[0], ids[1], ids[2], ids[3], + ids[4], ids[5], ids[6], ids[7] + }, + }; + int ret; + + if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx))) + return -EFAULT; + if (!(tc_mask & 1)) + return -EINVAL; + + ret = spdm_ctrl_alloc(dev_data); + if (ret) + return ret; + + spdm_ctrl_init(&connect.spdm_ctrl, dev_data); + + return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect), + &dev_data->psp_ret, dev_data); +} + +int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force) +{ + struct sev_data_tio_dev_disconnect dc = { + .length = sizeof(dc), + .dev_ctx_sla = dev_data->dev_ctx, + .flags = force ? 
TIO_DEV_DISCONNECT_FLAG_FORCE : 0, + }; + + if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx))) + return -EFAULT; + + spdm_ctrl_init(&dc.spdm_ctrl, dev_data); + + return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc), + &dev_data->psp_ret, dev_data); +} + +int sev_tio_cmd_buffer_len(int cmd) +{ + switch (cmd) { + case SEV_CMD_TIO_STATUS: return sizeof(struct sev_data_tio_status); + case SEV_CMD_TIO_INIT: return sizeof(struct sev_data_tio_init); + case SEV_CMD_TIO_DEV_CREATE: return sizeof(struct sev_data_tio_dev_create); + case SEV_CMD_TIO_DEV_RECLAIM: return sizeof(struct sev_data_tio_dev_reclaim); + case SEV_CMD_TIO_DEV_CONNECT: return sizeof(struct sev_data_tio_dev_connect); + case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect); + default: return 0; + } +} diff --git a/drivers/crypto/ccp/sev-dev-tio.h b/drivers/crypto/ccp/sev-dev-tio.h new file mode 100644 index 000000000000..67512b3dbc53 --- /dev/null +++ b/drivers/crypto/ccp/sev-dev-tio.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __PSP_SEV_TIO_H__ +#define __PSP_SEV_TIO_H__ + +#include <linux/pci-tsm.h> +#include <linux/pci-ide.h> +#include <linux/tsm.h> +#include <uapi/linux/psp-sev.h> + +struct sla_addr_t { + union { + u64 sla; + struct { + u64 page_type :1, + page_size :1, + reserved1 :10, + pfn :40, + reserved2 :12; + }; + }; +} __packed; + +#define SEV_TIO_MAX_COMMAND_LENGTH 128 + +/* SPDM control structure for DOE */ +struct tsm_spdm { + unsigned long req_len; + void *req; + unsigned long rsp_len; + void *rsp; +}; + +/* Describes TIO device */ +struct tsm_dsm_tio { + u8 cert_slot; + struct sla_addr_t dev_ctx; + struct sla_addr_t req; + struct sla_addr_t resp; + struct sla_addr_t scratch; + struct sla_addr_t output; + size_t output_len; + size_t scratch_len; + struct tsm_spdm spdm; + struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */ + struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */ + + int cmd; + int psp_ret; + u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH]; + void *data_pg; /* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */ + +#define TIO_IDE_MAX_TC 8 + struct pci_ide *ide[TIO_IDE_MAX_TC]; +}; + +/* Describes TSM structure for PF0 pointed by pci_dev->tsm */ +struct tio_dsm { + struct pci_tsm_pf0 tsm; + struct tsm_dsm_tio data; + struct sev_device *sev; +}; + +/* Data object IDs */ +#define SPDM_DOBJ_ID_NONE 0 +#define SPDM_DOBJ_ID_REQ 1 +#define SPDM_DOBJ_ID_RESP 2 + +struct spdm_dobj_hdr { + u32 id; /* Data object type identifier */ + u32 length; /* Length of the data object, INCLUDING THIS HEADER */ + struct { /* Version of the data object structure */ + u8 minor; + u8 major; + } version; +} __packed; + +/** + * struct sev_tio_status - TIO_STATUS command's info_paddr buffer + * + * @length: Length of this structure in bytes + * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO + * @tio_init_done: Indicates TIO_INIT has been invoked + * @spdm_req_size_min: Minimum SPDM request buffer size in bytes + * @spdm_req_size_max: Maximum SPDM request buffer size in bytes + * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes + * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes + * @spdm_out_size_min: Minimum SPDM output buffer size in bytes + * @spdm_out_size_max: Maximum for the SPDM output buffer size in bytes + * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes + * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes + * @devctx_size: Size of a device context buffer 
in bytes + * @tdictx_size: Size of a TDI context buffer in bytes + * @tio_crypto_alg: TIO crypto algorithms supported + */ +struct sev_tio_status { + u32 length; + u32 tio_en :1, + tio_init_done :1, + reserved :30; + u32 spdm_req_size_min; + u32 spdm_req_size_max; + u32 spdm_scratch_size_min; + u32 spdm_scratch_size_max; + u32 spdm_out_size_min; + u32 spdm_out_size_max; + u32 spdm_rsp_size_min; + u32 spdm_rsp_size_max; + u32 devctx_size; + u32 tdictx_size; + u32 tio_crypto_alg; + u8 reserved2[12]; +} __packed; + +int sev_tio_init_locked(void *tio_status_page); +int sev_tio_continue(struct tsm_dsm_tio *dev_data); + +int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id, + u8 segment_id); +int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot); +int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force); +int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data); + +#endif /* __PSP_SEV_TIO_H__ */ diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c new file mode 100644 index 000000000000..ea29cd5d0ff9 --- /dev/null +++ b/drivers/crypto/ccp/sev-dev-tsm.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0-only + +// Interface to CCP/SEV-TIO for generic PCIe TDISP module + +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/tsm.h> +#include <linux/iommu.h> +#include <linux/pci-doe.h> +#include <linux/bitfield.h> +#include <linux/module.h> + +#include <asm/sev-common.h> +#include <asm/sev.h> + +#include "psp-dev.h" +#include "sev-dev.h" +#include "sev-dev-tio.h" + +MODULE_IMPORT_NS("PCI_IDE"); + +#define TIO_DEFAULT_NR_IDE_STREAMS 1 + +static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS; +module_param_named(ide_nr, nr_ide_streams, uint, 0644); +MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB"); + +#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev)) +#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data)) +#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data)) +#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent) + +#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm)) + +static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret) +{ + struct tsm_dsm_tio *dev_data = &dsm->data; + struct tsm_spdm *spdm = &dev_data->spdm; + + /* Check the main command handler response before entering the loop */ + if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS) + return -EINVAL; + + if (ret <= 0) + return ret; + + /* ret > 0 means "SPDM requested" */ + while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) { + ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret, + spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len); + if (ret < 0) + break; + + WARN_ON_ONCE(ret == 0); /* The response should never be empty */ + spdm->rsp_len = ret; + ret = sev_tio_continue(dev_data); + } + + return ret; +} + +static int stream_enable(struct pci_ide *ide) +{ + struct pci_dev *rp = pcie_find_root_port(ide->pdev); + int ret; + + ret = pci_ide_stream_enable(rp, ide); + if (ret) + return ret; + + ret = pci_ide_stream_enable(ide->pdev, ide); + if (ret) + pci_ide_stream_disable(rp, ide); + + return ret; +} + +static int streams_enable(struct pci_ide **ide) +{ + int ret = 0; + + for (int i = 0; i < TIO_IDE_MAX_TC; ++i) { + if (ide[i]) { + ret = stream_enable(ide[i]); + if (ret) + break; + } + } + + return ret; +} + +static void stream_disable(struct pci_ide *ide) +{ + 
pci_ide_stream_disable(ide->pdev, ide); + pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide); +} + +static void streams_disable(struct pci_ide **ide) +{ + for (int i = 0; i < TIO_IDE_MAX_TC; ++i) + if (ide[i]) + stream_disable(ide[i]); +} + +static void stream_setup(struct pci_ide *ide) +{ + struct pci_dev *rp = pcie_find_root_port(ide->pdev); + + ide->partner[PCI_IDE_EP].rid_start = 0; + ide->partner[PCI_IDE_EP].rid_end = 0xffff; + ide->partner[PCI_IDE_RP].rid_start = 0; + ide->partner[PCI_IDE_RP].rid_end = 0xffff; + + ide->pdev->ide_cfg = 0; + ide->pdev->ide_tee_limit = 1; + rp->ide_cfg = 1; + rp->ide_tee_limit = 0; + + pci_warn(ide->pdev, "Forcing CFG/TEE for %s", pci_name(rp)); + pci_ide_stream_setup(ide->pdev, ide); + pci_ide_stream_setup(rp, ide); +} + +static u8 streams_setup(struct pci_ide **ide, u8 *ids) +{ + bool def = false; + u8 tc_mask = 0; + int i; + + for (i = 0; i < TIO_IDE_MAX_TC; ++i) { + if (!ide[i]) { + ids[i] = 0xFF; + continue; + } + + tc_mask |= BIT(i); + ids[i] = ide[i]->stream_id; + + if (!def) { + struct pci_ide_partner *settings; + + settings = pci_ide_to_settings(ide[i]->pdev, ide[i]); + settings->default_stream = 1; + def = true; + } + + stream_setup(ide[i]); + } + + return tc_mask; +} + +static int streams_register(struct pci_ide **ide) +{ + int ret = 0, i; + + for (i = 0; i < TIO_IDE_MAX_TC; ++i) { + if (ide[i]) { + ret = pci_ide_stream_register(ide[i]); + if (ret) + break; + } + } + + return ret; +} + +static void streams_unregister(struct pci_ide **ide) +{ + for (int i = 0; i < TIO_IDE_MAX_TC; ++i) + if (ide[i]) + pci_ide_stream_unregister(ide[i]); +} + +static void stream_teardown(struct pci_ide *ide) +{ + pci_ide_stream_teardown(ide->pdev, ide); + pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide); +} + +static void streams_teardown(struct pci_ide **ide) +{ + for (int i = 0; i < TIO_IDE_MAX_TC; ++i) { + if (ide[i]) { + stream_teardown(ide[i]); + pci_ide_stream_free(ide[i]); + ide[i] = NULL; + } + } +} + +static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide, + unsigned int tc) +{ + struct pci_dev *rp = pcie_find_root_port(pdev); + struct pci_ide *ide1; + + if (ide[tc]) { + pci_err(pdev, "Stream for class=%d already registered", tc); + return -EBUSY; + } + + /* FIXME: find a better way */ + if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS) + pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams); + pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams); + + ide1 = pci_ide_stream_alloc(pdev); + if (!ide1) + return -EFAULT; + + /* Blindly assign streamid=0 to TC=0, and so on */ + ide1->stream_id = tc; + + ide[tc] = ide1; + + return 0; +} + +static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev) +{ + struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL); + int rc; + + if (!dsm) + return NULL; + + rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev); + if (rc) + return NULL; + + pci_dbg(pdev, "TSM enabled\n"); + dsm->sev = sev; + return &no_free_ptr(dsm)->tsm.base_tsm; +} + +static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev) +{ + struct sev_device *sev = tsm_dev_to_sev(tsmdev); + + if (is_pci_tsm_pf0(pdev)) + return tio_pf0_probe(pdev, sev); + return 0; +} + +static void dsm_remove(struct pci_tsm *tsm) +{ + struct pci_dev *pdev = tsm->pdev; + + pci_dbg(pdev, "TSM disabled\n"); + + if (is_pci_tsm_pf0(pdev)) { + struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm); + + pci_tsm_pf0_destructor(&dsm->tsm); + 
kfree(dsm); + } +} + +static int dsm_create(struct tio_dsm *dsm) +{ + struct pci_dev *pdev = dsm->tsm.base_tsm.pdev; + u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0; + struct pci_dev *rootport = pcie_find_root_port(pdev); + u16 device_id = pci_dev_id(pdev); + u16 root_port_id; + u32 lnkcap = 0; + + if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP, + &lnkcap)) + return -ENODEV; + + root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap); + + return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id); +} + +static int dsm_connect(struct pci_dev *pdev) +{ + struct tio_dsm *dsm = pdev_to_tio_dsm(pdev); + struct tsm_dsm_tio *dev_data = &dsm->data; + u8 ids[TIO_IDE_MAX_TC]; + u8 tc_mask; + int ret; + + if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG, + PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) { + pci_err(pdev, "CMA DOE MB must support SSESSION\n"); + return -EFAULT; + } + + ret = stream_alloc(pdev, dev_data->ide, 0); + if (ret) + return ret; + + ret = dsm_create(dsm); + if (ret) + goto ide_free_exit; + + tc_mask = streams_setup(dev_data->ide, ids); + + ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot); + ret = sev_tio_spdm_cmd(dsm, ret); + if (ret) + goto free_exit; + + streams_enable(dev_data->ide); + + ret = streams_register(dev_data->ide); + if (ret) + goto free_exit; + + return 0; + +free_exit: + sev_tio_dev_reclaim(dev_data); + + streams_disable(dev_data->ide); +ide_free_exit: + + streams_teardown(dev_data->ide); + + return ret; +} + +static void dsm_disconnect(struct pci_dev *pdev) +{ + bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART; + struct tio_dsm *dsm = pdev_to_tio_dsm(pdev); + struct tsm_dsm_tio *dev_data = &dsm->data; + int ret; + + ret = sev_tio_dev_disconnect(dev_data, force); + ret = sev_tio_spdm_cmd(dsm, ret); + if (ret && !force) { + ret = sev_tio_dev_disconnect(dev_data, true); + sev_tio_spdm_cmd(dsm, ret); + } + + sev_tio_dev_reclaim(dev_data); + + streams_disable(dev_data->ide); + streams_unregister(dev_data->ide); + streams_teardown(dev_data->ide); +} + +static struct pci_tsm_ops sev_tsm_ops = { + .probe = dsm_probe, + .remove = dsm_remove, + .connect = dsm_connect, + .disconnect = dsm_disconnect, +}; + +void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page) +{ + struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL); + struct tsm_dev *tsmdev; + int ret; + + WARN_ON(sev->tio_status); + + if (!t) + return; + + ret = sev_tio_init_locked(tio_status_page); + if (ret) { + pr_warn("SEV-TIO STATUS failed with %d\n", ret); + goto error_exit; + } + + tsmdev = tsm_register(sev->dev, &sev_tsm_ops); + if (IS_ERR(tsmdev)) + goto error_exit; + + memcpy(t, tio_status_page, sizeof(*t)); + + pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d " + "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n", + t->tio_en, t->tio_init_done, + t->spdm_req_size_min, t->spdm_req_size_max, + t->spdm_rsp_size_min, t->spdm_rsp_size_max, + t->spdm_scratch_size_min, t->spdm_scratch_size_max, + t->spdm_out_size_min, t->spdm_out_size_max, + t->devctx_size, t->tdictx_size, + t->tio_crypto_alg); + + sev->tsmdev = tsmdev; + sev->tio_status = t; + + return; + +error_exit: + kfree(t); + pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n", + ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV)); +} + +void sev_tsm_uninit(struct sev_device *sev) +{ + if (sev->tsmdev) + tsm_unregister(sev->tsmdev); + + sev->tsmdev = NULL; +} diff --git 
a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index b28a6f50daaa..956ea609d0cc 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -75,6 +75,14 @@ static bool psp_init_on_probe = true; module_param(psp_init_on_probe, bool, 0444); MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it"); +#if IS_ENABLED(CONFIG_PCI_TSM) +static bool sev_tio_enabled = true; +module_param_named(tio, sev_tio_enabled, bool, 0444); +MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX"); +#else +static const bool sev_tio_enabled = false; +#endif + MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */ MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */ MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */ @@ -251,7 +259,7 @@ static int sev_cmd_buffer_len(int cmd) case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit); case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info); case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load); - default: return 0; + default: return sev_tio_cmd_buffer_len(cmd); } return 0; @@ -380,13 +388,7 @@ static int sev_write_init_ex_file_if_required(int cmd_id) return sev_write_init_ex_file(); } -/* - * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked() - * needs snp_reclaim_pages(), so a forward declaration is needed. - */ -static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret); - -static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked) +int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked) { int ret, err, i; @@ -420,6 +422,7 @@ cleanup: snp_leak_pages(__phys_to_pfn(paddr), npages - i); return ret; } +EXPORT_SYMBOL_GPL(snp_reclaim_pages); static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked) { @@ -850,7 +853,7 @@ static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf) return 0; } -static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) +int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) { struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0}; struct psp_device *psp = psp_master; @@ -1392,6 +1395,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid) * */ if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) { + bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED); + /* * Firmware checks that the pages containing the ranges enumerated * in the RANGES structure are either in the default page state or in the @@ -1432,6 +1437,17 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid) data.init_rmp = 1; data.list_paddr_en = 1; data.list_paddr = __psp_pa(snp_range_list); + + data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported(); + + /* + * When psp_init_on_probe is disabled, the userspace calling + * SEV ioctl can inadvertently shut down SNP and SEV-TIO causing + * unexpected state loss. 
+ */ + if (data.tio_en && !psp_init_on_probe) + dev_warn(sev->dev, "SEV-TIO as incompatible with psp_init_on_probe=0\n"); + cmd = SEV_CMD_SNP_INIT_EX; } else { cmd = SEV_CMD_SNP_INIT; @@ -1469,7 +1485,8 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid) snp_hv_fixed_pages_state_update(sev, HV_FIXED); sev->snp_initialized = true; - dev_dbg(sev->dev, "SEV-SNP firmware initialized\n"); + dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n", + data.tio_en ? "enabled" : "disabled"); dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major, sev->api_minor, sev->build); @@ -1477,6 +1494,23 @@ static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid) atomic_notifier_chain_register(&panic_notifier_list, &snp_panic_notifier); + if (data.tio_en) { + /* + * This executes with the sev_cmd_mutex held so down the stack + * snp_reclaim_pages(locked=false) might be needed (which is extremely + * unlikely) but will cause a deadlock. + * Instead of exporting __snp_alloc_firmware_pages(), allocate a page + * for this one call here. + */ + void *tio_status = page_address(__snp_alloc_firmware_pages( + GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true)); + + if (tio_status) { + sev_tsm_init_locked(sev, tio_status); + __snp_free_firmware_pages(virt_to_page(tio_status), 0, true); + } + } + sev_es_tmr_size = SNP_TMR_SIZE; return 0; @@ -2756,8 +2790,20 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic) static void sev_firmware_shutdown(struct sev_device *sev) { + /* + * Calling without sev_cmd_mutex held as TSM will likely try disconnecting + * IDE and this ends up calling sev_do_cmd() which locks sev_cmd_mutex. + */ + if (sev->tio_status) + sev_tsm_uninit(sev); + mutex_lock(&sev_cmd_mutex); + __sev_firmware_shutdown(sev, false); + + kfree(sev->tio_status); + sev->tio_status = NULL; + mutex_unlock(&sev_cmd_mutex); } @@ -2770,6 +2816,43 @@ void sev_platform_shutdown(void) } EXPORT_SYMBOL_GPL(sev_platform_shutdown); +u64 sev_get_snp_policy_bits(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + u64 policy_bits; + + if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) + return 0; + + if (!psp || !psp->sev_data) + return 0; + + sev = psp->sev_data; + + policy_bits = SNP_POLICY_MASK_BASE; + + if (sev->snp_plat_status.feature_info) { + if (sev->snp_feat_info_0.ecx & SNP_RAPL_DISABLE_SUPPORTED) + policy_bits |= SNP_POLICY_MASK_RAPL_DIS; + + if (sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED) + policy_bits |= SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM; + + if (sev->snp_feat_info_0.ecx & SNP_AES_256_XTS_POLICY_SUPPORTED) + policy_bits |= SNP_POLICY_MASK_MEM_AES_256_XTS; + + if (sev->snp_feat_info_0.ecx & SNP_CXL_ALLOW_POLICY_SUPPORTED) + policy_bits |= SNP_POLICY_MASK_CXL_ALLOW; + + if (sev_version_greater_or_equal(1, 58)) + policy_bits |= SNP_POLICY_MASK_PAGE_SWAP_DISABLE; + } + + return policy_bits; +} +EXPORT_SYMBOL_GPL(sev_get_snp_policy_bits); + void sev_dev_destroy(struct psp_device *psp) { struct sev_device *sev = psp->sev_data; diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index ac03bd0848f7..b1cd556bbbf6 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -34,6 +34,8 @@ struct sev_misc_dev { struct miscdevice misc; }; +struct sev_tio_status; + struct sev_device { struct device *dev; struct psp_device *psp; @@ -61,15 +63,24 @@ struct sev_device { struct sev_user_data_snp_status snp_plat_status; struct snp_feature_info snp_feat_info_0; + + struct tsm_dev *tsmdev; + 
struct sev_tio_status *tio_status; }; int sev_dev_init(struct psp_device *psp); void sev_dev_destroy(struct psp_device *psp); +int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret); + void sev_pci_init(void); void sev_pci_exit(void); struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages); void snp_free_hv_fixed_pages(struct page *page); +void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page); +void sev_tsm_uninit(struct sev_device *sev); +int sev_tio_cmd_buffer_len(int cmd); + #endif /* __SEV_DEV_H */ |
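
Editor's note on the control flow above: connect and disconnect hinge on the SPDM "bounce" that sev_tio_spdm_cmd() and sev_tio_continue() implement. The PSP returns SEV_RET_SPDM_REQUEST with an SPDM request in the req buffer, the driver ferries it to the device over the DOE mailbox (as PCI_DOE_FEATURE_CMA or PCI_DOE_FEATURE_SSESSION, per the request header's data_type), and the device's response is handed back to the PSP, repeating until the firmware stops asking. The self-contained sketch below condenses only that loop; the stubbed psp_cmd(), doe_exchange(), and psp_continue() are hypothetical stand-ins for sev_tio_do_cmd(), pci_doe(), and sev_tio_continue(), not kernel code.

#include <stdio.h>

/* Hypothetical stand-ins; the real return values are the
 * PCI_DOE_FEATURE_* protocol numbers. */
enum { SPDM_DONE = 0, NEED_CMA = 1, NEED_SSESSION = 2 };

static int round_trips;

static int psp_cmd(void)		/* sev_tio_do_cmd() stand-in */
{
	return NEED_CMA;		/* PSP emits the first SPDM request */
}

static int doe_exchange(int proto)	/* pci_doe() stand-in */
{
	(void)proto;
	return 64;			/* pretend: 64-byte device response */
}

static int psp_continue(int rsp_len)	/* sev_tio_continue() stand-in */
{
	(void)rsp_len;
	return ++round_trips < 3 ? NEED_CMA : SPDM_DONE;
}

/* The bounce loop: keep ferrying SPDM messages between PSP and DOE
 * mailbox until the PSP returns <= 0 (done or error). */
int main(void)
{
	int ret = psp_cmd();

	while (ret == NEED_CMA || ret == NEED_SSESSION) {
		ret = doe_exchange(ret);
		if (ret < 0)
			break;		/* DOE transport error, give up */
		ret = psp_continue(ret);
	}
	printf("finished after %d DOE round trips, ret=%d\n", round_trips, ret);
	return 0;
}
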
