Diffstat (limited to 'drivers/crypto/ccp')
-rw-r--r--  drivers/crypto/ccp/Kconfig             |    3
-rw-r--r--  drivers/crypto/ccp/Makefile            |   10
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-aes.c    |   33
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-des3.c   |   13
-rw-r--r--  drivers/crypto/ccp/ccp-crypto-main.c   |   25
-rw-r--r--  drivers/crypto/ccp/ccp-debugfs.c       |    3
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c           |    2
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c     |   21
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c           |  179
-rw-r--r--  drivers/crypto/ccp/dbc.c               |  259
-rw-r--r--  drivers/crypto/ccp/dbc.h               |   47
-rw-r--r--  drivers/crypto/ccp/hsti.c              |  138
-rw-r--r--  drivers/crypto/ccp/hsti.h              |   17
-rw-r--r--  drivers/crypto/ccp/platform-access.c   |  227
-rw-r--r--  drivers/crypto/ccp/platform-access.h   |   35
-rw-r--r--  drivers/crypto/ccp/psp-dev.c           |  173
-rw-r--r--  drivers/crypto/ccp/psp-dev.h           |  111
-rw-r--r--  drivers/crypto/ccp/sev-dev-tio.c       |  864
-rw-r--r--  drivers/crypto/ccp/sev-dev-tio.h       |  123
-rw-r--r--  drivers/crypto/ccp/sev-dev-tsm.c       |  405
-rw-r--r--  drivers/crypto/ccp/sev-dev.c           | 1863
-rw-r--r--  drivers/crypto/ccp/sev-dev.h           |   27
-rw-r--r--  drivers/crypto/ccp/sfs.c               |  311
-rw-r--r--  drivers/crypto/ccp/sfs.h               |   47
-rw-r--r--  drivers/crypto/ccp/sp-dev.c            |   14
-rw-r--r--  drivers/crypto/ccp/sp-dev.h            |   24
-rw-r--r--  drivers/crypto/ccp/sp-pci.c            |  236
-rw-r--r--  drivers/crypto/ccp/sp-platform.c       |   33
-rw-r--r--  drivers/crypto/ccp/tee-dev.c           |   63
-rw-r--r--  drivers/crypto/ccp/tee-dev.h           |   15
30 files changed, 4835 insertions, 486 deletions
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 32268e239bf1..f16a0f611317 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -38,7 +38,8 @@ config CRYPTO_DEV_CCP_CRYPTO
config CRYPTO_DEV_SP_PSP
bool "Platform Security Processor (PSP) device"
default y
- depends on CRYPTO_DEV_CCP_DD && X86_64
+ depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
+ select PCI_TSM if PCI
help
Provide support for the AMD Platform Security Processor (PSP).
The PSP is a dedicated processor that provides support for key
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index db362fe472ea..0424e08561ef 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -10,7 +10,15 @@ ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
sev-dev.o \
- tee-dev.o
+ tee-dev.o \
+ platform-access.o \
+ dbc.o \
+ hsti.o \
+ sfs.o
+
+ifeq ($(CONFIG_PCI_TSM),y)
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o
+endif
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 918e223f21b6..685d42ec7ade 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -7,15 +7,16 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
-#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
@@ -267,24 +268,6 @@ static struct ccp_aes_def aes_algs[] = {
.alg_defaults = &ccp_aes_defaults,
},
{
- .mode = CCP_AES_MODE_CFB,
- .version = CCP_VERSION(3, 0),
- .name = "cfb(aes)",
- .driver_name = "cfb-aes-ccp",
- .blocksize = 1,
- .ivsize = AES_BLOCK_SIZE,
- .alg_defaults = &ccp_aes_defaults,
- },
- {
- .mode = CCP_AES_MODE_OFB,
- .version = CCP_VERSION(3, 0),
- .name = "ofb(aes)",
- .driver_name = "ofb-aes-ccp",
- .blocksize = 1,
- .ivsize = AES_BLOCK_SIZE,
- .alg_defaults = &ccp_aes_defaults,
- },
- {
.mode = CCP_AES_MODE_CTR,
.version = CCP_VERSION(3, 0),
.name = "ctr(aes)",
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
index afae30adb703..91b1189c47de 100644
--- a/drivers/crypto/ccp/ccp-crypto-des3.c
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -7,14 +7,15 @@
* Author: Gary R Hook <ghook@amd.com>
*/
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <crypto/algapi.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/des.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include "ccp-crypto.h"
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 73442a382f68..bc90aba5162a 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -7,14 +7,17 @@
* Author: Tom Lendacky <thomas.lendacky@amd.com>
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/ccp.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/ccp.h>
+#include <linux/module.h>
#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/akcipher.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include "ccp-crypto.h"
@@ -146,7 +149,7 @@ static void ccp_crypto_complete(void *data, int err)
/* Only propagate the -EINPROGRESS if necessary */
if (crypto_cmd->ret == -EBUSY) {
crypto_cmd->ret = -EINPROGRESS;
- req->complete(req, -EINPROGRESS);
+ crypto_request_complete(req, -EINPROGRESS);
}
return;
@@ -159,18 +162,18 @@ static void ccp_crypto_complete(void *data, int err)
held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
if (backlog) {
backlog->ret = -EINPROGRESS;
- backlog->req->complete(backlog->req, -EINPROGRESS);
+ crypto_request_complete(backlog->req, -EINPROGRESS);
}
/* Transition the state from -EBUSY to -EINPROGRESS first */
if (crypto_cmd->ret == -EBUSY)
- req->complete(req, -EINPROGRESS);
+ crypto_request_complete(req, -EINPROGRESS);
/* Completion callbacks */
ret = err;
if (ctx->complete)
ret = ctx->complete(req, ret);
- req->complete(req, ret);
+ crypto_request_complete(req, ret);
/* Submit the next cmd */
while (held) {
@@ -186,12 +189,12 @@ static void ccp_crypto_complete(void *data, int err)
ctx = crypto_tfm_ctx_dma(held->req->tfm);
if (ctx->complete)
ret = ctx->complete(held->req, ret);
- held->req->complete(held->req, ret);
+ crypto_request_complete(held->req, ret);
next = ccp_crypto_cmd_complete(held, &backlog);
if (backlog) {
backlog->ret = -EINPROGRESS;
- backlog->req->complete(backlog->req, -EINPROGRESS);
+ crypto_request_complete(backlog->req, -EINPROGRESS);
}
kfree(held);
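
The req->complete() to crypto_request_complete() conversions above track the crypto API's completion-callback rework, in which callbacks receive the caller's data cookie rather than the request pointer. For reference, the helper is essentially the following (a sketch of the <linux/crypto.h> definition; see the upstream header for the authoritative version):

static inline void crypto_request_complete(struct crypto_async_request *req,
                                           int err)
{
        /* The callback receives the caller's data cookie, not the request */
        req->complete(req->data, err);
}
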
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index a1055554b47a..dc26bc22c91d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
void ccp5_debugfs_destroy(void)
{
+ mutex_lock(&ccp_debugfs_lock);
debugfs_remove_recursive(ccp_debugfs_dir);
+ ccp_debugfs_dir = NULL;
+ mutex_unlock(&ccp_debugfs_lock);
}
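
Taking ccp_debugfs_lock here and clearing ccp_debugfs_dir pairs this destroy path with setup: with several CCP devices probing and removing concurrently, a destroy could otherwise race against a setup that still sees the stale dentry. A sketch of the assumed setup side (the real ccp5_debugfs_setup() is not part of this hunk):

static struct dentry *ccp_debugfs_dir;
static DEFINE_MUTEX(ccp_debugfs_lock);

static void ccp_debugfs_setup_sketch(void)
{
        mutex_lock(&ccp_debugfs_lock);
        if (!ccp_debugfs_dir)   /* re-create after a prior destroy */
                ccp_debugfs_dir = debugfs_create_dir("ccp", NULL);
        mutex_unlock(&ccp_debugfs_lock);
}
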
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c531d13d971f..246801912e1a 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -507,7 +507,7 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
+ int len = min(sizeof(trng_value), max);
/* Locking is provided by the caller so we can update device
* hwrng-related fields safely
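
The min_t(int, ...) to min() change is a type-correctness fix: casting the size_t 'max' down to int could, for a pathologically large request, wrap negative and propagate a bogus length, while min() on two size_t operands needs no cast. A hedged illustration (the oversized 'max' is hypothetical; hwrng callers pass small values in practice):

static void min_cast_demo(void)
{
        u32 trng_value;
        size_t max = (size_t)INT_MAX + 1;       /* hypothetical oversized request */
        int bad;
        size_t good;

        bad = min_t(int, sizeof(trng_value), max);      /* (int)max wraps negative */
        good = min(sizeof(trng_value), max);            /* 4, as intended */
}
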
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 9f753cb4f5f1..b386a7063818 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -642,14 +642,26 @@ static void ccp_dma_release(struct ccp_device *ccp)
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
- if (dma_chan->client_count)
- dma_release_channel(dma_chan);
-
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
}
+static void ccp_dma_release_channels(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -770,8 +782,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
- ccp_dma_release(ccp);
+ ccp_dma_release_channels(ccp);
dma_async_device_unregister(dma_dev);
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
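
The split into ccp_dma_release_channels() and ccp_dma_release() exists to get teardown ordering right: client references must be dropped before dma_async_device_unregister(), but the tasklets and channel list must stay alive until it returns. The resulting order, annotated (a sketch restating the hunk above):

static void ccp_dmaengine_unregister_sketch(struct ccp_device *ccp)
{
        struct dma_device *dma_dev = &ccp->dma_dev;

        /* 1. Drop dangling client refs so the core allows unregistering */
        ccp_dma_release_channels(ccp);
        /* 2. Unregister; the dmaengine core may still touch channels here */
        dma_async_device_unregister(dma_dev);
        /* 3. Only now kill the tasklets and unlink the channels */
        ccp_dma_release(ccp);
}
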
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index aa4e1a500691..d78865d9d5f0 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -8,13 +8,14 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
#include <crypto/des.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/utils.h>
#include <linux/ccp.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include "ccp-dev.h"
@@ -179,8 +180,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
wa->dma.address = dma_map_single(wa->dev, wa->address, len,
dir);
- if (dma_mapping_error(wa->dev, wa->dma.address))
+ if (dma_mapping_error(wa->dev, wa->dma.address)) {
+ kfree(wa->address);
+ wa->address = NULL;
return -ENOMEM;
+ }
wa->dma.length = len;
}
@@ -629,10 +633,16 @@ static noinline_for_stack int
ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_aes_engine *aes = &cmd->u.aes;
- struct ccp_dm_workarea key, ctx, final_wa, tag;
- struct ccp_data src, dst;
- struct ccp_data aad;
- struct ccp_op op;
+ struct {
+ struct ccp_dm_workarea key;
+ struct ccp_dm_workarea ctx;
+ struct ccp_dm_workarea final;
+ struct ccp_dm_workarea tag;
+ struct ccp_data src;
+ struct ccp_data dst;
+ struct ccp_data aad;
+ struct ccp_op op;
+ } *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);
unsigned int dm_offset;
unsigned int authsize;
unsigned int jobid;
@@ -646,6 +656,9 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
struct scatterlist *p_outp, sg_outp[2];
struct scatterlist *p_aad;
+ if (!wa)
+ return -ENOMEM;
+
if (!aes->iv)
return -EINVAL;
@@ -692,26 +705,26 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
jobid = CCP_NEW_JOBID(cmd_q->ccp);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
/* Copy the key to the LSB */
- ret = ccp_init_dm_workarea(&key, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->key, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_TO_DEVICE);
if (ret)
return ret;
dm_offset = CCP_SB_BYTES - aes->key_len;
- ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+ ret = ccp_set_dm_area(&wa->key, dm_offset, aes->key, 0, aes->key_len);
if (ret)
goto e_key;
- ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ ret = ccp_copy_to_sb(cmd_q, &wa->key, wa->op.jobid, wa->op.sb_key,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -722,58 +735,58 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
* There is an assumption here that the IV is 96 bits in length, plus
* a nonce of 32 bits. If no IV is present, use a zeroed buffer.
*/
- ret = ccp_init_dm_workarea(&ctx, cmd_q,
+ ret = ccp_init_dm_workarea(&wa->ctx, cmd_q,
CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
DMA_BIDIRECTIONAL);
if (ret)
goto e_key;
dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_ctx;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_ctx;
}
- op.init = 1;
+ wa->op.init = 1;
if (aes->aad_len > 0) {
/* Step 1: Run a GHASH over the Additional Authenticated Data */
- ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+ ret = ccp_init_data(&wa->aad, cmd_q, p_aad, aes->aad_len,
AES_BLOCK_SIZE,
DMA_TO_DEVICE);
if (ret)
goto e_ctx;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHAAD;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHAAD;
- while (aad.sg_wa.bytes_left) {
- ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+ while (wa->aad.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->aad, NULL, &wa->op, AES_BLOCK_SIZE, true);
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_aad;
}
- ccp_process_data(&aad, NULL, &op);
- op.init = 0;
+ ccp_process_data(&wa->aad, NULL, &wa->op);
+ wa->op.init = 0;
}
}
- op.u.aes.mode = CCP_AES_MODE_GCTR;
- op.u.aes.action = aes->action;
+ wa->op.u.aes.mode = CCP_AES_MODE_GCTR;
+ wa->op.u.aes.action = aes->action;
if (ilen > 0) {
/* Step 2: Run a GCTR over the plaintext */
in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
- ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+ ret = ccp_init_data(&wa->src, cmd_q, p_inp, ilen,
AES_BLOCK_SIZE,
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
@@ -781,52 +794,52 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
goto e_aad;
if (in_place) {
- dst = src;
+ wa->dst = wa->src;
} else {
- ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+ ret = ccp_init_data(&wa->dst, cmd_q, p_outp, ilen,
AES_BLOCK_SIZE, DMA_FROM_DEVICE);
if (ret)
goto e_src;
}
- op.soc = 0;
- op.eom = 0;
- op.init = 1;
- while (src.sg_wa.bytes_left) {
- ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
- if (!src.sg_wa.bytes_left) {
+ wa->op.soc = 0;
+ wa->op.eom = 0;
+ wa->op.init = 1;
+ while (wa->src.sg_wa.bytes_left) {
+ ccp_prepare_data(&wa->src, &wa->dst, &wa->op, AES_BLOCK_SIZE, true);
+ if (!wa->src.sg_wa.bytes_left) {
unsigned int nbytes = ilen % AES_BLOCK_SIZE;
if (nbytes) {
- op.eom = 1;
- op.u.aes.size = (nbytes * 8) - 1;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = (nbytes * 8) - 1;
}
}
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ccp_process_data(&src, &dst, &op);
- op.init = 0;
+ ccp_process_data(&wa->src, &wa->dst, &wa->op);
+ wa->op.init = 0;
}
}
/* Step 3: Update the IV portion of the context with the original IV */
- ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_from_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
goto e_dst;
}
- ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+ ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
if (ret)
goto e_dst;
- ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
CCP_PASSTHRU_BYTESWAP_256BIT);
if (ret) {
cmd->engine_error = cmd_q->cmd_error;
@@ -836,75 +849,75 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
/* Step 4: Concatenate the lengths of the AAD and source, and
* hash that 16 byte buffer.
*/
- ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+ ret = ccp_init_dm_workarea(&wa->final, cmd_q, AES_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
if (ret)
goto e_dst;
- final = (__be64 *)final_wa.address;
+ final = (__be64 *)wa->final.address;
final[0] = cpu_to_be64(aes->aad_len * 8);
final[1] = cpu_to_be64(ilen * 8);
- memset(&op, 0, sizeof(op));
- op.cmd_q = cmd_q;
- op.jobid = jobid;
- op.sb_key = cmd_q->sb_key; /* Pre-allocated */
- op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
- op.init = 1;
- op.u.aes.type = aes->type;
- op.u.aes.mode = CCP_AES_MODE_GHASH;
- op.u.aes.action = CCP_AES_GHASHFINAL;
- op.src.type = CCP_MEMTYPE_SYSTEM;
- op.src.u.dma.address = final_wa.dma.address;
- op.src.u.dma.length = AES_BLOCK_SIZE;
- op.dst.type = CCP_MEMTYPE_SYSTEM;
- op.dst.u.dma.address = final_wa.dma.address;
- op.dst.u.dma.length = AES_BLOCK_SIZE;
- op.eom = 1;
- op.u.aes.size = 0;
- ret = cmd_q->ccp->vdata->perform->aes(&op);
+ memset(&wa->op, 0, sizeof(wa->op));
+ wa->op.cmd_q = cmd_q;
+ wa->op.jobid = jobid;
+ wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+ wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+ wa->op.init = 1;
+ wa->op.u.aes.type = aes->type;
+ wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+ wa->op.u.aes.action = CCP_AES_GHASHFINAL;
+ wa->op.src.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.src.u.dma.address = wa->final.dma.address;
+ wa->op.src.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.dst.type = CCP_MEMTYPE_SYSTEM;
+ wa->op.dst.u.dma.address = wa->final.dma.address;
+ wa->op.dst.u.dma.length = AES_BLOCK_SIZE;
+ wa->op.eom = 1;
+ wa->op.u.aes.size = 0;
+ ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
if (ret)
goto e_final_wa;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
- ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ ccp_get_dm_area(&wa->final, 0, p_tag, 0, authsize);
} else {
/* Does this ciphered tag match the input? */
- ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ ret = ccp_init_dm_workarea(&wa->tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
goto e_final_wa;
- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+ ret = ccp_set_dm_area(&wa->tag, 0, p_tag, 0, authsize);
if (ret) {
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
goto e_final_wa;
}
- ret = crypto_memneq(tag.address, final_wa.address,
+ ret = crypto_memneq(wa->tag.address, wa->final.address,
authsize) ? -EBADMSG : 0;
- ccp_dm_free(&tag);
+ ccp_dm_free(&wa->tag);
}
e_final_wa:
- ccp_dm_free(&final_wa);
+ ccp_dm_free(&wa->final);
e_dst:
if (ilen > 0 && !in_place)
- ccp_free_data(&dst, cmd_q);
+ ccp_free_data(&wa->dst, cmd_q);
e_src:
if (ilen > 0)
- ccp_free_data(&src, cmd_q);
+ ccp_free_data(&wa->src, cmd_q);
e_aad:
if (aes->aad_len)
- ccp_free_data(&aad, cmd_q);
+ ccp_free_data(&wa->aad, cmd_q);
e_ctx:
- ccp_dm_free(&ctx);
+ ccp_dm_free(&wa->ctx);
e_key:
- ccp_dm_free(&key);
+ ccp_dm_free(&wa->key);
return ret;
}
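
Moving the eight work areas into a single heap allocation keeps ccp_run_aes_gcm_cmd() within the kernel's stack-frame limits, and the scope-based freeing annotation from <linux/cleanup.h> (wrapped by <linux/slab.h>) releases the struct on every return path without an explicit kfree() at each error label. A minimal sketch of the pattern:

#include <linux/slab.h>

struct workareas {
        long scratch[64];
};

static int run_with_auto_free(void)
{
        struct workareas *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);

        if (!wa)
                return -ENOMEM; /* freeing NULL at scope exit is a no-op */

        /* ... use wa; every return below frees it automatically ... */
        return 0;
}
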
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
new file mode 100644
index 000000000000..410084a9039c
--- /dev/null
+++ b/drivers/crypto/ccp/dbc.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Dynamic Boost Control interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/mutex.h>
+
+#include "dbc.h"
+
+#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
+
+struct error_map {
+ u32 psp;
+ int ret;
+};
+
+#define DBC_ERROR_ACCESS_DENIED 0x0001
+#define DBC_ERROR_EXCESS_DATA 0x0004
+#define DBC_ERROR_BAD_PARAMETERS 0x0006
+#define DBC_ERROR_BAD_STATE 0x0007
+#define DBC_ERROR_NOT_IMPLEMENTED 0x0009
+#define DBC_ERROR_BUSY 0x000D
+#define DBC_ERROR_MESSAGE_FAILURE 0x0307
+#define DBC_ERROR_OVERFLOW 0x300F
+#define DBC_ERROR_SIGNATURE_INVALID 0x3072
+
+static struct error_map error_codes[] = {
+ {DBC_ERROR_ACCESS_DENIED, -EACCES},
+ {DBC_ERROR_EXCESS_DATA, -E2BIG},
+ {DBC_ERROR_BAD_PARAMETERS, -EINVAL},
+ {DBC_ERROR_BAD_STATE, -EAGAIN},
+ {DBC_ERROR_MESSAGE_FAILURE, -ENOENT},
+ {DBC_ERROR_NOT_IMPLEMENTED, -ENOENT},
+ {DBC_ERROR_BUSY, -EBUSY},
+ {DBC_ERROR_OVERFLOW, -ENFILE},
+ {DBC_ERROR_SIGNATURE_INVALID, -EPERM},
+ {0x0, 0x0},
+};
+
+static inline int send_dbc_cmd_thru_ext(struct psp_dbc_device *dbc_dev, int msg)
+{
+ dbc_dev->mbox->ext_req.header.sub_cmd_id = msg;
+
+ return psp_extended_mailbox_cmd(dbc_dev->psp,
+ DBC_DEFAULT_TIMEOUT,
+ (struct psp_ext_request *)dbc_dev->mbox);
+}
+
+static inline int send_dbc_cmd_thru_pa(struct psp_dbc_device *dbc_dev, int msg)
+{
+ return psp_send_platform_access_msg(msg,
+ (struct psp_request *)dbc_dev->mbox);
+}
+
+static int send_dbc_cmd(struct psp_dbc_device *dbc_dev, int msg)
+{
+ int ret;
+
+ *dbc_dev->result = 0;
+ ret = dbc_dev->use_ext ? send_dbc_cmd_thru_ext(dbc_dev, msg) :
+ send_dbc_cmd_thru_pa(dbc_dev, msg);
+ if (ret == -EIO) {
+ int i;
+
+ dev_dbg(dbc_dev->dev,
+ "msg 0x%x failed with PSP error: 0x%x\n",
+ msg, *dbc_dev->result);
+
+ for (i = 0; error_codes[i].psp; i++) {
+ if (*dbc_dev->result == error_codes[i].psp)
+ return error_codes[i].ret;
+ }
+ }
+
+ return ret;
+}
+
+static int send_dbc_nonce(struct psp_dbc_device *dbc_dev)
+{
+ int ret;
+
+ *dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_nonce);
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
+ if (ret == -EAGAIN) {
+ dev_dbg(dbc_dev->dev, "retrying get nonce\n");
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
+ }
+
+ return ret;
+}
+
+static int send_dbc_parameter(struct psp_dbc_device *dbc_dev)
+{
+ struct dbc_user_param *user_param = (struct dbc_user_param *)dbc_dev->payload;
+
+ switch (user_param->msg_index) {
+ case PARAM_SET_FMAX_CAP:
+ case PARAM_SET_PWR_CAP:
+ case PARAM_SET_GFX_MODE:
+ return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_PARAMETER);
+ case PARAM_GET_FMAX_CAP:
+ case PARAM_GET_PWR_CAP:
+ case PARAM_GET_CURR_TEMP:
+ case PARAM_GET_FMAX_MAX:
+ case PARAM_GET_FMAX_MIN:
+ case PARAM_GET_SOC_PWR_MAX:
+ case PARAM_GET_SOC_PWR_MIN:
+ case PARAM_GET_SOC_PWR_CUR:
+ case PARAM_GET_GFX_MODE:
+ return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_PARAMETER);
+ }
+
+ return -EINVAL;
+}
+
+void dbc_dev_destroy(struct psp_device *psp)
+{
+ struct psp_dbc_device *dbc_dev = psp->dbc_data;
+
+ if (!dbc_dev)
+ return;
+
+ misc_deregister(&dbc_dev->char_dev);
+ mutex_destroy(&dbc_dev->ioctl_mutex);
+ psp->dbc_data = NULL;
+}
+
+static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ void __user *argp = (void __user *)arg;
+ struct psp_dbc_device *dbc_dev;
+ int ret;
+
+ if (!psp_master || !psp_master->dbc_data)
+ return -ENODEV;
+ dbc_dev = psp_master->dbc_data;
+
+ guard(mutex)(&dbc_dev->ioctl_mutex);
+
+ switch (cmd) {
+ case DBCIOCNONCE:
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
+
+ ret = send_dbc_nonce(dbc_dev);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce)))
+ return -EFAULT;
+ break;
+ case DBCIOCUID:
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
+
+ *dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
+ ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid)))
+ return -EFAULT;
+ break;
+ case DBCIOCPARAM:
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param)))
+ return -EFAULT;
+
+ *dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
+ ret = send_dbc_parameter(dbc_dev);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param)))
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct file_operations dbc_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dbc_ioctl,
+};
+
+int dbc_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_dbc_device *dbc_dev;
+ int ret;
+
+ dbc_dev = devm_kzalloc(dev, sizeof(*dbc_dev), GFP_KERNEL);
+ if (!dbc_dev)
+ return -ENOMEM;
+
+ BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
+ dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!dbc_dev->mbox) {
+ ret = -ENOMEM;
+ goto cleanup_dev;
+ }
+
+ psp->dbc_data = dbc_dev;
+ dbc_dev->dev = dev;
+ dbc_dev->psp = psp;
+
+ if (psp->capability.dbc_thru_ext) {
+ dbc_dev->use_ext = true;
+ dbc_dev->payload_size = &dbc_dev->mbox->ext_req.header.payload_size;
+ dbc_dev->result = &dbc_dev->mbox->ext_req.header.status;
+ dbc_dev->payload = &dbc_dev->mbox->ext_req.buf;
+ dbc_dev->header_size = sizeof(struct psp_ext_req_buffer_hdr);
+ } else {
+ dbc_dev->payload_size = &dbc_dev->mbox->pa_req.header.payload_size;
+ dbc_dev->result = &dbc_dev->mbox->pa_req.header.status;
+ dbc_dev->payload = &dbc_dev->mbox->pa_req.buf;
+ dbc_dev->header_size = sizeof(struct psp_req_buffer_hdr);
+ }
+
+ ret = send_dbc_nonce(dbc_dev);
+ if (ret == -EACCES) {
+ dev_dbg(dbc_dev->dev,
+ "dynamic boost control was previously authenticated\n");
+ ret = 0;
+ }
+ dev_dbg(dbc_dev->dev, "dynamic boost control is %savailable\n",
+ ret ? "un" : "");
+ if (ret) {
+ ret = 0;
+ goto cleanup_mbox;
+ }
+
+ dbc_dev->char_dev.minor = MISC_DYNAMIC_MINOR;
+ dbc_dev->char_dev.name = "dbc";
+ dbc_dev->char_dev.fops = &dbc_fops;
+ dbc_dev->char_dev.mode = 0600;
+ ret = misc_register(&dbc_dev->char_dev);
+ if (ret)
+ goto cleanup_mbox;
+
+ mutex_init(&dbc_dev->ioctl_mutex);
+
+ return 0;
+
+cleanup_mbox:
+ devm_free_pages(dev, (unsigned long)dbc_dev->mbox);
+
+cleanup_dev:
+ psp->dbc_data = NULL;
+ devm_kfree(dev, dbc_dev);
+
+ return ret;
+}
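
dbc_ioctl() above relies on guard(mutex)() from <linux/cleanup.h>: the lock is dropped automatically when the scope ends, so none of the early returns needs an explicit mutex_unlock(). A self-contained sketch of the idiom:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int shared_state;

static int update_state(int val)
{
        guard(mutex)(&example_lock);    /* unlocked at every scope exit */

        if (val < 0)
                return -EINVAL;         /* no unlock needed here */

        shared_state = val;
        return 0;
}
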
diff --git a/drivers/crypto/ccp/dbc.h b/drivers/crypto/ccp/dbc.h
new file mode 100644
index 000000000000..e0fecbe92eb1
--- /dev/null
+++ b/drivers/crypto/ccp/dbc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Dynamic Boost Control support
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __DBC_H__
+#define __DBC_H__
+
+#include <uapi/linux/psp-dbc.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-platform-access.h>
+
+#include "psp-dev.h"
+
+struct psp_dbc_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ union dbc_buffer *mbox;
+
+ struct mutex ioctl_mutex;
+
+ struct miscdevice char_dev;
+
+ /* used to abstract communication path */
+ bool use_ext;
+ u32 header_size;
+ u32 *payload_size;
+ u32 *result;
+ void *payload;
+};
+
+union dbc_buffer {
+ struct psp_request pa_req;
+ struct psp_ext_request ext_req;
+};
+
+void dbc_dev_destroy(struct psp_device *psp);
+int dbc_dev_init(struct psp_device *psp);
+
+#endif /* __DBC_H__ */
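
Userspace drives the resulting /dev/dbc node with the DBCIOC* ioctls declared in <uapi/linux/psp-dbc.h>. A hedged userspace sketch fetching a nonce (field names taken from that uapi header; adjust if your headers differ):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/psp-dbc.h>

int main(void)
{
        struct dbc_user_nonce nonce = { .auth_needed = 0 };
        int fd = open("/dev/dbc", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, DBCIOCNONCE, &nonce))
                perror("DBCIOCNONCE");
        close(fd);
        return 0;
}
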
diff --git a/drivers/crypto/ccp/hsti.c b/drivers/crypto/ccp/hsti.c
new file mode 100644
index 000000000000..c29c6a9c0f3f
--- /dev/null
+++ b/drivers/crypto/ccp/hsti.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor device driver, security attributes
+ *
+ * Copyright (C) 2023-2024 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/device.h>
+
+#include "psp-dev.h"
+#include "hsti.h"
+
+#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8
+
+struct hsti_request {
+ struct psp_req_buffer_hdr header;
+ u32 hsti;
+} __packed;
+
+#define security_attribute_show(name) \
+static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct sp_device *sp = dev_get_drvdata(d); \
+ struct psp_device *psp = sp->psp_data; \
+ return sysfs_emit(buf, "%d\n", psp->capability.name); \
+}
+
+security_attribute_show(fused_part)
+static DEVICE_ATTR_RO(fused_part);
+security_attribute_show(debug_lock_on)
+static DEVICE_ATTR_RO(debug_lock_on);
+security_attribute_show(tsme_status)
+static DEVICE_ATTR_RO(tsme_status);
+security_attribute_show(anti_rollback_status)
+static DEVICE_ATTR_RO(anti_rollback_status);
+security_attribute_show(rpmc_production_enabled)
+static DEVICE_ATTR_RO(rpmc_production_enabled);
+security_attribute_show(rpmc_spirom_available)
+static DEVICE_ATTR_RO(rpmc_spirom_available);
+security_attribute_show(hsp_tpm_available)
+static DEVICE_ATTR_RO(hsp_tpm_available);
+security_attribute_show(rom_armor_enforced)
+static DEVICE_ATTR_RO(rom_armor_enforced);
+
+static struct attribute *psp_security_attrs[] = {
+ &dev_attr_fused_part.attr,
+ &dev_attr_debug_lock_on.attr,
+ &dev_attr_tsme_status.attr,
+ &dev_attr_anti_rollback_status.attr,
+ &dev_attr_rpmc_production_enabled.attr,
+ &dev_attr_rpmc_spirom_available.attr,
+ &dev_attr_hsp_tpm_available.attr,
+ &dev_attr_rom_armor_enforced.attr,
+ NULL
+};
+
+static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct sp_device *sp = dev_get_drvdata(dev);
+ struct psp_device *psp = sp->psp_data;
+
+ if (psp && psp->capability.security_reporting)
+ return 0444;
+
+ return 0;
+}
+
+struct attribute_group psp_security_attr_group = {
+ .attrs = psp_security_attrs,
+ .is_visible = psp_security_is_visible,
+};
+
+static int psp_populate_hsti(struct psp_device *psp)
+{
+ struct hsti_request *req;
+ int ret;
+
+ /* Are the security attributes already reported? */
+ if (psp->capability.security_reporting)
+ return 0;
+
+ /* Allocate command-response buffer */
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->header.payload_size = sizeof(*req);
+
+ ret = psp_send_platform_access_msg(PSP_CMD_HSTI_QUERY, (struct psp_request *)req);
+ if (ret)
+ goto out;
+
+ if (req->header.status != 0) {
+ dev_dbg(psp->dev, "failed to populate HSTI state: %d\n", req->header.status);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ psp->capability.security_reporting = 1;
+ psp->capability.raw |= req->hsti << PSP_CAPABILITY_PSP_SECURITY_OFFSET;
+
+out:
+ kfree(req);
+
+ return ret;
+}
+
+int psp_init_hsti(struct psp_device *psp)
+{
+ int ret;
+
+ if (PSP_FEATURE(psp, HSTI)) {
+ ret = psp_populate_hsti(psp);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * At this stage, if security information hasn't been populated by
+ * either the PSP or by the driver through the platform command,
+ * then there is nothing more to do.
+ */
+ if (!psp->capability.security_reporting)
+ return 0;
+
+ if (psp->capability.tsme_status) {
+ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
+ else
+ dev_notice(psp->dev, "psp: TSME enabled\n");
+ }
+
+ return 0;
+}
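
psp_security_is_visible() gates the entire attribute group at once: the sysfs files appear only when the PSP reported, or psp_populate_hsti() derived, the security_reporting capability. The group itself is assumed to be attached by the PCI front end (sp-pci.c, not shown in this section), roughly like:

/* Hedged sketch: the attachment point is assumed, not shown in this diff */
static const struct attribute_group *psp_groups[] = {
        &psp_security_attr_group,
        NULL,
};

static struct pci_driver sp_pci_driver_sketch = {
        /* ... */
        .dev_groups = psp_groups,
};
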
diff --git a/drivers/crypto/ccp/hsti.h b/drivers/crypto/ccp/hsti.h
new file mode 100644
index 000000000000..6a70f922d2c4
--- /dev/null
+++ b/drivers/crypto/ccp/hsti.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Secure Processor device driver, security attributes
+ *
+ * Copyright (C) 2023-2024 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __HSTI_H
+#define __HSTI_H
+
+extern struct attribute_group psp_security_attr_group;
+
+int psp_init_hsti(struct psp_device *psp);
+
+#endif /* __HSTI_H */
diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
new file mode 100644
index 000000000000..1b8ed3389733
--- /dev/null
+++ b/drivers/crypto/ccp/platform-access.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Platform Security Processor (PSP) Platform Access interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ *
+ * Some of this code is adapted from drivers/i2c/busses/i2c-designware-amdpsp.c
+ * developed by Jan Dabros <jsd@semihalf.com> and Copyright (C) 2022 Google Inc.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+
+#include "platform-access.h"
+
+#define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC)
+#define DOORBELL_CMDRESP_STS GENMASK(7, 0)
+
+/* Recovery field should be equal to 0 to start sending commands */
+static int check_recovery(u32 __iomem *cmd)
+{
+ return FIELD_GET(PSP_CMDRESP_RECOVERY, ioread32(cmd));
+}
+
+static int wait_cmd(u32 __iomem *cmd)
+{
+ u32 tmp, expected;
+
+ /* Expect mbox_cmd to be cleared and ready bit to be set by PSP */
+ expected = FIELD_PREP(PSP_CMDRESP_RESP, 1);
+
+ /*
+ * Check for readiness of the PSP mailbox in a tight loop in order to
+ * proceed as soon as the command has been consumed.
+ */
+ return readl_poll_timeout(cmd, tmp, (tmp & expected), 0,
+ PSP_CMD_TIMEOUT_US);
+}
+
+int psp_check_platform_access_status(void)
+{
+ struct psp_device *psp = psp_get_master_device();
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL(psp_check_platform_access_status);
+
+int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
+ struct psp_request *req)
+{
+ struct psp_device *psp = psp_get_master_device();
+ u32 __iomem *cmd, *lo, *hi;
+ struct psp_platform_access_device *pa_dev;
+ phys_addr_t req_addr;
+ u32 cmd_reg;
+ int ret;
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ pa_dev = psp->platform_access_data;
+
+ if (!pa_dev->vdata->cmdresp_reg || !pa_dev->vdata->cmdbuff_addr_lo_reg ||
+ !pa_dev->vdata->cmdbuff_addr_hi_reg)
+ return -ENODEV;
+
+ cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg;
+ lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg;
+ hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg;
+
+ mutex_lock(&pa_dev->mailbox_mutex);
+
+ if (check_recovery(cmd)) {
+ dev_dbg(psp->dev, "platform mailbox is in recovery\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (wait_cmd(cmd)) {
+ dev_dbg(psp->dev, "platform mailbox is not done processing command\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /*
+ * Fill mailbox with address of command-response buffer, which will be
+ * used for sending i2c requests as well as reading status returned by
+ * PSP. Use physical address of buffer, since PSP will map this region.
+ */
+ req_addr = __psp_pa(req);
+ iowrite32(lower_32_bits(req_addr), lo);
+ iowrite32(upper_32_bits(req_addr), hi);
+
+ print_hex_dump_debug("->psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ /* Write command register to trigger processing */
+ cmd_reg = FIELD_PREP(PSP_CMDRESP_CMD, msg);
+ iowrite32(cmd_reg, cmd);
+
+ if (wait_cmd(cmd)) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ /* Ensure it was triggered by this driver */
+ if (ioread32(lo) != lower_32_bits(req_addr) ||
+ ioread32(hi) != upper_32_bits(req_addr)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ /*
+ * Read status from PSP. If status is non-zero, it indicates an error
+ * occurred during "processing" of the command.
+ * If status is zero, it indicates the command was "processed"
+ * successfully, but the result of the command is in the payload.
+ * Return both cases to the caller as -EIO to investigate.
+ */
+ cmd_reg = ioread32(cmd);
+ if (FIELD_GET(PSP_CMDRESP_STS, cmd_reg))
+ req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
+ if (req->header.status) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ print_hex_dump_debug("<-psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ ret = 0;
+
+unlock:
+ mutex_unlock(&pa_dev->mailbox_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(psp_send_platform_access_msg);
+
+int psp_ring_platform_doorbell(int msg, u32 *result)
+{
+ struct psp_device *psp = psp_get_master_device();
+ struct psp_platform_access_device *pa_dev;
+ u32 __iomem *button, *cmd;
+ int ret, val;
+
+ if (!psp || !psp->platform_access_data)
+ return -ENODEV;
+
+ pa_dev = psp->platform_access_data;
+ button = psp->io_regs + pa_dev->vdata->doorbell_button_reg;
+ cmd = psp->io_regs + pa_dev->vdata->doorbell_cmd_reg;
+
+ mutex_lock(&pa_dev->doorbell_mutex);
+
+ if (wait_cmd(cmd)) {
+ dev_err(psp->dev, "doorbell command not done processing\n");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ iowrite32(FIELD_PREP(DOORBELL_CMDRESP_STS, msg), cmd);
+ iowrite32(PSP_DRBL_RING, button);
+
+ if (wait_cmd(cmd)) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ val = FIELD_GET(DOORBELL_CMDRESP_STS, ioread32(cmd));
+ if (val) {
+ if (result)
+ *result = val;
+ ret = -EIO;
+ goto unlock;
+ }
+
+ ret = 0;
+unlock:
+ mutex_unlock(&pa_dev->doorbell_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(psp_ring_platform_doorbell);
+
+void platform_access_dev_destroy(struct psp_device *psp)
+{
+ struct psp_platform_access_device *pa_dev = psp->platform_access_data;
+
+ if (!pa_dev)
+ return;
+
+ mutex_destroy(&pa_dev->mailbox_mutex);
+ mutex_destroy(&pa_dev->doorbell_mutex);
+ psp->platform_access_data = NULL;
+}
+
+int platform_access_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct psp_platform_access_device *pa_dev;
+
+ pa_dev = devm_kzalloc(dev, sizeof(*pa_dev), GFP_KERNEL);
+ if (!pa_dev)
+ return -ENOMEM;
+
+ psp->platform_access_data = pa_dev;
+ pa_dev->psp = psp;
+ pa_dev->dev = dev;
+
+ pa_dev->vdata = (struct platform_access_vdata *)psp->vdata->platform_access;
+
+ mutex_init(&pa_dev->mailbox_mutex);
+ mutex_init(&pa_dev->doorbell_mutex);
+
+ dev_dbg(dev, "platform access enabled\n");
+
+ return 0;
+}
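
A caller of psp_send_platform_access_msg() builds a physically contiguous request whose first bytes are struct psp_req_buffer_hdr, sets payload_size, and inspects header.status when -EIO comes back — the same pattern psp_populate_hsti() uses in hsti.c above. A hedged sketch with a hypothetical message and layout:

#include <linux/psp-platform-access.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct my_pa_req {                      /* hypothetical request layout */
        struct psp_req_buffer_hdr header;
        u32 payload;
} __packed;

static int send_my_pa_req(enum psp_platform_access_msg msg)
{
        struct my_pa_req *req __free(kfree) = kzalloc(sizeof(*req), GFP_KERNEL);
        int ret;

        if (!req)
                return -ENOMEM;

        req->header.payload_size = sizeof(*req);
        ret = psp_send_platform_access_msg(msg, (struct psp_request *)req);
        if (ret == -EIO)                /* PSP-side failure: code in header */
                pr_debug("PSP status: 0x%x\n", req->header.status);
        return ret;
}
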
diff --git a/drivers/crypto/ccp/platform-access.h b/drivers/crypto/ccp/platform-access.h
new file mode 100644
index 000000000000..a83f03beb869
--- /dev/null
+++ b/drivers/crypto/ccp/platform-access.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Platform Access interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __PSP_PLATFORM_ACCESS_H__
+#define __PSP_PLATFORM_ACCESS_H__
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/psp-platform-access.h>
+
+#include "psp-dev.h"
+
+struct psp_platform_access_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ struct platform_access_vdata *vdata;
+
+ struct mutex mailbox_mutex;
+ struct mutex doorbell_mutex;
+
+ void *platform_access_data;
+};
+
+void platform_access_dev_destroy(struct psp_device *psp);
+int platform_access_dev_init(struct psp_device *psp);
+
+#endif /* __PSP_PLATFORM_ACCESS_H__ */
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index c9c741ac8442..9e21da0e298a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -9,14 +9,101 @@
#include <linux/kernel.h>
#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
#include "sp-dev.h"
#include "psp-dev.h"
#include "sev-dev.h"
#include "tee-dev.h"
+#include "sfs.h"
+#include "platform-access.h"
+#include "dbc.h"
+#include "hsti.h"
struct psp_device *psp_master;
+#define PSP_C2PMSG_17_CMDRESP_CMD GENMASK(19, 16)
+
+static int psp_mailbox_poll(const void __iomem *cmdresp_reg, unsigned int *cmdresp,
+ unsigned int timeout_msecs)
+{
+ while (true) {
+ *cmdresp = ioread32(cmdresp_reg);
+ if (FIELD_GET(PSP_CMDRESP_RESP, *cmdresp))
+ return 0;
+
+ if (!timeout_msecs--)
+ break;
+
+ usleep_range(1000, 1100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int psp_mailbox_command(struct psp_device *psp, enum psp_cmd cmd, void *cmdbuff,
+ unsigned int timeout_msecs, unsigned int *cmdresp)
+{
+ void __iomem *cmdresp_reg, *cmdbuff_lo_reg, *cmdbuff_hi_reg;
+ int ret;
+
+ if (!psp || !psp->vdata || !psp->vdata->cmdresp_reg ||
+ !psp->vdata->cmdbuff_addr_lo_reg || !psp->vdata->cmdbuff_addr_hi_reg)
+ return -ENODEV;
+
+ cmdresp_reg = psp->io_regs + psp->vdata->cmdresp_reg;
+ cmdbuff_lo_reg = psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg;
+ cmdbuff_hi_reg = psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg;
+
+ mutex_lock(&psp->mailbox_mutex);
+
+ /* Ensure mailbox is ready for a command */
+ ret = -EBUSY;
+ if (psp_mailbox_poll(cmdresp_reg, cmdresp, 0))
+ goto unlock;
+
+ if (cmdbuff) {
+ iowrite32(lower_32_bits(__psp_pa(cmdbuff)), cmdbuff_lo_reg);
+ iowrite32(upper_32_bits(__psp_pa(cmdbuff)), cmdbuff_hi_reg);
+ }
+
+ *cmdresp = FIELD_PREP(PSP_C2PMSG_17_CMDRESP_CMD, cmd);
+ iowrite32(*cmdresp, cmdresp_reg);
+
+ ret = psp_mailbox_poll(cmdresp_reg, cmdresp, timeout_msecs);
+
+unlock:
+ mutex_unlock(&psp->mailbox_mutex);
+
+ return ret;
+}
+
+int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
+ struct psp_ext_request *req)
+{
+ unsigned int reg;
+ int ret;
+
+ print_hex_dump_debug("->psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ ret = psp_mailbox_command(psp, PSP_CMD_TEE_EXTENDED_CMD, (void *)req,
+ timeout_msecs, &reg);
+ if (ret) {
+ return ret;
+ } else if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ req->header.status = FIELD_GET(PSP_CMDRESP_STS, reg);
+ return -EIO;
+ }
+
+ print_hex_dump_debug("<-psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ return 0;
+}
+
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
struct device *dev = sp->dev;
@@ -42,18 +129,15 @@ static irqreturn_t psp_irq_handler(int irq, void *data)
/* Read the interrupt status: */
status = ioread32(psp->io_regs + psp->vdata->intsts_reg);
+ /* Clear the interrupt status by writing the same value we read. */
+ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
+
/* invoke subdevice interrupt handlers */
if (status) {
if (psp->sev_irq_handler)
psp->sev_irq_handler(irq, psp->sev_irq_data, status);
-
- if (psp->tee_irq_handler)
- psp->tee_irq_handler(irq, psp->tee_irq_data, status);
}
- /* Clear the interrupt status by writing the same value we read. */
- iowrite32(status, psp->io_regs + psp->vdata->intsts_reg);
-
return IRQ_HANDLED;
}
@@ -72,13 +156,7 @@ static unsigned int psp_get_capability(struct psp_device *psp)
dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n");
return -ENODEV;
}
- psp->capability = val;
-
- /* Detect if TSME and SME are both enabled */
- if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING &&
- psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) &&
- cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
- dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
+ psp->capability.raw = val;
return 0;
}
@@ -86,7 +164,7 @@ static unsigned int psp_get_capability(struct psp_device *psp)
static int psp_check_sev_support(struct psp_device *psp)
{
/* Check if device supports SEV feature */
- if (!(psp->capability & PSP_CAPABILITY_SEV)) {
+ if (!psp->capability.sev) {
dev_dbg(psp->dev, "psp does not support SEV\n");
return -ENODEV;
}
@@ -97,7 +175,7 @@ static int psp_check_sev_support(struct psp_device *psp)
static int psp_check_tee_support(struct psp_device *psp)
{
/* Check if device supports TEE feature */
- if (!(psp->capability & PSP_CAPABILITY_TEE)) {
+ if (!psp->capability.tee) {
dev_dbg(psp->dev, "psp does not support TEE\n");
return -ENODEV;
}
@@ -105,6 +183,17 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
+static int psp_check_sfs_support(struct psp_device *psp)
+{
+ /* Check if device supports SFS feature */
+ if (!psp->capability.sfs) {
+ dev_dbg(psp->dev, "psp does not support SFS\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -121,6 +210,31 @@ static int psp_init(struct psp_device *psp)
return ret;
}
+ if (!psp_check_sfs_support(psp)) {
+ ret = sfs_dev_init(psp);
+ if (ret)
+ return ret;
+ }
+
+ if (psp->vdata->platform_access) {
+ ret = platform_access_dev_init(psp);
+ if (ret)
+ return ret;
+ }
+
+ /* dbc must come after platform access as it tests the feature */
+ if (PSP_FEATURE(psp, DBC) ||
+ psp->capability.dbc_thru_ext) {
+ ret = dbc_dev_init(psp);
+ if (ret)
+ return ret;
+ }
+
+ /* HSTI uses platform access on some systems. */
+ ret = psp_init_hsti(psp);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -145,6 +259,7 @@ int psp_dev_init(struct sp_device *sp)
}
psp->io_regs = sp->io_map;
+ mutex_init(&psp->mailbox_mutex);
ret = psp_get_capability(psp);
if (ret)
@@ -161,13 +276,14 @@ int psp_dev_init(struct sp_device *sp)
goto e_err;
}
+ /* master device must be set for platform access */
+ if (psp->sp->set_psp_master_device)
+ psp->sp->set_psp_master_device(psp->sp);
+
ret = psp_init(psp);
if (ret)
goto e_irq;
- if (sp->set_psp_master_device)
- sp->set_psp_master_device(sp);
-
/* Enable interrupt */
iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
@@ -176,6 +292,9 @@ int psp_dev_init(struct sp_device *sp)
return 0;
e_irq:
+ if (sp->clear_psp_master_device)
+ sp->clear_psp_master_device(sp);
+
sp_free_psp_irq(psp->sp, psp);
e_err:
sp->psp_data = NULL;
@@ -201,6 +320,12 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ sfs_dev_destroy(psp);
+
+ dbc_dev_destroy(psp);
+
+ platform_access_dev_destroy(psp);
+
sp_free_psp_irq(sp, psp);
if (sp->clear_psp_master_device)
@@ -219,18 +344,6 @@ void psp_clear_sev_irq_handler(struct psp_device *psp)
psp_set_sev_irq_handler(psp, NULL, NULL);
}
-void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
- void *data)
-{
- psp->tee_irq_data = data;
- psp->tee_irq_handler = handler;
-}
-
-void psp_clear_tee_irq_handler(struct psp_device *psp)
-{
- psp_set_tee_irq_handler(psp, NULL, NULL);
-}
-
struct psp_device *psp_get_master_device(void)
{
struct sp_device *sp = sp_get_psp_master_device();
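
Acknowledging the interrupt status before invoking the sub-handlers (rather than after, as the removed lines did) closes a window in which an event arriving while a handler runs would be acked together with the old one and lost. The general pattern, as a hedged standalone sketch (register offset and device struct are hypothetical):

#include <linux/interrupt.h>
#include <linux/io.h>

#define INTSTS 0x10     /* hypothetical status register offset */

struct example_dev {
        void __iomem *regs;
};

static void handle_events(struct example_dev *dev, u32 status)
{
        /* dispatch to sub-handlers */
}

static irqreturn_t example_irq_handler(int irq, void *data)
{
        struct example_dev *dev = data;
        u32 status = ioread32(dev->regs + INTSTS);

        /*
         * Ack first: an event raised from here on sets the status bit again
         * and re-triggers the IRQ instead of being cleared underneath its
         * handler.
         */
        iowrite32(status, dev->regs + INTSTS);

        if (status)
                handle_events(dev, status);

        return IRQ_HANDLED;
}
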
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index d528eb04c3ef..268c83f298cb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -14,18 +14,42 @@
#include <linux/list.h>
#include <linux/bits.h>
#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/psp.h>
+#include <linux/psp-platform-access.h>
#include "sp-dev.h"
-#define PSP_CMDRESP_RESP BIT(31)
-#define PSP_CMDRESP_ERR_MASK 0xffff
-
#define MAX_PSP_NAME_LEN 16
extern struct psp_device *psp_master;
typedef void (*psp_irq_handler_t)(int, void *, unsigned int);
+union psp_cap_register {
+ unsigned int raw;
+ struct {
+ unsigned int sev :1,
+ tee :1,
+ dbc_thru_ext :1,
+ sfs :1,
+ rsvd1 :3,
+ security_reporting :1,
+ fused_part :1,
+ rsvd2 :1,
+ debug_lock_on :1,
+ rsvd3 :2,
+ tsme_status :1,
+ rsvd4 :1,
+ anti_rollback_status :1,
+ rpmc_production_enabled :1,
+ rpmc_spirom_available :1,
+ hsp_tpm_available :1,
+ rom_armor_enforced :1,
+ rsvd5 :12;
+ };
+};
+
struct psp_device {
struct list_head entry;
@@ -36,47 +60,78 @@ struct psp_device {
struct sp_device *sp;
void __iomem *io_regs;
+ struct mutex mailbox_mutex;
psp_irq_handler_t sev_irq_handler;
void *sev_irq_data;
- psp_irq_handler_t tee_irq_handler;
- void *tee_irq_data;
-
void *sev_data;
void *tee_data;
+ void *platform_access_data;
+ void *dbc_data;
+ void *sfs_data;
- unsigned int capability;
+ union psp_cap_register capability;
};
void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
void *data);
void psp_clear_sev_irq_handler(struct psp_device *psp);
-void psp_set_tee_irq_handler(struct psp_device *psp, psp_irq_handler_t handler,
- void *data);
-void psp_clear_tee_irq_handler(struct psp_device *psp);
-
struct psp_device *psp_get_master_device(void);
-#define PSP_CAPABILITY_SEV BIT(0)
-#define PSP_CAPABILITY_TEE BIT(1)
-#define PSP_CAPABILITY_PSP_SECURITY_REPORTING BIT(7)
+/**
+ * enum psp_cmd - PSP mailbox commands
+ * @PSP_CMD_TEE_RING_INIT: Initialize TEE ring buffer
+ * @PSP_CMD_TEE_RING_DESTROY: Destroy TEE ring buffer
+ * @PSP_CMD_TEE_EXTENDED_CMD: Extended command
+ * @PSP_CMD_MAX: Maximum command id
+ */
+enum psp_cmd {
+ PSP_CMD_TEE_RING_INIT = 1,
+ PSP_CMD_TEE_RING_DESTROY = 2,
+ PSP_CMD_TEE_EXTENDED_CMD = 14,
+ PSP_CMD_MAX = 15,
+};
-#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8
-/*
- * The PSP doesn't directly store these bits in the capability register
- * but instead copies them from the results of query command.
- *
- * The offsets from the query command are below, and shifted when used.
+int psp_mailbox_command(struct psp_device *psp, enum psp_cmd cmd, void *cmdbuff,
+ unsigned int timeout_msecs, unsigned int *cmdresp);
+
+/**
+ * struct psp_ext_req_buffer_hdr - Structure of the extended command header
+ * @payload_size: total payload size
+ * @sub_cmd_id: extended command ID
+ * @status: status of command execution (out)
+ */
+struct psp_ext_req_buffer_hdr {
+ u32 payload_size;
+ u32 sub_cmd_id;
+ u32 status;
+} __packed;
+
+struct psp_ext_request {
+ struct psp_ext_req_buffer_hdr header;
+ void *buf;
+} __packed;
+
+/**
+ * enum psp_sub_cmd - PSP mailbox sub commands
+ * @PSP_SUB_CMD_DBC_GET_NONCE: Get nonce from DBC
+ * @PSP_SUB_CMD_DBC_SET_UID: Set UID for DBC
+ * @PSP_SUB_CMD_DBC_GET_PARAMETER: Get parameter from DBC
+ * @PSP_SUB_CMD_DBC_SET_PARAMETER: Set parameter for DBC
+ * @PSP_SUB_CMD_SFS_GET_FW_VERS: Get firmware versions for ASP and other MP
+ * @PSP_SUB_CMD_SFS_UPDATE: Command to load, verify and execute SFS package
*/
-#define PSP_SECURITY_FUSED_PART BIT(0)
-#define PSP_SECURITY_DEBUG_LOCK_ON BIT(2)
-#define PSP_SECURITY_TSME_STATUS BIT(5)
-#define PSP_SECURITY_ANTI_ROLLBACK_STATUS BIT(7)
-#define PSP_SECURITY_RPMC_PRODUCTION_ENABLED BIT(8)
-#define PSP_SECURITY_RPMC_SPIROM_AVAILABLE BIT(9)
-#define PSP_SECURITY_HSP_TPM_AVAILABLE BIT(10)
-#define PSP_SECURITY_ROM_ARMOR_ENFORCED BIT(11)
+enum psp_sub_cmd {
+ PSP_SUB_CMD_DBC_GET_NONCE = PSP_DYNAMIC_BOOST_GET_NONCE,
+ PSP_SUB_CMD_DBC_SET_UID = PSP_DYNAMIC_BOOST_SET_UID,
+ PSP_SUB_CMD_DBC_GET_PARAMETER = PSP_DYNAMIC_BOOST_GET_PARAMETER,
+ PSP_SUB_CMD_DBC_SET_PARAMETER = PSP_DYNAMIC_BOOST_SET_PARAMETER,
+ PSP_SUB_CMD_SFS_GET_FW_VERS = PSP_SFS_GET_FW_VERSIONS,
+ PSP_SUB_CMD_SFS_UPDATE = PSP_SFS_UPDATE,
+};
+int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
+ struct psp_ext_request *req);
#endif /* __PSP_DEV_H */
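
Replacing the PSP_CAPABILITY_* masks with union psp_cap_register lets code test features by name (psp->capability.sev) while keeping .raw for register reads and the HSTI merge. Bitfield layout is compiler- and endian-dependent, which is tolerable here only because CRYPTO_DEV_SP_PSP depends on X86_64. A hedged usage sketch (the register offset parameter is assumed):

static void report_caps_sketch(void __iomem *io_regs, unsigned int feature_reg)
{
        union psp_cap_register cap;

        cap.raw = ioread32(io_regs + feature_reg);
        if (cap.sev)
                pr_info("SEV supported\n");
        if (cap.security_reporting && cap.tsme_status)
                pr_info("TSME enabled\n");
}
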
diff --git a/drivers/crypto/ccp/sev-dev-tio.c b/drivers/crypto/ccp/sev-dev-tio.c
new file mode 100644
index 000000000000..9a98f98c20a7
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to PSP for CCP/SEV-TIO/SNP-VM
+
+#include <linux/pci.h>
+#include <linux/tsm.h>
+#include <linux/psp.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+#include <linux/pci-doe.h>
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+#include <asm/page.h>
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+#define to_tio_status(dev_data) \
+ (container_of((dev_data), struct tio_dsm, data)->sev->tio_status)
+
+#define SLA_PAGE_TYPE_DATA 0
+#define SLA_PAGE_TYPE_SCATTER 1
+#define SLA_PAGE_SIZE_4K 0
+#define SLA_PAGE_SIZE_2M 1
+#define SLA_SZ(s) ((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K)
+#define SLA_SCATTER_LEN(s) (SLA_SZ(s) / sizeof(struct sla_addr_t))
+#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
+#define SLA_NULL ((struct sla_addr_t) { 0 })
+#define IS_SLA_NULL(s) ((s).sla == SLA_NULL.sla)
+#define IS_SLA_EOL(s) ((s).sla == SLA_EOL.sla)
+
+static phys_addr_t sla_to_pa(struct sla_addr_t sla)
+{
+ u64 pfn = sla.pfn;
+ u64 pa = pfn << PAGE_SHIFT;
+
+ return pa;
+}
+
+static void *sla_to_va(struct sla_addr_t sla)
+{
+ void *va = __va(__sme_clr(sla_to_pa(sla)));
+
+ return va;
+}
+
+#define sla_to_pfn(sla) (__pa(sla_to_va(sla)) >> PAGE_SHIFT)
+#define sla_to_page(sla) virt_to_page(sla_to_va(sla))
+
+static struct sla_addr_t make_sla(struct page *pg, bool stp)
+{
+ u64 pa = __sme_set(page_to_phys(pg));
+ struct sla_addr_t ret = {
+ .pfn = pa >> PAGE_SHIFT,
+ .page_size = SLA_PAGE_SIZE_4K, /* Do not do SLA_PAGE_SIZE_2M ATM */
+ .page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA
+ };
+
+ return ret;
+}
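
A scatter-list address (SLA) packs a 40-bit PFN plus page-size and page-type bits into 64 bits; make_sla() folds the SME C-bit into the encoded physical address and sla_to_va()/sla_to_page() strip it again. A hedged round-trip sketch using only the helpers defined above:

static void sla_roundtrip_sketch(void)
{
        struct page *pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
        struct sla_addr_t sla;

        if (!pg)
                return;

        sla = make_sla(pg, false);              /* 4K data page, not scatter */
        WARN_ON(sla_to_page(sla) != pg);        /* C-bit round trip preserved */
        __free_page(pg);
}
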
+
+/* the BUFFER Structure */
+#define SLA_BUFFER_FLAG_ENCRYPTION BIT(0)
+
+/*
+ * struct sla_buffer_hdr - Scatter list address buffer header
+ *
+ * @capacity_sz: Total capacity of the buffer in bytes
+ * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B
+ * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted)
+ * @iv: Initialization vector used for encryption
+ * @authtag: Authentication tag for encrypted buffer
+ */
+struct sla_buffer_hdr {
+ u32 capacity_sz;
+ u32 payload_sz; /* The size of BUFFER_PAYLOAD in bytes. Must be multiple of 32B */
+ u32 flags;
+ u8 reserved1[4];
+ u8 iv[16]; /* IV used for the encryption of this buffer */
+ u8 authtag[16]; /* Authentication tag for this buffer */
+ u8 reserved2[16];
+} __packed;
+
+enum spdm_data_type_t {
+ DOBJ_DATA_TYPE_SPDM = 0x1,
+ DOBJ_DATA_TYPE_SECURE_SPDM = 0x2,
+};
+
+struct spdm_dobj_hdr_req {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+struct spdm_dobj_hdr_resp {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */
+struct spdm_dobj_hdr_cert;
+struct spdm_dobj_hdr_meas;
+struct spdm_dobj_hdr_report;
+
+/* Used in all SPDM-aware TIO commands */
+struct spdm_ctrl {
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+} __packed;
+
+static size_t sla_dobj_id_to_size(u8 id)
+{
+ size_t n;
+
+ BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10);
+ switch (id) {
+ case SPDM_DOBJ_ID_REQ:
+ n = sizeof(struct spdm_dobj_hdr_req);
+ break;
+ case SPDM_DOBJ_ID_RESP:
+ n = sizeof(struct spdm_dobj_hdr_resp);
+ break;
+ default:
+ WARN_ON(1);
+ n = 0;
+ break;
+ }
+
+ return n;
+}
+
+#define SPDM_DOBJ_HDR_SIZE(hdr) sla_dobj_id_to_size((hdr)->id)
+#define SPDM_DOBJ_DATA(hdr) ((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr))
+#define SPDM_DOBJ_LEN(hdr) ((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr))
+
+#define sla_to_dobj_resp_hdr(buf) ((struct spdm_dobj_hdr_resp *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP))
+#define sla_to_dobj_req_hdr(buf) ((struct spdm_dobj_hdr_req *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ))
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return NULL;
+
+ return (struct spdm_dobj_hdr *) &buf[1];
+}
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(!hdr))
+ return NULL;
+
+ if (hdr->id != check_dobjid) {
+ pr_err("! ERROR: expected %d, found %d\n", check_dobjid, hdr->id);
+ return NULL;
+ }
+
+ return hdr;
+}
+
+static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP))
+ return NULL;
+
+ if (!hdr)
+ return NULL;
+
+ return (u8 *) hdr + sla_dobj_id_to_size(dobjid);
+}
+
+/*
+ * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command
+ *
+ * @length: Length of this command buffer in bytes
+ * @status_paddr: System physical address of the TIO_STATUS structure
+ */
+struct sev_data_tio_status {
+ u32 length;
+ u8 reserved[4];
+ u64 status_paddr;
+} __packed;
+
+/* TIO_INIT */
+struct sev_data_tio_init {
+ u32 length;
+ u8 reserved[12];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_create - TIO_DEV_CREATE command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer
+ * @device_id: PCIe Routing Identifier of the device to connect to
+ * @root_port_id: PCIe Routing Identifier of the root port of the device
+ * @segment_id: PCIe Segment Identifier of the device to connect to
+ */
+struct sev_data_tio_dev_create {
+ u32 length;
+ u8 reserved1[4];
+ struct sla_addr_t dev_ctx_sla;
+ u16 device_id;
+ u16 root_port_id;
+ u8 segment_id;
+ u8 reserved2[11];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage.
+ * Setting the kth bit of the TC_MASK to 1 indicates that the traffic
+ * class k will be initialized
+ * @cert_slot: Slot number of the certificate requested for constructing the SPDM session
+ * @ide_stream_id: IDE stream IDs to be associated with this device.
+ * Valid only if corresponding bit in TC_MASK is set
+ */
+struct sev_data_tio_dev_connect {
+ u32 length;
+ u8 reserved1[4];
+ struct spdm_ctrl spdm_ctrl;
+ u8 reserved2[8];
+ struct sla_addr_t dev_ctx_sla;
+ u8 tc_mask;
+ u8 cert_slot;
+ u8 reserved3[6];
+ u8 ide_stream_id[8];
+ u8 reserved4[8];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+#define TIO_DEV_DISCONNECT_FLAG_FORCE BIT(0)
+
+struct sev_data_tio_dev_disconnect {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @meas_nonce: Nonce for measurement freshness verification
+ */
+#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM BIT(0)
+
+struct sev_data_tio_dev_meas {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+ u8 meas_nonce[32];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+struct sev_data_tio_dev_certs {
+ u32 length;
+ u8 reserved[4];
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ *
+ * This command reclaims resources associated with a device context.
+ */
+struct sev_data_tio_dev_reclaim {
+ u32 length;
+ u8 reserved[4];
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
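+/*
+ * Map an SLA buffer into the kernel virtual address space. A scatter
+ * SLA points to a page of SLA entries terminated by an EOL marker,
+ * each entry describing one 4K page of the buffer.
+ */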
+static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla)
+{
+ struct sla_buffer_hdr *buf;
+
+ BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40);
+ if (IS_SLA_NULL(sla))
+ return NULL;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K))
+ return NULL;
+
+ if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER))
+ return NULL;
+
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (WARN_ON_ONCE(!npages))
+ return NULL;
+
+ struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL);
+
+ if (!pp)
+ return NULL;
+
+ for (i = 0; i < npages; ++i)
+ pp[i] = sla_to_page(scatter[i]);
+
+ buf = vm_map_ram(pp, npages, 0);
+ kfree(pp);
+ } else {
+ struct page *pg = sla_to_page(sla);
+
+ buf = vm_map_ram(&pg, 1, 0);
+ }
+
+ return buf;
+}
+
+static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (!npages)
+ return;
+
+ vm_unmap_ram(buf, npages);
+ } else {
+ vm_unmap_ram(buf, 1);
+ }
+}
+
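+/* Initialize an empty response data object for the firmware to fill in */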
+static void dobj_response_init(struct sla_buffer_hdr *buf)
+{
+ struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf);
+
+ dobj->id = SPDM_DOBJ_ID_RESP;
+ dobj->version.major = 0x1;
+ dobj->version.minor = 0;
+ dobj->length = 0;
+ buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length;
+}
+
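+/*
+ * Free an SLA buffer. Pages in the firmware state must be reclaimed
+ * first; if reclaiming fails, the pages are leaked rather than returned
+ * to the page allocator.
+ */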
+static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state)
+{
+ unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ int ret = 0, i;
+
+ if (IS_SLA_NULL(sla))
+ return;
+
+ if (firmware_state) {
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ scatter = sla_to_va(sla);
+
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+
+ ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false);
+ if (ret)
+ break;
+ }
+ } else {
+ ret = snp_reclaim_pages(sla_to_pa(sla), 1, false);
+ }
+ }
+
+ if (WARN_ON(ret))
+ return;
+
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+ free_page((unsigned long)sla_to_va(scatter[i]));
+ }
+ }
+
+ free_page((unsigned long)sla_to_va(sla));
+}
+
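+/*
+ * Allocate an SLA buffer of @len bytes. Buffers larger than a page are
+ * described by a page of scatter entries. When @firmware_state is set,
+ * the data pages are transitioned to the firmware-owned state in the RMP.
+ */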
+static struct sla_addr_t sla_alloc(size_t len, bool firmware_state)
+{
+ unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ struct sla_addr_t ret = SLA_NULL;
+ struct sla_buffer_hdr *buf;
+ struct page *pg;
+
+ if (npages == 0)
+ return ret;
+
+	/* One scatter page holds the entries; reserve one slot for the EOL marker */
+	if (WARN_ON_ONCE(npages > ((PAGE_SIZE / sizeof(struct sla_addr_t)) - 1)))
+ return ret;
+
+ BUILD_BUG_ON(PAGE_SIZE < SZ_4K);
+
+ if (npages > 1) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, true);
+ scatter = page_to_virt(pg);
+ for (i = 0; i < npages; ++i) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ goto no_reclaim_exit;
+
+ scatter[i] = make_sla(pg, false);
+ }
+ scatter[i] = SLA_EOL;
+ } else {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, false);
+ }
+
+ buf = sla_buffer_map(ret);
+ if (!buf)
+ goto no_reclaim_exit;
+
+ buf->capacity_sz = (npages << PAGE_SHIFT);
+ sla_buffer_unmap(ret, buf);
+
+ if (firmware_state) {
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (rmp_make_private(sla_to_pfn(scatter[i]), 0,
+ PG_LEVEL_4K, 0, true))
+ goto free_exit;
+ }
+ } else {
+ if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true))
+ goto no_reclaim_exit;
+ }
+ }
+
+ return ret;
+
+no_reclaim_exit:
+ firmware_state = false;
+free_exit:
+ sla_free(ret, len, firmware_state);
+ return SLA_NULL;
+}
+
+/* Expand a buffer; only firmware-owned buffers are supported for now */
+static int sla_expand(struct sla_addr_t *sla, size_t *len)
+{
+ struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf;
+ struct sla_addr_t oldsla = *sla, newsla;
+ size_t oldlen = *len, newlen;
+
+ if (!oldbuf)
+ return -EFAULT;
+
+ newlen = oldbuf->capacity_sz;
+ if (oldbuf->capacity_sz == oldlen) {
+		/* This buffer does not require expansion; it must be the other one */
+ sla_buffer_unmap(oldsla, oldbuf);
+ return 1;
+ }
+
+	pr_notice("Expanding buffer from %zu to %zu bytes\n", oldlen, newlen);
+
+ newsla = sla_alloc(newlen, true);
+ if (IS_SLA_NULL(newsla))
+ return -ENOMEM;
+
+ newbuf = sla_buffer_map(newsla);
+ if (!newbuf) {
+ sla_free(newsla, newlen, true);
+ return -EFAULT;
+ }
+
+ memcpy(newbuf, oldbuf, oldlen);
+
+ sla_buffer_unmap(newsla, newbuf);
+ sla_free(oldsla, oldlen, true);
+ *sla = newsla;
+ *len = newlen;
+
+ return 0;
+}
+
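+/*
+ * Execute a TIO command and handle the two special firmware responses:
+ * EXPAND_BUFFER_LENGTH_REQUEST retries the command after growing the
+ * output/scratch buffers, and SPDM_REQUEST stashes the command so that
+ * sev_tio_continue() can resume it once the caller has carried the SPDM
+ * message to the device and back over DOE.
+ */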
+static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret,
+ struct tsm_dsm_tio *dev_data)
+{
+ int rc;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+
+ if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST))
+ return -EIO;
+
+ if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) {
+ int rc1, rc2;
+
+ rc1 = sla_expand(&dev_data->output, &dev_data->output_len);
+ if (rc1 < 0)
+ return rc1;
+
+ rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len);
+ if (rc2 < 0)
+ return rc2;
+
+ if (!rc1 && !rc2)
+			/* Neither buffer required expansion; the firmware request makes no sense */
+ return -EFAULT;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+ }
+
+ if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) {
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ struct spdm_dobj_hdr_req *req_hdr;
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t resp_len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr));
+
+ if (!dev_data->cmd) {
+ if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data)))
+ return -EINVAL;
+ if (WARN_ON(data_len > sizeof(dev_data->cmd_data)))
+ return -EFAULT;
+ memcpy(dev_data->cmd_data, data, data_len);
+ memset(&dev_data->cmd_data[data_len], 0xFF,
+ sizeof(dev_data->cmd_data) - data_len);
+ dev_data->cmd = cmd;
+ }
+
+ req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf);
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ switch (req_hdr->data_type) {
+ case DOBJ_DATA_TYPE_SPDM:
+ rc = PCI_DOE_FEATURE_CMA;
+ break;
+ case DOBJ_DATA_TYPE_SECURE_SPDM:
+ rc = PCI_DOE_FEATURE_SSESSION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ resp_hdr->data_type = req_hdr->data_type;
+ dev_data->spdm.req_len = req_hdr->hdr.length -
+ sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ);
+ dev_data->spdm.rsp_len = resp_len;
+ } else if (dev_data && dev_data->cmd) {
+		/* On error or success alike, stop bouncing SPDM messages */
+ memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data));
+ dev_data->cmd = 0;
+ }
+
+ return rc;
+}
+
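+/* Resume a command that the firmware paused for an SPDM exchange */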
+int sev_tio_continue(struct tsm_dsm_tio *dev_data)
+{
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ int ret;
+
+ if (!dev_data || !dev_data->cmd)
+ return -EINVAL;
+
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ dev_data->spdm.rsp_len, 32);
+ dev_data->respbuf->payload_sz = resp_hdr->hdr.length;
+
+ ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0,
+ &dev_data->psp_ret, dev_data);
+ if (ret)
+ return ret;
+
+ if (dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data)
+{
+ ctrl->req = dev_data->req;
+ ctrl->resp = dev_data->resp;
+ ctrl->scratch = dev_data->scratch;
+ ctrl->output = dev_data->output;
+}
+
+static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ sizeof(struct sla_buffer_hdr));
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ sla_buffer_unmap(dev_data->resp, dev_data->respbuf);
+ sla_buffer_unmap(dev_data->req, dev_data->reqbuf);
+ spdm->rsp = NULL;
+ spdm->req = NULL;
+ sla_free(dev_data->req, len, true);
+ sla_free(dev_data->resp, len, false);
+ sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true);
+
+ dev_data->req.sla = 0;
+ dev_data->resp.sla = 0;
+ dev_data->scratch.sla = 0;
+ dev_data->respbuf = NULL;
+ dev_data->reqbuf = NULL;
+ sla_free(dev_data->output, tio_status->spdm_out_size_max, true);
+}
+
+static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct tsm_spdm *spdm = &dev_data->spdm;
+ int ret;
+
+ dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true);
+ dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false);
+ dev_data->scratch_len = tio_status->spdm_scratch_size_max;
+ dev_data->scratch = sla_alloc(dev_data->scratch_len, true);
+ dev_data->output_len = tio_status->spdm_out_size_max;
+ dev_data->output = sla_alloc(dev_data->output_len, true);
+
+	if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) ||
+	    IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->output)) {
+ ret = -ENOMEM;
+ goto free_spdm_exit;
+ }
+
+ dev_data->reqbuf = sla_buffer_map(dev_data->req);
+ dev_data->respbuf = sla_buffer_map(dev_data->resp);
+ if (!dev_data->reqbuf || !dev_data->respbuf) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ);
+ spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP);
+ if (!spdm->req || !spdm->rsp) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ dobj_response_init(dev_data->respbuf);
+
+ return 0;
+
+free_spdm_exit:
+ spdm_ctrl_free(dev_data);
+ return ret;
+}
+
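+/*
+ * Query TIO_STATUS and, if SEV-TIO is enabled but not yet initialized,
+ * run TIO_INIT followed by a second TIO_STATUS to refresh the caller's
+ * status page.
+ */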
+int sev_tio_init_locked(void *tio_status_page)
+{
+ struct sev_tio_status *tio_status = tio_status_page;
+ struct sev_data_tio_status data_status = {
+ .length = sizeof(data_status),
+ };
+ int ret, psp_ret;
+
+ data_status.status_paddr = __psp_pa(tio_status_page);
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) ||
+ tio_status->reserved)
+ return -EFAULT;
+
+ if (!tio_status->tio_en && !tio_status->tio_init_done)
+ return -ENOENT;
+
+ if (tio_status->tio_init_done)
+ return -EBUSY;
+
+ struct sev_data_tio_init ti = { .length = sizeof(ti) };
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret);
+ if (ret)
+ return ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id,
+ u16 root_port_id, u8 segment_id)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_create create = {
+ .length = sizeof(create),
+ .device_id = device_id,
+ .root_port_id = root_port_id,
+ .segment_id = segment_id,
+ };
+ void *data_pg;
+ int ret;
+
+ dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true);
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return -ENOMEM;
+
+ data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
+ if (!data_pg) {
+ ret = -ENOMEM;
+ goto free_ctx_exit;
+ }
+
+ create.dev_ctx_sla = dev_data->dev_ctx;
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret);
+ if (ret)
+ goto free_data_pg_exit;
+
+ dev_data->data_pg = data_pg;
+
+ return 0;
+
+free_data_pg_exit:
+ snp_free_firmware_page(data_pg);
+free_ctx_exit:
+ sla_free(create.dev_ctx_sla, tio_status->devctx_size, true);
+ return ret;
+}
+
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_reclaim r = {
+ .length = sizeof(r),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ };
+ int ret;
+
+ if (dev_data->data_pg) {
+ snp_free_firmware_page(dev_data->data_pg);
+ dev_data->data_pg = NULL;
+ }
+
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return 0;
+
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret);
+
+ sla_free(dev_data->dev_ctx, tio_status->devctx_size, true);
+ dev_data->dev_ctx = SLA_NULL;
+
+ spdm_ctrl_free(dev_data);
+
+ return ret;
+}
+
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot)
+{
+ struct sev_data_tio_dev_connect connect = {
+ .length = sizeof(connect),
+ .tc_mask = tc_mask,
+ .cert_slot = cert_slot,
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .ide_stream_id = {
+ ids[0], ids[1], ids[2], ids[3],
+ ids[4], ids[5], ids[6], ids[7]
+ },
+ };
+ int ret;
+
+ if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+ if (!(tc_mask & 1))
+ return -EINVAL;
+
+ ret = spdm_ctrl_alloc(dev_data);
+ if (ret)
+ return ret;
+
+ spdm_ctrl_init(&connect.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force)
+{
+ struct sev_data_tio_dev_disconnect dc = {
+ .length = sizeof(dc),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .flags = force ? TIO_DEV_DISCONNECT_FLAG_FORCE : 0,
+ };
+
+ if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+
+ spdm_ctrl_init(&dc.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_TIO_STATUS: return sizeof(struct sev_data_tio_status);
+ case SEV_CMD_TIO_INIT: return sizeof(struct sev_data_tio_init);
+ case SEV_CMD_TIO_DEV_CREATE: return sizeof(struct sev_data_tio_dev_create);
+ case SEV_CMD_TIO_DEV_RECLAIM: return sizeof(struct sev_data_tio_dev_reclaim);
+ case SEV_CMD_TIO_DEV_CONNECT: return sizeof(struct sev_data_tio_dev_connect);
+ case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect);
+ default: return 0;
+ }
+}
diff --git a/drivers/crypto/ccp/sev-dev-tio.h b/drivers/crypto/ccp/sev-dev-tio.h
new file mode 100644
index 000000000000..67512b3dbc53
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PSP_SEV_TIO_H__
+#define __PSP_SEV_TIO_H__
+
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+#include <linux/tsm.h>
+#include <uapi/linux/psp-sev.h>
+
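+/*
+ * Scatter List Address: a 64-bit PSP address encoding a page frame
+ * number plus flags that say whether the page holds data or a list
+ * of further SLA entries.
+ */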
+struct sla_addr_t {
+ union {
+ u64 sla;
+ struct {
+ u64 page_type :1,
+ page_size :1,
+ reserved1 :10,
+ pfn :40,
+ reserved2 :12;
+ };
+ };
+} __packed;
+
+#define SEV_TIO_MAX_COMMAND_LENGTH 128
+
+/* SPDM control structure for DOE */
+struct tsm_spdm {
+ unsigned long req_len;
+ void *req;
+ unsigned long rsp_len;
+ void *rsp;
+};
+
+/* Describes a TIO device */
+struct tsm_dsm_tio {
+ u8 cert_slot;
+ struct sla_addr_t dev_ctx;
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+ size_t output_len;
+ size_t scratch_len;
+ struct tsm_spdm spdm;
+ struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */
+ struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */
+
+ int cmd;
+ int psp_ret;
+ u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH];
+ void *data_pg; /* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */
+
+#define TIO_IDE_MAX_TC 8
+ struct pci_ide *ide[TIO_IDE_MAX_TC];
+};
+
+/* TSM structure for PF0, pointed to by pci_dev->tsm */
+struct tio_dsm {
+ struct pci_tsm_pf0 tsm;
+ struct tsm_dsm_tio data;
+ struct sev_device *sev;
+};
+
+/* Data object IDs */
+#define SPDM_DOBJ_ID_NONE 0
+#define SPDM_DOBJ_ID_REQ 1
+#define SPDM_DOBJ_ID_RESP 2
+
+struct spdm_dobj_hdr {
+ u32 id; /* Data object type identifier */
+ u32 length; /* Length of the data object, INCLUDING THIS HEADER */
+ struct { /* Version of the data object structure */
+ u8 minor;
+ u8 major;
+ } version;
+} __packed;
+
+/**
+ * struct sev_tio_status - TIO_STATUS command's info_paddr buffer
+ *
+ * @length: Length of this structure in bytes
+ * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO
+ * @tio_init_done: Indicates TIO_INIT has been invoked
+ * @spdm_req_size_min: Minimum SPDM request buffer size in bytes
+ * @spdm_req_size_max: Maximum SPDM request buffer size in bytes
+ * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes
+ * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes
+ * @spdm_out_size_min: Minimum SPDM output buffer size in bytes
+ * @spdm_out_size_max: Maximum SPDM output buffer size in bytes
+ * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes
+ * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes
+ * @devctx_size: Size of a device context buffer in bytes
+ * @tdictx_size: Size of a TDI context buffer in bytes
+ * @tio_crypto_alg: TIO crypto algorithms supported
+ */
+struct sev_tio_status {
+ u32 length;
+ u32 tio_en :1,
+ tio_init_done :1,
+ reserved :30;
+ u32 spdm_req_size_min;
+ u32 spdm_req_size_max;
+ u32 spdm_scratch_size_min;
+ u32 spdm_scratch_size_max;
+ u32 spdm_out_size_min;
+ u32 spdm_out_size_max;
+ u32 spdm_rsp_size_min;
+ u32 spdm_rsp_size_max;
+ u32 devctx_size;
+ u32 tdictx_size;
+ u32 tio_crypto_alg;
+ u8 reserved2[12];
+} __packed;
+
+int sev_tio_init_locked(void *tio_status_page);
+int sev_tio_continue(struct tsm_dsm_tio *dev_data);
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id,
+ u8 segment_id);
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot);
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force);
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data);
+
+#endif /* __PSP_SEV_TIO_H__ */
diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
new file mode 100644
index 000000000000..ea29cd5d0ff9
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tsm.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to CCP/SEV-TIO for the generic PCIe TDISP module
+
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/tsm.h>
+#include <linux/iommu.h>
+#include <linux/pci-doe.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+MODULE_IMPORT_NS("PCI_IDE");
+
+#define TIO_DEFAULT_NR_IDE_STREAMS 1
+
+static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
+module_param_named(ide_nr, nr_ide_streams, uint, 0644);
+MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");
+
+#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
+#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
+#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
+#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)
+
+#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))
+
+static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
+{
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ /* Check the main command handler response before entering the loop */
+ if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ if (ret <= 0)
+ return ret;
+
+ /* ret > 0 means "SPDM requested" */
+ while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
+ ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
+ spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
+ if (ret < 0)
+ break;
+
+ WARN_ON_ONCE(ret == 0); /* The response should never be empty */
+ spdm->rsp_len = ret;
+ ret = sev_tio_continue(dev_data);
+ }
+
+ return ret;
+}
+
+static int stream_enable(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+ int ret;
+
+ ret = pci_ide_stream_enable(rp, ide);
+ if (ret)
+ return ret;
+
+ ret = pci_ide_stream_enable(ide->pdev, ide);
+ if (ret)
+ pci_ide_stream_disable(rp, ide);
+
+ return ret;
+}
+
+static int streams_enable(struct pci_ide **ide)
+{
+ int ret = 0;
+
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = stream_enable(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void stream_disable(struct pci_ide *ide)
+{
+ pci_ide_stream_disable(ide->pdev, ide);
+ pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_disable(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ stream_disable(ide[i]);
+}
+
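+/*
+ * Program the IDE stream on both ends of the link. The RID association
+ * ranges are left wide open and the CFG/TEE-Limit bits are forced to
+ * fixed values on the endpoint and the root port.
+ */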
+static void stream_setup(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ ide->partner[PCI_IDE_EP].rid_start = 0;
+ ide->partner[PCI_IDE_EP].rid_end = 0xffff;
+ ide->partner[PCI_IDE_RP].rid_start = 0;
+ ide->partner[PCI_IDE_RP].rid_end = 0xffff;
+
+ ide->pdev->ide_cfg = 0;
+ ide->pdev->ide_tee_limit = 1;
+ rp->ide_cfg = 1;
+ rp->ide_tee_limit = 0;
+
+	pci_warn(ide->pdev, "Forcing CFG/TEE for %s\n", pci_name(rp));
+ pci_ide_stream_setup(ide->pdev, ide);
+ pci_ide_stream_setup(rp, ide);
+}
+
+static u8 streams_setup(struct pci_ide **ide, u8 *ids)
+{
+ bool def = false;
+ u8 tc_mask = 0;
+ int i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (!ide[i]) {
+ ids[i] = 0xFF;
+ continue;
+ }
+
+ tc_mask |= BIT(i);
+ ids[i] = ide[i]->stream_id;
+
+ if (!def) {
+ struct pci_ide_partner *settings;
+
+ settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
+ settings->default_stream = 1;
+ def = true;
+ }
+
+ stream_setup(ide[i]);
+ }
+
+ return tc_mask;
+}
+
+static int streams_register(struct pci_ide **ide)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = pci_ide_stream_register(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void streams_unregister(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ pci_ide_stream_unregister(ide[i]);
+}
+
+static void stream_teardown(struct pci_ide *ide)
+{
+ pci_ide_stream_teardown(ide->pdev, ide);
+ pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_teardown(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ stream_teardown(ide[i]);
+ pci_ide_stream_free(ide[i]);
+ ide[i] = NULL;
+ }
+ }
+}
+
+static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
+ unsigned int tc)
+{
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_ide *ide1;
+
+ if (ide[tc]) {
+		pci_err(pdev, "Stream for class=%d already registered\n", tc);
+ return -EBUSY;
+ }
+
+ /* FIXME: find a better way */
+ if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
+		pci_notice(pdev, "Enabling non-default number of streams (%u)\n", nr_ide_streams);
+ pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
+
+ ide1 = pci_ide_stream_alloc(pdev);
+ if (!ide1)
+		return -ENOMEM;
+
+ /* Blindly assign streamid=0 to TC=0, and so on */
+ ide1->stream_id = tc;
+
+ ide[tc] = ide1;
+
+ return 0;
+}
+
+static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
+{
+ struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
+ int rc;
+
+ if (!dsm)
+ return NULL;
+
+ rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
+ if (rc)
+ return NULL;
+
+ pci_dbg(pdev, "TSM enabled\n");
+ dsm->sev = sev;
+ return &no_free_ptr(dsm)->tsm.base_tsm;
+}
+
+static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
+{
+ struct sev_device *sev = tsm_dev_to_sev(tsmdev);
+
+ if (is_pci_tsm_pf0(pdev))
+ return tio_pf0_probe(pdev, sev);
+	return NULL;
+}
+
+static void dsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev = tsm->pdev;
+
+ pci_dbg(pdev, "TSM disabled\n");
+
+ if (is_pci_tsm_pf0(pdev)) {
+ struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);
+
+ pci_tsm_pf0_destructor(&dsm->tsm);
+ kfree(dsm);
+ }
+}
+
+static int dsm_create(struct tio_dsm *dsm)
+{
+ struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
+ u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
+ struct pci_dev *rootport = pcie_find_root_port(pdev);
+ u16 device_id = pci_dev_id(pdev);
+ u16 root_port_id;
+ u32 lnkcap = 0;
+
+ if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENODEV;
+
+ root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+
+ return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
+}
+
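+/*
+ * The "connect" flow: allocate an IDE stream for TC0, create the device
+ * context in the PSP, program IDE on both ends of the link, then run
+ * TIO_DEV_CONNECT (bouncing SPDM messages over DOE) before enabling and
+ * registering the streams.
+ */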
+static int dsm_connect(struct pci_dev *pdev)
+{
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ u8 ids[TIO_IDE_MAX_TC];
+ u8 tc_mask;
+ int ret;
+
+ if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
+ pci_err(pdev, "CMA DOE MB must support SSESSION\n");
+ return -EFAULT;
+ }
+
+ ret = stream_alloc(pdev, dev_data->ide, 0);
+ if (ret)
+ return ret;
+
+ ret = dsm_create(dsm);
+ if (ret)
+ goto ide_free_exit;
+
+ tc_mask = streams_setup(dev_data->ide, ids);
+
+ ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret)
+ goto free_exit;
+
+	ret = streams_enable(dev_data->ide);
+	if (ret)
+		goto free_exit;
+
+ ret = streams_register(dev_data->ide);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+
+free_exit:
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ide_free_exit:
+	streams_teardown(dev_data->ide);
+
+ return ret;
+}
+
+static void dsm_disconnect(struct pci_dev *pdev)
+{
+ bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ int ret;
+
+ ret = sev_tio_dev_disconnect(dev_data, force);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret && !force) {
+ ret = sev_tio_dev_disconnect(dev_data, true);
+ sev_tio_spdm_cmd(dsm, ret);
+ }
+
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ streams_unregister(dev_data->ide);
+ streams_teardown(dev_data->ide);
+}
+
+static struct pci_tsm_ops sev_tsm_ops = {
+ .probe = dsm_probe,
+ .remove = dsm_remove,
+ .connect = dsm_connect,
+ .disconnect = dsm_disconnect,
+};
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
+{
+ struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ struct tsm_dev *tsmdev;
+ int ret;
+
+ WARN_ON(sev->tio_status);
+
+ if (!t)
+ return;
+
+ ret = sev_tio_init_locked(tio_status_page);
+ if (ret) {
+		pr_warn("SEV-TIO initialization failed with %d\n", ret);
+ goto error_exit;
+ }
+
+ tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
+	if (IS_ERR(tsmdev)) {
+		ret = PTR_ERR(tsmdev);
+		goto error_exit;
+	}
+
+ memcpy(t, tio_status_page, sizeof(*t));
+
+ pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
+ "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
+ t->tio_en, t->tio_init_done,
+ t->spdm_req_size_min, t->spdm_req_size_max,
+ t->spdm_rsp_size_min, t->spdm_rsp_size_max,
+ t->spdm_scratch_size_min, t->spdm_scratch_size_max,
+ t->spdm_out_size_min, t->spdm_out_size_max,
+ t->devctx_size, t->tdictx_size,
+ t->tio_crypto_alg);
+
+ sev->tsmdev = tsmdev;
+ sev->tio_status = t;
+
+ return;
+
+error_exit:
+	kfree(t);
+	/* Report values from the (still live) status page, not the freed copy */
+	t = tio_status_page;
+	pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
+	       ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
+}
+
+void sev_tsm_uninit(struct sev_device *sev)
+{
+ if (sev->tsmdev)
+ tsm_unregister(sev->tsmdev);
+
+ sev->tsmdev = NULL;
+}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 06fc7156c04f..956ea609d0cc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -7,6 +7,7 @@
* Author: Brijesh Singh <brijesh.singh@amd.com>
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@@ -20,12 +21,20 @@
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
+#include <linux/panic_notifier.h>
#include <linux/gfp.h>
#include <linux/cpufeature.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
+#include <linux/psp.h>
+#include <linux/amd-iommu.h>
+#include <linux/crash_dump.h>
#include <asm/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/e820/types.h>
+#include <asm/sev.h>
+#include <asm/msr.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -34,6 +43,19 @@
#define SEV_FW_FILE "amd/sev.fw"
#define SEV_FW_NAME_SIZE 64
+/* Minimum firmware version required for the SEV-SNP support */
+#define SNP_MIN_API_MAJOR 1
+#define SNP_MIN_API_MINOR 51
+
+/*
+ * Maximum number of firmware-writable buffers that might be specified
+ * in the parameters of a legacy SEV command buffer.
+ */
+#define CMD_BUF_FW_WRITABLE_MAX 2
+
+/* Leave room in the descriptor array for an end-of-list indicator. */
+#define CMD_BUF_DESC_MAX (CMD_BUF_FW_WRITABLE_MAX + 1)
+
static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
@@ -53,20 +75,49 @@ static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
+#if IS_ENABLED(CONFIG_PCI_TSM)
+static bool sev_tio_enabled = true;
+module_param_named(tio, sev_tio_enabled, bool, 0444);
+MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
+#else
+static const bool sev_tio_enabled = false;
+#endif
+
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
static bool psp_dead;
static int psp_timeout;
+enum snp_hv_fixed_pages_state {
+ ALLOCATED,
+ HV_FIXED,
+};
+
+struct snp_hv_fixed_pages_entry {
+ struct list_head list;
+ struct page *page;
+ unsigned int order;
+ bool free;
+ enum snp_hv_fixed_pages_state page_state;
+};
+
+static LIST_HEAD(snp_hv_fixed_pages);
+
/* Trusted Memory Region (TMR):
* The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
* to allocate the memory, which will return aligned memory for the specified
* allocation order.
+ *
+ * When SEV-SNP is enabled the TMR needs to be 2MB aligned and 2MB sized.
*/
-#define SEV_ES_TMR_SIZE (1024 * 1024)
+#define SEV_TMR_SIZE (1024 * 1024)
+#define SNP_TMR_SIZE (2 * 1024 * 1024)
+
static void *sev_es_tmr;
+static size_t sev_es_tmr_size = SEV_TMR_SIZE;
/* INIT_EX NV Storage:
* The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page
@@ -76,6 +127,22 @@ static void *sev_es_tmr;
#define NV_LENGTH (32 * 1024)
static void *sev_init_ex_buffer;
+/*
+ * SEV_DATA_RANGE_LIST:
+ * Array containing range of pages that firmware transitions to HV-fixed
+ * page state.
+ */
+static struct sev_data_range_list *snp_range_list;
+
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic);
+
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg);
+
+static struct notifier_block snp_panic_notifier = {
+ .notifier_call = snp_shutdown_on_panic,
+};
+
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
struct sev_device *sev = psp_master->sev_data;
@@ -100,7 +167,7 @@ static void sev_irq_handler(int irq, void *data, unsigned int status)
/* Check if it is SEV command completion: */
reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
- if (reg & PSP_CMDRESP_RESP) {
+ if (FIELD_GET(PSP_CMDRESP_RESP, reg)) {
sev->int_rcvd = 1;
wake_up(&sev->int_queue);
}
@@ -111,6 +178,25 @@ static int sev_wait_cmd_ioc(struct sev_device *sev,
{
int ret;
+ /*
+ * If invoked during panic handling, local interrupts are disabled,
+ * so the PSP command completion interrupt can't be used. Poll for
+ * PSP command completion instead.
+ */
+ if (irqs_disabled()) {
+ unsigned long timeout_usecs = (timeout * USEC_PER_SEC) / 10;
+
+ /* Poll for SEV command completion: */
+ while (timeout_usecs--) {
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
+ if (*reg & PSP_CMDRESP_RESP)
+ return 0;
+
+ udelay(10);
+ }
+ return -ETIMEDOUT;
+ }
+
ret = wait_event_timeout(sev->int_queue,
sev->int_rcvd, timeout * HZ);
if (!ret)
@@ -126,6 +212,8 @@ static int sev_cmd_buffer_len(int cmd)
switch (cmd) {
case SEV_CMD_INIT: return sizeof(struct sev_data_init);
case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex);
+ case SEV_CMD_SNP_SHUTDOWN_EX: return sizeof(struct sev_data_snp_shutdown_ex);
+ case SEV_CMD_SNP_INIT_EX: return sizeof(struct sev_data_snp_init_ex);
case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
@@ -154,46 +242,45 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
- default: return 0;
+ case SEV_CMD_SNP_GCTX_CREATE: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_LAUNCH_START: return sizeof(struct sev_data_snp_launch_start);
+ case SEV_CMD_SNP_LAUNCH_UPDATE: return sizeof(struct sev_data_snp_launch_update);
+ case SEV_CMD_SNP_ACTIVATE: return sizeof(struct sev_data_snp_activate);
+ case SEV_CMD_SNP_DECOMMISSION: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_PAGE_RECLAIM: return sizeof(struct sev_data_snp_page_reclaim);
+ case SEV_CMD_SNP_GUEST_STATUS: return sizeof(struct sev_data_snp_guest_status);
+ case SEV_CMD_SNP_LAUNCH_FINISH: return sizeof(struct sev_data_snp_launch_finish);
+ case SEV_CMD_SNP_DBG_DECRYPT: return sizeof(struct sev_data_snp_dbg);
+ case SEV_CMD_SNP_DBG_ENCRYPT: return sizeof(struct sev_data_snp_dbg);
+ case SEV_CMD_SNP_PAGE_UNSMASH: return sizeof(struct sev_data_snp_page_unsmash);
+ case SEV_CMD_SNP_PLATFORM_STATUS: return sizeof(struct sev_data_snp_addr);
+ case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request);
+ case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config);
+ case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
+ case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
+ case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
+ default: return sev_tio_cmd_buffer_len(cmd);
}
return 0;
}
-static void *sev_fw_alloc(unsigned long len)
-{
- struct page *page;
-
- page = alloc_pages(GFP_KERNEL, get_order(len));
- if (!page)
- return NULL;
-
- return page_address(page);
-}
-
static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
{
- struct file *fp;
- struct path root;
- struct cred *cred;
- const struct cred *old_cred;
+ struct path root __free(path_put) = {};
task_lock(&init_task);
get_fs_root(init_task.fs, &root);
task_unlock(&init_task);
- cred = prepare_creds();
+ CLASS(prepare_creds, cred)();
if (!cred)
return ERR_PTR(-ENOMEM);
- cred->fsuid = GLOBAL_ROOT_UID;
- old_cred = override_creds(cred);
-
- fp = file_open_root(&root, filename, flags, mode);
- path_put(&root);
- revert_creds(old_cred);
+ cred->fsuid = GLOBAL_ROOT_UID;
- return fp;
+ scoped_with_creds(cred)
+ return file_open_root(&root, filename, flags, mode);
}
static int sev_read_init_ex_file(void)
@@ -301,13 +388,482 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
return sev_write_init_ex_file();
}
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
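+/*
+ * Transition @npages pages starting at @paddr from the firmware-owned
+ * state back to the hypervisor-owned state in the RMP. Pages that fail
+ * to reclaim are leaked since they are no longer safe to use.
+ */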
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+{
+ int ret, err, i;
+
+ paddr = __sme_clr(ALIGN_DOWN(paddr, PAGE_SIZE));
+
+ for (i = 0; i < npages; i++, paddr += PAGE_SIZE) {
+ struct sev_data_snp_page_reclaim data = {0};
+
+ data.paddr = paddr;
+
+ if (locked)
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
+ else
+ ret = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
+
+ if (ret)
+ goto cleanup;
+
+ ret = rmp_make_shared(__phys_to_pfn(paddr), PG_LEVEL_4K);
+ if (ret)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ /*
+ * If there was a failure reclaiming the page then it is no longer safe
+ * to release it back to the system; leak it instead.
+ */
+ snp_leak_pages(__phys_to_pfn(paddr), npages - i);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snp_reclaim_pages);
+
+static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
+{
+ unsigned long pfn = __sme_clr(paddr) >> PAGE_SHIFT;
+ int rc, i;
+
+ for (i = 0; i < npages; i++, pfn++) {
+ rc = rmp_make_private(pfn, 0, PG_LEVEL_4K, 0, true);
+ if (rc)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ /*
+ * Try unrolling the firmware state changes by
+ * reclaiming the pages which were already changed to the
+ * firmware state.
+ */
+ snp_reclaim_pages(paddr, i, locked);
+
+ return rc;
+}
+
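+/*
+ * Allocate pages and, if SNP is initialized, transition them to the
+ * firmware-owned state in the RMP so the PSP may write to them.
+ */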
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
+{
+ unsigned long npages = 1ul << order, paddr;
+ struct sev_device *sev;
+ struct page *page;
+
+ if (!psp_master || !psp_master->sev_data)
+ return NULL;
+
+ page = alloc_pages(gfp_mask, order);
+ if (!page)
+ return NULL;
+
+ /* If SEV-SNP is initialized then add the page in RMP table. */
+ sev = psp_master->sev_data;
+ if (!sev->snp_initialized)
+ return page;
+
+ paddr = __pa((unsigned long)page_address(page));
+ if (rmp_mark_pages_firmware(paddr, npages, locked))
+ return NULL;
+
+ return page;
+}
+
+void *snp_alloc_firmware_page(gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = __snp_alloc_firmware_pages(gfp_mask, 0, false);
+
+ return page ? page_address(page) : NULL;
+}
+EXPORT_SYMBOL_GPL(snp_alloc_firmware_page);
+
+static void __snp_free_firmware_pages(struct page *page, int order, bool locked)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ unsigned long paddr, npages = 1ul << order;
+
+ if (!page)
+ return;
+
+ paddr = __pa((unsigned long)page_address(page));
+ if (sev->snp_initialized &&
+ snp_reclaim_pages(paddr, npages, locked))
+ return;
+
+ __free_pages(page, order);
+}
+
+void snp_free_firmware_page(void *addr)
+{
+ if (!addr)
+ return;
+
+ __snp_free_firmware_pages(virt_to_page(addr), 0, false);
+}
+EXPORT_SYMBOL_GPL(snp_free_firmware_page);
+
+static void *sev_fw_alloc(unsigned long len)
+{
+ struct page *page;
+
+ page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
+ if (!page)
+ return NULL;
+
+ return page_address(page);
+}
+
+/**
+ * struct cmd_buf_desc - descriptors for managing legacy SEV command address
+ * parameters corresponding to buffers that may be written to by firmware.
+ *
+ * @paddr_ptr: pointer to the address parameter in the command buffer which may
+ * need to be saved/restored depending on whether a bounce buffer
+ * is used. In the case of a bounce buffer, the command buffer
+ * needs to be updated with the address of the new bounce buffer
+ * snp_map_cmd_buf_desc() has allocated specifically for it. Must
+ * be NULL if this descriptor is only an end-of-list indicator.
+ *
+ * @paddr_orig: storage for the original address parameter, which can be used to
+ * restore the original value in @paddr_ptr in cases where it is
+ * replaced with the address of a bounce buffer.
+ *
+ * @len: length of buffer located at the address originally stored at @paddr_ptr
+ *
+ * @guest_owned: true if the address corresponds to guest-owned pages, in which
+ * case bounce buffers are not needed.
+ */
+struct cmd_buf_desc {
+ u64 *paddr_ptr;
+ u64 paddr_orig;
+ u32 len;
+ bool guest_owned;
+};
+
+/*
+ * If a legacy SEV command parameter is a memory address, those pages in
+ * turn need to be transitioned to/from firmware-owned before/after
+ * executing the firmware command.
+ *
+ * Additionally, in cases where those pages are not guest-owned, a bounce
+ * buffer is needed in place of the original memory address parameter.
+ *
+ * A set of descriptors are used to keep track of this handling, and
+ * initialized here based on the specific commands being executed.
+ */
+static void snp_populate_cmd_buf_desc_list(int cmd, void *cmd_buf,
+ struct cmd_buf_desc *desc_list)
+{
+ switch (cmd) {
+ case SEV_CMD_PDH_CERT_EXPORT: {
+ struct sev_data_pdh_cert_export *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->pdh_cert_address;
+ desc_list[0].len = data->pdh_cert_len;
+ desc_list[1].paddr_ptr = &data->cert_chain_address;
+ desc_list[1].len = data->cert_chain_len;
+ break;
+ }
+ case SEV_CMD_GET_ID: {
+ struct sev_data_get_id *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_PEK_CSR: {
+ struct sev_data_pek_csr *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_DATA: {
+ struct sev_data_launch_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: {
+ struct sev_data_launch_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_LAUNCH_MEASURE: {
+ struct sev_data_launch_measure *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: {
+ struct sev_data_launch_secret *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_DBG_DECRYPT: {
+ struct sev_data_dbg *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->dst_addr;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_DBG_ENCRYPT: {
+ struct sev_data_dbg *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->dst_addr;
+ desc_list[0].len = data->len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_ATTESTATION_REPORT: {
+ struct sev_data_attestation_report *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->address;
+ desc_list[0].len = data->len;
+ break;
+ }
+ case SEV_CMD_SEND_START: {
+ struct sev_data_send_start *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->session_address;
+ desc_list[0].len = data->session_len;
+ break;
+ }
+ case SEV_CMD_SEND_UPDATE_DATA: {
+ struct sev_data_send_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->hdr_address;
+ desc_list[0].len = data->hdr_len;
+ desc_list[1].paddr_ptr = &data->trans_address;
+ desc_list[1].len = data->trans_len;
+ break;
+ }
+ case SEV_CMD_SEND_UPDATE_VMSA: {
+ struct sev_data_send_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->hdr_address;
+ desc_list[0].len = data->hdr_len;
+ desc_list[1].paddr_ptr = &data->trans_address;
+ desc_list[1].len = data->trans_len;
+ break;
+ }
+ case SEV_CMD_RECEIVE_UPDATE_DATA: {
+ struct sev_data_receive_update_data *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: {
+ struct sev_data_receive_update_vmsa *data = cmd_buf;
+
+ desc_list[0].paddr_ptr = &data->guest_address;
+ desc_list[0].len = data->guest_len;
+ desc_list[0].guest_owned = true;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int snp_map_cmd_buf_desc(struct cmd_buf_desc *desc)
+{
+ unsigned int npages;
+
+ if (!desc->len)
+ return 0;
+
+ /* Allocate a bounce buffer if this isn't a guest owned page. */
+ if (!desc->guest_owned) {
+ struct page *page;
+
+ page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(desc->len));
+ if (!page) {
+ pr_warn("Failed to allocate bounce buffer for SEV legacy command.\n");
+ return -ENOMEM;
+ }
+
+ desc->paddr_orig = *desc->paddr_ptr;
+ *desc->paddr_ptr = __psp_pa(page_to_virt(page));
+ }
+
+ npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT;
+
+ /* Transition the buffer to firmware-owned. */
+ if (rmp_mark_pages_firmware(*desc->paddr_ptr, npages, true)) {
+ pr_warn("Error moving pages to firmware-owned state for SEV legacy command.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int snp_unmap_cmd_buf_desc(struct cmd_buf_desc *desc)
+{
+ unsigned int npages;
+
+ if (!desc->len)
+ return 0;
+
+ npages = PAGE_ALIGN(desc->len) >> PAGE_SHIFT;
+
+ /* Transition the buffers back to hypervisor-owned. */
+ if (snp_reclaim_pages(*desc->paddr_ptr, npages, true)) {
+ pr_warn("Failed to reclaim firmware-owned pages while issuing SEV legacy command.\n");
+ return -EFAULT;
+ }
+
+ /* Copy data from bounce buffer and then free it. */
+ if (!desc->guest_owned) {
+ void *bounce_buf = __va(__sme_clr(*desc->paddr_ptr));
+ void *dst_buf = __va(__sme_clr(desc->paddr_orig));
+
+ memcpy(dst_buf, bounce_buf, desc->len);
+ __free_pages(virt_to_page(bounce_buf), get_order(desc->len));
+
+ /* Restore the original address in the command buffer. */
+ *desc->paddr_ptr = desc->paddr_orig;
+ }
+
+ return 0;
+}
+
+static int snp_map_cmd_buf_desc_list(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
+{
+ int i;
+
+ snp_populate_cmd_buf_desc_list(cmd, cmd_buf, desc_list);
+
+ for (i = 0; i < CMD_BUF_DESC_MAX; i++) {
+ struct cmd_buf_desc *desc = &desc_list[i];
+
+ if (!desc->paddr_ptr)
+ break;
+
+ if (snp_map_cmd_buf_desc(desc))
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ for (i--; i >= 0; i--)
+ snp_unmap_cmd_buf_desc(&desc_list[i]);
+
+ return -EFAULT;
+}
+
+static int snp_unmap_cmd_buf_desc_list(struct cmd_buf_desc *desc_list)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < CMD_BUF_DESC_MAX; i++) {
+ struct cmd_buf_desc *desc = &desc_list[i];
+
+ if (!desc->paddr_ptr)
+ break;
+
+ if (snp_unmap_cmd_buf_desc(&desc_list[i]))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static bool sev_cmd_buf_writable(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_PLATFORM_STATUS:
+ case SEV_CMD_GUEST_STATUS:
+ case SEV_CMD_LAUNCH_START:
+ case SEV_CMD_RECEIVE_START:
+ case SEV_CMD_LAUNCH_MEASURE:
+ case SEV_CMD_SEND_START:
+ case SEV_CMD_SEND_UPDATE_DATA:
+ case SEV_CMD_SEND_UPDATE_VMSA:
+ case SEV_CMD_PEK_CSR:
+ case SEV_CMD_PDH_CERT_EXPORT:
+ case SEV_CMD_GET_ID:
+ case SEV_CMD_ATTESTATION_REPORT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* After SNP is INIT'ed, the behavior of legacy SEV commands is changed. */
+static bool snp_legacy_handling_needed(int cmd)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ return cmd < SEV_CMD_SNP_INIT && sev->snp_initialized;
+}
+
+static int snp_prep_cmd_buf(int cmd, void *cmd_buf, struct cmd_buf_desc *desc_list)
+{
+ if (!snp_legacy_handling_needed(cmd))
+ return 0;
+
+ if (snp_map_cmd_buf_desc_list(cmd, cmd_buf, desc_list))
+ return -EFAULT;
+
+ /*
+ * Before command execution, the command buffer needs to be put into
+ * the firmware-owned state.
+ */
+ if (sev_cmd_buf_writable(cmd)) {
+ if (rmp_mark_pages_firmware(__pa(cmd_buf), 1, true))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
+{
+ if (!snp_legacy_handling_needed(cmd))
+ return 0;
+
+ /*
+ * After command completion, the command buffer needs to be put back
+ * into the hypervisor-owned state.
+ */
+ if (sev_cmd_buf_writable(cmd))
+ if (snp_reclaim_pages(__pa(cmd_buf), 1, true))
+ return -EFAULT;
+
+ return 0;
+}
+
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
+ struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
struct psp_device *psp = psp_master;
struct sev_device *sev;
+ unsigned int cmdbuff_hi, cmdbuff_lo;
unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
+ unsigned int reg;
+ void *cmd_buf;
int buf_len;
+ int ret = 0;
if (!psp || !psp->sev_data)
return -ENODEV;
@@ -326,12 +882,47 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
* work for some memory, e.g. vmalloc'd addresses, and @data may not be
* physically contiguous.
*/
- if (data)
- memcpy(sev->cmd_buf, data, buf_len);
+ if (data) {
+ /*
+ * Commands are generally issued one at a time and require the
+ * sev_cmd_mutex, but there could be recursive firmware requests
+ * due to SEV_CMD_SNP_PAGE_RECLAIM needing to be issued while
+ * preparing buffers for another command. This is the only known
+ * case of nesting in the current code, so exactly one
+ * additional command buffer is available for that purpose.
+ */
+ if (!sev->cmd_buf_active) {
+ cmd_buf = sev->cmd_buf;
+ sev->cmd_buf_active = true;
+ } else if (!sev->cmd_buf_backup_active) {
+ cmd_buf = sev->cmd_buf_backup;
+ sev->cmd_buf_backup_active = true;
+ } else {
+ dev_err(sev->dev,
+ "SEV: too many firmware commands in progress, no command buffers available.\n");
+ return -EBUSY;
+ }
+
+ memcpy(cmd_buf, data, buf_len);
+
+ /*
+ * The behavior of the SEV-legacy commands is altered when the
+ * SNP firmware is in the INIT state.
+ */
+ ret = snp_prep_cmd_buf(cmd, cmd_buf, desc_list);
+ if (ret) {
+ dev_err(sev->dev,
+ "SEV: failed to prepare buffer for legacy command 0x%x. Error: %d\n",
+ cmd, ret);
+ return ret;
+ }
+ } else {
+ cmd_buf = sev->cmd_buf;
+ }
/* Get the physical address of the command buffer */
- phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0;
- phys_msb = data ? upper_32_bits(__psp_pa(sev->cmd_buf)) : 0;
+ phys_lsb = data ? lower_32_bits(__psp_pa(cmd_buf)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(cmd_buf)) : 0;
dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
cmd, phys_msb, phys_lsb, psp_timeout);
@@ -344,9 +935,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
sev->int_rcvd = 0;
- reg = cmd;
- reg <<= SEV_CMDRESP_CMD_SHIFT;
- reg |= SEV_CMDRESP_IOC;
+ reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd);
+
+ /*
+ * If invoked during panic handling, local interrupts are disabled so
+ * the PSP command completion interrupt can't be used.
+ * sev_wait_cmd_ioc() already checks for interrupts disabled and
+ * polls for PSP command completion. Ensure we do not request an
+ * interrupt from the PSP if irqs disabled.
+ */
+ if (!irqs_disabled())
+ reg |= SEV_CMDRESP_IOC;
+
iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);
/* wait for command completion */
@@ -364,30 +964,64 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
psp_timeout = psp_cmd_timeout;
if (psp_ret)
- *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+ *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg);
+
+ if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
+ cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
- cmd, reg & PSP_CMDRESP_ERR_MASK);
+ /*
+ * PSP firmware may report additional error information in the
+ * command buffer registers on error. Print contents of command
+ * buffer registers if they changed.
+ */
+ cmdbuff_hi = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+ cmdbuff_lo = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
+ if (cmdbuff_hi != phys_msb || cmdbuff_lo != phys_lsb) {
+ dev_dbg(sev->dev, "Additional error information reported in cmdbuff:");
+ dev_dbg(sev->dev, " cmdbuff hi: %#010x\n", cmdbuff_hi);
+ dev_dbg(sev->dev, " cmdbuff lo: %#010x\n", cmdbuff_lo);
+ }
ret = -EIO;
} else {
ret = sev_write_init_ex_file_if_required(cmd);
}
- print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
- buf_len, false);
-
/*
* Copy potential output from the PSP back to data. Do this even on
* failure in case the caller wants to glean something from the error.
*/
- if (data)
- memcpy(data, sev->cmd_buf, buf_len);
+ if (data) {
+ int ret_reclaim;
+ /*
+ * Restore the page state after the command completes.
+ */
+ ret_reclaim = snp_reclaim_cmd_buf(cmd, cmd_buf);
+ if (ret_reclaim) {
+ dev_err(sev->dev,
+ "SEV: failed to reclaim buffer for legacy command %#x. Error: %d\n",
+ cmd, ret_reclaim);
+ return ret_reclaim;
+ }
+
+ memcpy(data, cmd_buf, buf_len);
+
+ if (sev->cmd_buf_backup_active)
+ sev->cmd_buf_backup_active = false;
+ else
+ sev->cmd_buf_active = false;
+
+ if (snp_unmap_cmd_buf_desc_list(desc_list))
+ return -EFAULT;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ buf_len, false);
return ret;
}
-static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
int rc;
@@ -397,6 +1031,7 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret)
return rc;
}
+EXPORT_SYMBOL_GPL(sev_do_cmd);
static int __sev_init_locked(int *error)
{
@@ -411,7 +1046,7 @@ static int __sev_init_locked(int *error)
data.tmr_address = __pa(sev_es_tmr);
data.flags |= SEV_INIT_FLAGS_SEV_ES;
- data.tmr_len = SEV_ES_TMR_SIZE;
+ data.tmr_len = sev_es_tmr_size;
}
return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error);
@@ -434,37 +1069,533 @@ static int __sev_init_ex_locked(int *error)
data.tmr_address = __pa(sev_es_tmr);
data.flags |= SEV_INIT_FLAGS_SEV_ES;
- data.tmr_len = SEV_ES_TMR_SIZE;
+ data.tmr_len = sev_es_tmr_size;
}
return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error);
}
-static int __sev_platform_init_locked(int *error)
+static inline int __sev_do_init_locked(int *psp_ret)
+{
+ if (sev_init_ex_buffer)
+ return __sev_init_ex_locked(psp_ret);
+ else
+ return __sev_init_locked(psp_ret);
+}
+
+static void snp_set_hsave_pa(void *arg)
+{
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
+}
+
+/* Hypervisor Fixed pages API interface */
+static void snp_hv_fixed_pages_state_update(struct sev_device *sev,
+ enum snp_hv_fixed_pages_state page_state)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ entry->page_state = page_state;
+}
+
+/*
+ * Allocate HV_FIXED pages in 2MB aligned sizes to ensure the whole
+ * 2MB pages are marked as HV_FIXED.
+ */
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_device *sev;
+ unsigned int order;
+ struct page *page;
+
+ if (!psp_master || !psp_master->sev_data)
+ return NULL;
+
+ sev = psp_master->sev_data;
+
+ order = get_order(PMD_SIZE * num_2mb_pages);
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ /*
+ * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
+ * page state, fail if SNP is already initialized.
+ */
+ if (sev->snp_initialized)
+ return NULL;
+
+ /* Re-use freed pages that match the request */
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ /* Hypervisor fixed page allocator implements exact fit policy */
+ if (entry->order == order && entry->free) {
+ entry->free = false;
+ memset(page_address(entry->page), 0,
+ (1 << entry->order) * PAGE_SIZE);
+ return entry->page;
+ }
+ }
+
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return NULL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ __free_pages(page, order);
+ return NULL;
+ }
+
+ entry->page = page;
+ entry->order = order;
+ list_add_tail(&entry->list, &snp_hv_fixed_pages);
+
+ return page;
+}
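As a usage sketch (the caller and its error handling are illustrative, not part of this patch), a PSP sub-device would reserve its buffer before SNP initialization and hand the page back through the matching free helper:

	/* Illustrative caller: reserve one 2MB HV_FIXED region before SNP init. */
	struct page *page = snp_alloc_hv_fixed_pages(1);

	if (!page)		/* NULL also covers "SNP already initialized" */
		return -ENOMEM;

	use_buffer(page_address(page));	/* use_buffer() is hypothetical */

	/* Marked free for exact-fit re-use once HV_FIXED, else really freed. */
	snp_free_hv_fixed_pages(page);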
+
+void snp_free_hv_fixed_pages(struct page *page)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry, *nentry;
+
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) {
+ if (entry->page != page)
+ continue;
+
+ /*
+		 * The HV_FIXED page state cannot be changed until reboot and
+		 * such pages cannot be used by an SNP guest, so they cannot
+		 * be returned to the page allocator.
+ * Mark the pages as free internally to allow possible re-use.
+ */
+ if (entry->page_state == HV_FIXED) {
+ entry->free = true;
+ } else {
+ __free_pages(page, entry->order);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return;
+ }
+}
+
+static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_data_range *range;
+ int num_elements;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ num_elements = list_count_nodes(&snp_hv_fixed_pages) +
+ range_list->num_elements;
+
+ /*
+ * Ensure the list of HV_FIXED pages that will be passed to firmware
+	 * does not exceed the page-sized argument buffer.
+ */
+ if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
+ dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n");
+ return;
+ }
+
+ range = &range_list->ranges[range_list->num_elements];
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
+ range->page_count = 1 << entry->order;
+ range++;
+ }
+ range_list->num_elements = num_elements;
+}
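For scale, the PAGE_SIZE bound above caps the total number of ranges; a back-of-the-envelope sketch, assuming the usual 16-byte struct sev_data_range and a small list header (both sizes are assumptions about the struct layout):

	/*
	 * max_ranges = (PAGE_SIZE - sizeof(struct sev_data_range_list))
	 *              / sizeof(struct sev_data_range)
	 *            ~ (4096 - 8) / 16 = 255 entries
	 */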
+
+static void snp_leak_hv_fixed_pages(void)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ if (entry->page_state == HV_FIXED)
+ __snp_leak_pages(page_to_pfn(entry->page),
+ 1 << entry->order, false);
+}
+
+bool sev_is_snp_ciphertext_hiding_supported(void)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
- int rc = 0, psp_ret = -1;
- int (*init_function)(int *error);
if (!psp || !psp->sev_data)
+ return false;
+
+ sev = psp->sev_data;
+
+ /*
+	 * The feature information indicates whether the SEV firmware
+	 * supports the CipherTextHiding feature, and the platform status
+	 * additionally indicates whether the feature is enabled in the
+	 * platform BIOS.
+ */
+ return ((sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED) &&
+ sev->snp_plat_status.ciphertext_hiding_cap);
+}
+EXPORT_SYMBOL_GPL(sev_is_snp_ciphertext_hiding_supported);
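A consumer such as KVM could key the SNP_INIT_EX ciphertext-hiding parameters off this helper; a minimal sketch (the max_snp_asids variable is hypothetical):

	/* Hypothetical caller: opt into ciphertext hiding when available. */
	struct sev_platform_init_args args = {};

	if (sev_is_snp_ciphertext_hiding_supported())
		args.max_snp_asid = max_snp_asids;	/* splits the ASID space */

	rc = sev_platform_init(&args);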
+
+static int snp_get_platform_data(struct sev_device *sev, int *error)
+{
+ struct sev_data_snp_feature_info snp_feat_info;
+ struct snp_feature_info *feat_info;
+ struct sev_data_snp_addr buf;
+ struct page *page;
+ int rc;
+
+ /*
+ * This function is expected to be called before SNP is
+ * initialized.
+ */
+ if (sev->snp_initialized)
+ return -EINVAL;
+
+ buf.address = __psp_pa(&sev->snp_plat_status);
+ rc = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, error);
+ if (rc) {
+ dev_err(sev->dev, "SNP PLATFORM_STATUS command failed, ret = %d, error = %#x\n",
+ rc, *error);
+ return rc;
+ }
+
+ sev->api_major = sev->snp_plat_status.api_major;
+ sev->api_minor = sev->snp_plat_status.api_minor;
+ sev->build = sev->snp_plat_status.build_id;
+
+ /*
+ * Do feature discovery of the currently loaded firmware,
+ * and cache feature information from CPUID 0x8000_0024,
+ * sub-function 0.
+ */
+ if (!sev->snp_plat_status.feature_info)
+ return 0;
+
+ /*
+	 * Use a dynamically allocated structure for the SNP_FEATURE_INFO
+	 * command to ensure the structure is 8-byte aligned and does not
+	 * cross a page boundary.
+ */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ feat_info = page_address(page);
+ snp_feat_info.length = sizeof(snp_feat_info);
+ snp_feat_info.ecx_in = 0;
+ snp_feat_info.feature_info_paddr = __psp_pa(feat_info);
+
+ rc = sev_do_cmd(SEV_CMD_SNP_FEATURE_INFO, &snp_feat_info, error);
+ if (!rc)
+ sev->snp_feat_info_0 = *feat_info;
+ else
+ dev_err(sev->dev, "SNP FEATURE_INFO command failed, ret = %d, error = %#x\n",
+ rc, *error);
+
+ __free_page(page);
+
+ return rc;
+}
+
+static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
+{
+ struct sev_data_range_list *range_list = arg;
+ struct sev_data_range *range = &range_list->ranges[range_list->num_elements];
+ size_t size;
+
+ /*
+ * Ensure the list of HV_FIXED pages that will be passed to firmware
+	 * does not exceed the page-sized argument buffer.
+ */
+ if ((range_list->num_elements * sizeof(struct sev_data_range) +
+ sizeof(struct sev_data_range_list)) > PAGE_SIZE)
+ return -E2BIG;
+
+ switch (rs->desc) {
+ case E820_TYPE_RESERVED:
+ case E820_TYPE_PMEM:
+ case E820_TYPE_ACPI:
+ range->base = rs->start & PAGE_MASK;
+ size = PAGE_ALIGN((rs->end + 1) - rs->start);
+ range->page_count = size >> PAGE_SHIFT;
+ range_list->num_elements++;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
+{
+ struct psp_device *psp = psp_master;
+	struct sev_data_snp_init_ex data = {}; /* tio_en is read even on the SNP_INIT path */
+ struct sev_device *sev;
+ void *arg = &data;
+ int cmd, rc = 0;
+
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return -ENODEV;
sev = psp->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->snp_initialized)
return 0;
- if (sev_init_ex_buffer) {
- init_function = __sev_init_ex_locked;
- rc = sev_read_init_ex_file();
- if (rc)
+ if (!sev_version_greater_or_equal(SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR)) {
+ dev_dbg(sev->dev, "SEV-SNP support requires firmware version >= %d:%d\n",
+ SNP_MIN_API_MAJOR, SNP_MIN_API_MINOR);
+ return -EOPNOTSUPP;
+ }
+
+ /* SNP_INIT requires MSR_VM_HSAVE_PA to be cleared on all CPUs. */
+ on_each_cpu(snp_set_hsave_pa, NULL, 1);
+
+ /*
+ * Starting in SNP firmware v1.52, the SNP_INIT_EX command takes a list
+ * of system physical address ranges to convert into HV-fixed page
+ * states during the RMP initialization. For instance, the memory that
+	 * UEFI reserves should be included in that list. This allows system
+	 * components that occasionally write to memory (e.g. logging to UEFI
+	 * reserved regions) to not fail due to RMP initialization and SNP
+	 * enablement.
+	 */
+ if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+ bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
+
+ /*
+ * Firmware checks that the pages containing the ranges enumerated
+ * in the RANGES structure are either in the default page state or in the
+ * firmware page state.
+ */
+ snp_range_list = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!snp_range_list) {
+ dev_err(sev->dev,
+ "SEV: SNP_INIT_EX range list memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Retrieve all reserved memory regions from the e820 memory map
+		 * to be set up as HV-fixed pages.
+ */
+ rc = walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM, 0, ~0,
+ snp_range_list, snp_filter_reserved_mem_regions);
+ if (rc) {
+ dev_err(sev->dev,
+ "SEV: SNP_INIT_EX walk_iomem_res_desc failed rc = %d\n", rc);
return rc;
+ }
+
+ /*
+		 * Add HV_Fixed pages from other PSP sub-devices, such as SFS,
+		 * to the HV_Fixed page list.
+ */
+ snp_add_hv_fixed_pages(sev, snp_range_list);
+
+ memset(&data, 0, sizeof(data));
+
+ if (max_snp_asid) {
+ data.ciphertext_hiding_en = 1;
+ data.max_snp_asid = max_snp_asid;
+ }
+
+ data.init_rmp = 1;
+ data.list_paddr_en = 1;
+ data.list_paddr = __psp_pa(snp_range_list);
+
+ data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
+
+ /*
+		 * When psp_init_on_probe is disabled, userspace issuing a
+		 * SEV ioctl can inadvertently shut down SNP and SEV-TIO,
+		 * causing unexpected state loss.
+		 */
+		if (data.tio_en && !psp_init_on_probe)
+			dev_warn(sev->dev, "SEV-TIO is incompatible with psp_init_on_probe=0\n");
+
+ cmd = SEV_CMD_SNP_INIT_EX;
+ } else {
+ cmd = SEV_CMD_SNP_INIT;
+ arg = NULL;
+ }
+
+ /*
+ * The following sequence must be issued before launching the first SNP
+ * guest to ensure all dirty cache lines are flushed, including from
+ * updates to the RMP table itself via the RMPUPDATE instruction:
+ *
+ * - WBINVD on all running CPUs
+ * - SEV_CMD_SNP_INIT[_EX] firmware command
+ * - WBINVD on all running CPUs
+ * - SEV_CMD_SNP_DF_FLUSH firmware command
+ */
+ wbinvd_on_all_cpus();
+
+ rc = __sev_do_cmd_locked(cmd, arg, error);
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: %s failed rc %d, error %#x\n",
+ cmd == SEV_CMD_SNP_INIT_EX ? "SNP_INIT_EX" : "SNP_INIT",
+ rc, *error);
+ return rc;
+ }
+
+ /* Prepare for first SNP guest launch after INIT. */
+ wbinvd_on_all_cpus();
+ rc = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, error);
+ if (rc) {
+ dev_err(sev->dev, "SEV-SNP: SNP_DF_FLUSH failed rc %d, error %#x\n",
+ rc, *error);
+ return rc;
+ }
+
+ snp_hv_fixed_pages_state_update(sev, HV_FIXED);
+ sev->snp_initialized = true;
+ dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
+ data.tio_en ? "enabled" : "disabled");
+
+ dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
+ sev->api_minor, sev->build);
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &snp_panic_notifier);
+
+ if (data.tio_en) {
+ /*
+		 * This executes with sev_cmd_mutex held, so further down the
+		 * stack snp_reclaim_pages(locked=false) might be needed (which
+		 * is extremely unlikely) and would deadlock.
+		 * Instead of exporting __snp_alloc_firmware_pages(), allocate
+		 * a page for this one call here.
+ */
+ void *tio_status = page_address(__snp_alloc_firmware_pages(
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true));
+
+ if (tio_status) {
+ sev_tsm_init_locked(sev, tio_status);
+ __snp_free_firmware_pages(virt_to_page(tio_status), 0, true);
+ }
+ }
+
+ sev_es_tmr_size = SNP_TMR_SIZE;
+
+ return 0;
+}
+
+static void __sev_platform_init_handle_tmr(struct sev_device *sev)
+{
+ if (sev_es_tmr)
+ return;
+
+ /* Obtain the TMR memory area for SEV-ES use */
+ sev_es_tmr = sev_fw_alloc(sev_es_tmr_size);
+ if (sev_es_tmr) {
+ /* Must flush the cache before giving it to the firmware */
+ if (!sev->snp_initialized)
+ clflush_cache_range(sev_es_tmr, sev_es_tmr_size);
} else {
- init_function = __sev_init_locked;
+ dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n");
+ }
+}
+
+/*
+ * If an init_ex_path is provided allocate a buffer for the file and
+ * read in the contents. Additionally, if SNP is initialized, convert
+ * the buffer pages to firmware pages.
+ */
+static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
+{
+ struct page *page;
+ int rc;
+
+ if (!init_ex_path)
+ return 0;
+
+ if (sev_init_ex_buffer)
+ return 0;
+
+ page = alloc_pages(GFP_KERNEL, get_order(NV_LENGTH));
+ if (!page) {
+ dev_err(sev->dev, "SEV: INIT_EX NV memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ sev_init_ex_buffer = page_address(page);
+
+ rc = sev_read_init_ex_file();
+ if (rc)
+ return rc;
+
+ /* If SEV-SNP is initialized, transition to firmware page. */
+ if (sev->snp_initialized) {
+ unsigned long npages;
+
+ npages = 1UL << get_order(NV_LENGTH);
+ if (rmp_mark_pages_firmware(__pa(sev_init_ex_buffer), npages, false)) {
+ dev_err(sev->dev, "SEV: INIT_EX NV memory page state change failed.\n");
+ return -ENOMEM;
+ }
}
- rc = init_function(&psp_ret);
+ return 0;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ int rc, psp_ret, dfflush_error;
+ struct sev_device *sev;
+
+ psp_ret = dfflush_error = SEV_RET_NO_FW_CALL;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ sev = psp_master->sev_data;
+
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
+ return 0;
+
+ __sev_platform_init_handle_tmr(sev);
+
+ rc = __sev_platform_init_handle_init_ex_path(sev);
+ if (rc)
+ return rc;
+
+ rc = __sev_do_init_locked(&psp_ret);
if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) {
/*
* Initialization command returned an integrity check failure
@@ -473,22 +1604,30 @@ static int __sev_platform_init_locked(int *error)
* initialization function should succeed by replacing the state
* with a reset state.
*/
- dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
- rc = init_function(&psp_ret);
+ dev_err(sev->dev,
+"SEV: retrying INIT command because of SECURE_DATA_INVALID error. Retrying once to reset PSP SEV state.");
+ rc = __sev_do_init_locked(&psp_ret);
}
+
if (error)
*error = psp_ret;
- if (rc)
+ if (rc) {
+ dev_err(sev->dev, "SEV: %s failed %#x, rc %d\n",
+ sev_init_ex_buffer ? "INIT_EX" : "INIT", psp_ret, rc);
return rc;
+ }
- sev->state = SEV_STATE_INIT;
+ sev->sev_plat_status.state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
- rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
- if (rc)
+ rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error);
+ if (rc) {
+ dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
+ dfflush_error, rc);
return rc;
+ }
dev_dbg(sev->dev, "SEV firmware initialized\n");
@@ -498,12 +1637,45 @@ static int __sev_platform_init_locked(int *error)
return 0;
}
-int sev_platform_init(int *error)
+static int _sev_platform_init_locked(struct sev_platform_init_args *args)
+{
+ struct sev_device *sev;
+ int rc;
+
+ if (!psp_master || !psp_master->sev_data)
+ return -ENODEV;
+
+ /*
+ * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP
+ * may already be initialized in the previous kernel. Since no
+ * SNP/SEV guests are run under a kdump kernel, there is no
+ * need to initialize SNP or SEV during kdump boot.
+ */
+ if (is_kdump_kernel())
+ return 0;
+
+ sev = psp_master->sev_data;
+
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
+ return 0;
+
+ rc = __sev_snp_init_locked(&args->error, args->max_snp_asid);
+ if (rc && rc != -ENODEV)
+ return rc;
+
+ /* Defer legacy SEV/SEV-ES support if allowed by caller/module. */
+ if (args->probe && !psp_init_on_probe)
+ return 0;
+
+ return __sev_platform_init_locked(&args->error);
+}
+
+int sev_platform_init(struct sev_platform_init_args *args)
{
int rc;
mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_init_locked(error);
+ rc = _sev_platform_init_locked(args);
mutex_unlock(&sev_cmd_mutex);
return rc;
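Since the function now takes an argument structure rather than a bare error pointer, existing callers translate mechanically; a sketch of the before/after:

	/* Before (sketch): */
	int error;
	rc = sev_platform_init(&error);

	/* After (sketch): the firmware status is returned in args.error. */
	struct sev_platform_init_args args = { .probe = false };
	rc = sev_platform_init(&args);
	error = args.error;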
@@ -512,44 +1684,73 @@ EXPORT_SYMBOL_GPL(sev_platform_init);
static int __sev_platform_shutdown_locked(int *error)
{
- struct sev_device *sev = psp_master->sev_data;
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
int ret;
- if (!sev || sev->state == SEV_STATE_UNINIT)
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
- if (ret)
+ if (ret) {
+ dev_err(sev->dev, "SEV: failed to SHUTDOWN error %#x, rc %d\n",
+ *error, ret);
return ret;
+ }
- sev->state = SEV_STATE_UNINIT;
+ sev->sev_plat_status.state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
return ret;
}
-static int sev_platform_shutdown(int *error)
+static int sev_get_platform_state(int *state, int *error)
{
+ struct sev_user_data_status data;
int rc;
- mutex_lock(&sev_cmd_mutex);
- rc = __sev_platform_shutdown_locked(NULL);
- mutex_unlock(&sev_cmd_mutex);
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error);
+ if (rc)
+ return rc;
+ *state = data.state;
return rc;
}
-static int sev_get_platform_state(int *state, int *error)
+static int sev_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
{
- struct sev_user_data_status data;
+ struct sev_platform_init_args init_args = {0};
int rc;
- rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error);
- if (rc)
+ rc = _sev_platform_init_locked(&init_args);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
return rc;
+ }
- *state = data.state;
- return rc;
+ *shutdown_required = true;
+
+ return 0;
+}
+
+static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_required)
+{
+ int error, rc;
+
+ rc = __sev_snp_init_locked(&error, 0);
+ if (rc) {
+ argp->error = SEV_RET_INVALID_PLATFORM_STATE;
+ return rc;
+ }
+
+ *shutdown_required = true;
+
+ return 0;
}
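The legacy ioctl handlers below all follow the same transient-initialization contract built from these two helpers; a condensed sketch:

	/* Condensed pattern shared by the ioctl handlers below (sketch). */
	bool shutdown_required = false;
	int rc;

	if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
		rc = sev_move_to_init_state(argp, &shutdown_required);
		if (rc)
			return rc;
	}

	rc = __sev_do_cmd_locked(cmd, data, &argp->error);

	if (shutdown_required)
		__sev_firmware_shutdown(sev, false);

	return rc;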
static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
@@ -604,24 +1805,31 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
int rc;
if (!writable)
return -EPERM;
- if (sev->state == SEV_STATE_UNINIT) {
- rc = __sev_platform_init_locked(&argp->error);
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
+ rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
}
- return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+ rc = __sev_do_cmd_locked(cmd, NULL, &argp->error);
+
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
+ return rc;
}
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
+ bool shutdown_required = false;
struct sev_data_pek_csr data;
void __user *input_address;
void *blob = NULL;
@@ -652,8 +1860,8 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
data.len = input.length;
cmd:
- if (sev->state == SEV_STATE_UNINIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
}
@@ -674,6 +1882,9 @@ cmd:
}
e_free_blob:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(blob);
return ret;
}
@@ -697,6 +1908,16 @@ static int sev_get_api_version(void)
struct sev_user_data_status status;
int error = 0, ret;
+ /*
+ * Cache SNP platform status and SNP feature information
+ * if SNP is available.
+ */
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) {
+ ret = snp_get_platform_data(sev, &error);
+ if (ret)
+ return 1;
+ }
+
ret = sev_platform_status(&status, &error);
if (ret) {
dev_err(sev->dev,
@@ -704,10 +1925,12 @@ static int sev_get_api_version(void)
return 1;
}
+ /* Cache SEV platform status */
+ sev->sev_plat_status = status;
+
sev->api_major = status.api_major;
sev->api_minor = status.api_minor;
sev->build = status.build;
- sev->state = status.state;
return 0;
}
@@ -803,8 +2026,6 @@ static int sev_update_firmware(struct device *dev)
if (ret)
dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
- else
- dev_info(dev, "SEV firmware update successful\n");
__free_pages(p, order);
@@ -814,11 +2035,101 @@ fw_err:
return ret;
}
+static int __sev_snp_shutdown_locked(int *error, bool panic)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ struct sev_data_snp_shutdown_ex data;
+ int ret;
+
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
+ if (!sev->snp_initialized)
+ return 0;
+
+ memset(&data, 0, sizeof(data));
+ data.len = sizeof(data);
+ data.iommu_snp_shutdown = 1;
+
+ /*
+ * If invoked during panic handling, local interrupts are disabled
+ * and all CPUs are stopped, so wbinvd_on_all_cpus() can't be called.
+ * In that case, a wbinvd() is done on remote CPUs via the NMI
+ * callback, so only a local wbinvd() is needed here.
+ */
+ if (!panic)
+ wbinvd_on_all_cpus();
+ else
+ wbinvd();
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data, error);
+ /* SHUTDOWN may require DF_FLUSH */
+ if (*error == SEV_RET_DFFLUSH_REQUIRED) {
+ int dfflush_error = SEV_RET_NO_FW_CALL;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_DF_FLUSH, NULL, &dfflush_error);
+ if (ret) {
+ dev_err(sev->dev, "SEV-SNP DF_FLUSH failed, ret = %d, error = %#x\n",
+ ret, dfflush_error);
+ return ret;
+ }
+ /* reissue the shutdown command */
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_SHUTDOWN_EX, &data,
+ error);
+ }
+ if (ret) {
+ dev_err(sev->dev, "SEV-SNP firmware shutdown failed, rc %d, error %#x\n",
+ ret, *error);
+ return ret;
+ }
+
+ /*
+ * SNP_SHUTDOWN_EX with IOMMU_SNP_SHUTDOWN set to 1 disables SNP
+ * enforcement by the IOMMU and also transitions all pages
+ * associated with the IOMMU to the Reclaim state.
+	 * Before version 1.53, firmware transitioned the IOMMU pages to the
+	 * Hypervisor state, but accounted for the number of assigned 4kB
+	 * pages in a 2M page incorrectly by not transitioning them to the
+	 * Reclaim state. This resulted in an RMP #PF when the 2M page
+	 * containing those pages was later accessed during kexec boot. Hence,
+	 * the firmware now transitions these pages to the Reclaim state and
+	 * the hypervisor needs to transition them to the Shared state. SNP
+	 * firmware version 1.53 or above is needed for kexec boot.
+ */
+ ret = amd_iommu_snp_disable();
+ if (ret) {
+ dev_err(sev->dev, "SNP IOMMU shutdown failed\n");
+ return ret;
+ }
+
+ snp_leak_hv_fixed_pages();
+ sev->snp_initialized = false;
+ dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
+
+ /*
+ * __sev_snp_shutdown_locked() deadlocks when it tries to unregister
+ * itself during panic as the panic notifier is called with RCU read
+ * lock held and notifier unregistration does RCU synchronization.
+ */
+ if (!panic)
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &snp_panic_notifier);
+
+ /* Reset TMR size back to default */
+ sev_es_tmr_size = SEV_TMR_SIZE;
+
+ return ret;
+}
+
static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_cert_import input;
struct sev_data_pek_cert_import data;
+ bool shutdown_required = false;
void *pek_blob, *oca_blob;
int ret;
@@ -848,8 +2159,8 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
data.oca_cert_len = input.oca_cert_len;
/* If platform is not in INIT state then transition it to INIT */
- if (sev->state != SEV_STATE_INIT) {
- ret = __sev_platform_init_locked(&argp->error);
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
+ ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
}
@@ -857,6 +2168,9 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error);
e_free_oca:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(oca_blob);
e_free_pek:
kfree(pek_blob);
@@ -881,7 +2195,14 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
input_address = (void __user *)input.address;
if (input.address && input.length) {
- id_blob = kzalloc(input.length, GFP_KERNEL);
+ /*
+ * The length of the ID shouldn't be assumed by software since
+ * it may change in the future. The allocation size is limited
+ * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator.
+ * If the allocation fails, simply return ENOMEM rather than
+ * warning in the kernel log.
+ */
+ id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN);
if (!id_blob)
return -ENOMEM;
@@ -966,32 +2287,23 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_data_pdh_cert_export data;
void __user *input_cert_chain_address;
void __user *input_pdh_cert_address;
+ bool shutdown_required = false;
int ret;
- /* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
- if (!writable)
- return -EPERM;
-
- ret = __sev_platform_init_locked(&argp->error);
- if (ret)
- return ret;
- }
-
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
memset(&data, 0, sizeof(data));
+ input_pdh_cert_address = (void __user *)input.pdh_cert_address;
+ input_cert_chain_address = (void __user *)input.cert_chain_address;
+
/* Userspace wants to query the certificate length. */
if (!input.pdh_cert_address ||
!input.pdh_cert_len ||
!input.cert_chain_address)
goto cmd;
- input_pdh_cert_address = (void __user *)input.pdh_cert_address;
- input_cert_chain_address = (void __user *)input.cert_chain_address;
-
/* Allocate a physically contiguous buffer to store the PDH blob. */
if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE)
return -EFAULT;
@@ -1017,6 +2329,17 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
data.cert_chain_len = input.cert_chain_len;
cmd:
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
+ if (!writable) {
+ ret = -EPERM;
+ goto e_free_cert;
+ }
+ ret = sev_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto e_free_cert;
+ }
+
ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error);
/* If we query the length, FW responded with expected data. */
@@ -1043,12 +2366,171 @@ cmd:
}
e_free_cert:
+ if (shutdown_required)
+ __sev_firmware_shutdown(sev, false);
+
kfree(cert_blob);
e_free_pdh:
kfree(pdh_blob);
return ret;
}
+static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ bool shutdown_required = false;
+ struct sev_data_snp_addr buf;
+ struct page *status_page;
+ int ret, error;
+ void *data;
+
+ if (!argp->data)
+ return -EINVAL;
+
+ status_page = alloc_page(GFP_KERNEL_ACCOUNT);
+ if (!status_page)
+ return -ENOMEM;
+
+ data = page_address(status_page);
+
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
+ /*
+ * Firmware expects status page to be in firmware-owned state, otherwise
+	 * Firmware expects the status page to be in the firmware-owned state,
+	 * otherwise it will report firmware error code INVALID_PAGE_STATE (0x1A).
+ if (rmp_mark_pages_firmware(__pa(data), 1, true)) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ buf.address = __psp_pa(data);
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_PLATFORM_STATUS, &buf, &argp->error);
+
+ /*
+	 * The status page will be transitioned to the Reclaim state upon
+	 * success, or left in the Firmware state on failure. Use
+	 * snp_reclaim_pages() to transition it back to the Hypervisor-owned
+	 * state in either case.
+ */
+ if (snp_reclaim_pages(__pa(data), 1, true))
+ return -EFAULT;
+
+ if (ret)
+ goto cleanup;
+
+ if (copy_to_user((void __user *)argp->data, data,
+ sizeof(struct sev_user_data_snp_status)))
+ ret = -EFAULT;
+
+cleanup:
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ __free_pages(status_page, 0);
+ return ret;
+}
+
+static int sev_ioctl_do_snp_commit(struct sev_issue_cmd *argp)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_data_snp_commit buf;
+ bool shutdown_required = false;
+ int ret, error;
+
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
+
+ buf.len = sizeof(buf);
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_COMMIT, &buf, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
+}
+
+static int sev_ioctl_do_snp_set_config(struct sev_issue_cmd *argp, bool writable)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_snp_config config;
+ bool shutdown_required = false;
+ int ret, error;
+
+ if (!argp->data)
+ return -EINVAL;
+
+ if (!writable)
+ return -EPERM;
+
+ if (copy_from_user(&config, (void __user *)argp->data, sizeof(config)))
+ return -EFAULT;
+
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ return ret;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_CONFIG, &config, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+ return ret;
+}
+
+static int sev_ioctl_do_snp_vlek_load(struct sev_issue_cmd *argp, bool writable)
+{
+ struct sev_device *sev = psp_master->sev_data;
+ struct sev_user_data_snp_vlek_load input;
+ bool shutdown_required = false;
+ int ret, error;
+ void *blob;
+
+ if (!argp->data)
+ return -EINVAL;
+
+ if (!writable)
+ return -EPERM;
+
+ if (copy_from_user(&input, u64_to_user_ptr(argp->data), sizeof(input)))
+ return -EFAULT;
+
+ if (input.len != sizeof(input) || input.vlek_wrapped_version != 0)
+ return -EINVAL;
+
+ blob = psp_copy_user_blob(input.vlek_wrapped_address,
+ sizeof(struct sev_user_data_snp_wrapped_vlek_hashstick));
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
+
+ input.vlek_wrapped_address = __psp_pa(blob);
+
+ if (!sev->snp_initialized) {
+ ret = snp_move_to_init_state(argp, &shutdown_required);
+ if (ret)
+ goto cleanup;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SNP_VLEK_LOAD, &input, &argp->error);
+
+ if (shutdown_required)
+ __sev_snp_shutdown_locked(&error, false);
+
+cleanup:
+ kfree(blob);
+
+ return ret;
+}
+
static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -1100,6 +2582,18 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
case SEV_GET_ID2:
ret = sev_ioctl_do_get_id2(&input);
break;
+ case SNP_PLATFORM_STATUS:
+ ret = sev_ioctl_do_snp_platform_status(&input);
+ break;
+ case SNP_COMMIT:
+ ret = sev_ioctl_do_snp_commit(&input);
+ break;
+ case SNP_SET_CONFIG:
+ ret = sev_ioctl_do_snp_set_config(&input, writable);
+ break;
+ case SNP_VLEK_LOAD:
+ ret = sev_ioctl_do_snp_vlek_load(&input, writable);
+ break;
default:
ret = -EINVAL;
goto out;
@@ -1210,10 +2704,12 @@ int sev_dev_init(struct psp_device *psp)
if (!sev)
goto e_err;
- sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+ sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 1);
if (!sev->cmd_buf)
goto e_sev;
+ sev->cmd_buf_backup = (uint8_t *)sev->cmd_buf + PAGE_SIZE;
+
psp->sev_data = sev;
sev->dev = dev;
@@ -1252,26 +2748,111 @@ e_err:
return ret;
}
-static void sev_firmware_shutdown(struct sev_device *sev)
+static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
- sev_platform_shutdown(NULL);
+ int error;
+
+ __sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
- /* The TMR area was encrypted, flush it from the cache */
- wbinvd_on_all_cpus();
+ /*
+ * The TMR area was encrypted, flush it from the cache.
+ *
+ * If invoked during panic handling, local interrupts are
+ * disabled and all CPUs are stopped, so wbinvd_on_all_cpus()
+ * can't be used. In that case, wbinvd() is done on remote CPUs
+ * via the NMI callback, and done for this CPU later during
+ * SNP shutdown, so wbinvd_on_all_cpus() can be skipped.
+ */
+ if (!panic)
+ wbinvd_on_all_cpus();
- free_pages((unsigned long)sev_es_tmr,
- get_order(SEV_ES_TMR_SIZE));
+ __snp_free_firmware_pages(virt_to_page(sev_es_tmr),
+ get_order(sev_es_tmr_size),
+ true);
sev_es_tmr = NULL;
}
if (sev_init_ex_buffer) {
- free_pages((unsigned long)sev_init_ex_buffer,
- get_order(NV_LENGTH));
+ __snp_free_firmware_pages(virt_to_page(sev_init_ex_buffer),
+ get_order(NV_LENGTH),
+ true);
sev_init_ex_buffer = NULL;
}
+
+ if (snp_range_list) {
+ kfree(snp_range_list);
+ snp_range_list = NULL;
+ }
+
+ __sev_snp_shutdown_locked(&error, panic);
}
+static void sev_firmware_shutdown(struct sev_device *sev)
+{
+ /*
+	 * Call without sev_cmd_mutex held: TSM will likely try disconnecting
+	 * IDE, which ends up calling sev_do_cmd() and that takes sev_cmd_mutex.
+ */
+ if (sev->tio_status)
+ sev_tsm_uninit(sev);
+
+ mutex_lock(&sev_cmd_mutex);
+
+ __sev_firmware_shutdown(sev, false);
+
+ kfree(sev->tio_status);
+ sev->tio_status = NULL;
+
+ mutex_unlock(&sev_cmd_mutex);
+}
+
+void sev_platform_shutdown(void)
+{
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ sev_firmware_shutdown(psp_master->sev_data);
+}
+EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+
+u64 sev_get_snp_policy_bits(void)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ u64 policy_bits;
+
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ return 0;
+
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
+ policy_bits = SNP_POLICY_MASK_BASE;
+
+ if (sev->snp_plat_status.feature_info) {
+ if (sev->snp_feat_info_0.ecx & SNP_RAPL_DISABLE_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_RAPL_DIS;
+
+ if (sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM;
+
+ if (sev->snp_feat_info_0.ecx & SNP_AES_256_XTS_POLICY_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_MEM_AES_256_XTS;
+
+ if (sev->snp_feat_info_0.ecx & SNP_CXL_ALLOW_POLICY_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_CXL_ALLOW;
+
+ if (sev_version_greater_or_equal(1, 58))
+ policy_bits |= SNP_POLICY_MASK_PAGE_SWAP_DISABLE;
+ }
+
+ return policy_bits;
+}
+EXPORT_SYMBOL_GPL(sev_get_snp_policy_bits);
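A hypervisor-side consumer would typically use the returned mask to validate guest-supplied policies; a sketch (guest_policy is hypothetical):

	/* Hypothetical consumer: reject policies the firmware cannot honor. */
	u64 allowed = sev_get_snp_policy_bits();

	if (guest_policy & ~allowed)
		return -EINVAL;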
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
@@ -1287,6 +2868,25 @@ void sev_dev_destroy(struct psp_device *psp)
psp_clear_sev_irq_handler(psp);
}
+static int snp_shutdown_on_panic(struct notifier_block *nb,
+ unsigned long reason, void *arg)
+{
+ struct sev_device *sev = psp_master->sev_data;
+
+ /*
+ * If sev_cmd_mutex is already acquired, then it's likely
+ * another PSP command is in flight and issuing a shutdown
+ * would fail in unexpected ways. Rather than create even
+ * more confusion during a panic, just bail out here.
+ */
+ if (mutex_is_locked(&sev_cmd_mutex))
+ return NOTIFY_DONE;
+
+ __sev_firmware_shutdown(sev, true);
+
+ return NOTIFY_DONE;
+}
+
int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
void *data, int *error)
{
@@ -1300,7 +2900,7 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
void sev_pci_init(void)
{
struct sev_device *sev = psp_master->sev_data;
- int error, rc;
+ u8 api_major, api_minor, build;
if (!sev)
return;
@@ -1310,39 +2910,24 @@ void sev_pci_init(void)
if (sev_get_api_version())
goto err;
+ api_major = sev->api_major;
+ api_minor = sev->api_minor;
+ build = sev->build;
+
if (sev_update_firmware(sev->dev) == 0)
sev_get_api_version();
- /* If an init_ex_path is provided rely on INIT_EX for PSP initialization
- * instead of INIT.
- */
- if (init_ex_path) {
- sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH);
- if (!sev_init_ex_buffer) {
- dev_err(sev->dev,
- "SEV: INIT_EX NV memory allocation failed\n");
- goto err;
- }
- }
-
- /* Obtain the TMR memory area for SEV-ES use */
- sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
- if (!sev_es_tmr)
- dev_warn(sev->dev,
- "SEV: TMR allocation failed, SEV-ES support unavailable\n");
-
- if (!psp_init_on_probe)
- return;
-
- /* Initialize the platform */
- rc = sev_platform_init(&error);
- if (rc)
- dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n",
- error, rc);
+ if (api_major != sev->api_major || api_minor != sev->api_minor ||
+ build != sev->build)
+ dev_info(sev->dev, "SEV firmware updated from %d.%d.%d to %d.%d.%d\n",
+ api_major, api_minor, build,
+ sev->api_major, sev->api_minor, sev->build);
return;
err:
+ sev_dev_destroy(psp_master);
+
psp_master->sev_data = NULL;
}
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 666c21eb81ab..b1cd556bbbf6 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -25,8 +25,8 @@
#include <linux/miscdevice.h>
#include <linux/capability.h>
+#define SEV_CMDRESP_CMD GENMASK(26, 16)
#define SEV_CMD_COMPLETE BIT(1)
-#define SEV_CMDRESP_CMD_SHIFT 16
#define SEV_CMDRESP_IOC BIT(0)
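With the field expressed as a GENMASK() rather than a bare shift, both directions go through the <linux/bitfield.h> helpers; a sketch:

	/* Pack the command into a CMDRESP register value (sketch). */
	reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC;

	/* Recover the command a completed response belongs to. */
	cmd = FIELD_GET(SEV_CMDRESP_CMD, reg);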
struct sev_misc_dev {
@@ -34,6 +34,8 @@ struct sev_misc_dev {
struct miscdevice misc;
};
+struct sev_tio_status;
+
struct sev_device {
struct device *dev;
struct psp_device *psp;
@@ -42,7 +44,6 @@ struct sev_device {
struct sev_vdata *vdata;
- int state;
unsigned int int_rcvd;
wait_queue_head_t int_queue;
struct sev_misc_dev *misc;
@@ -52,12 +53,34 @@ struct sev_device {
u8 build;
void *cmd_buf;
+ void *cmd_buf_backup;
+ bool cmd_buf_active;
+ bool cmd_buf_backup_active;
+
+ bool snp_initialized;
+
+ struct sev_user_data_status sev_plat_status;
+
+ struct sev_user_data_snp_status snp_plat_status;
+ struct snp_feature_info snp_feat_info_0;
+
+ struct tsm_dev *tsmdev;
+ struct sev_tio_status *tio_status;
};
int sev_dev_init(struct psp_device *psp);
void sev_dev_destroy(struct psp_device *psp);
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
void sev_pci_init(void);
void sev_pci_exit(void);
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
+void snp_free_hv_fixed_pages(struct page *page);
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page);
+void sev_tsm_uninit(struct sev_device *sev);
+int sev_tio_cmd_buffer_len(int cmd);
+
#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sfs.c b/drivers/crypto/ccp/sfs.c
new file mode 100644
index 000000000000..2f4beaafe7ec
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Seamless Firmware Servicing support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "sfs.h"
+#include "sev-dev.h"
+
+#define SFS_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
+#define SFS_MAX_PAYLOAD_SIZE (2 * 1024 * 1024)
+#define SFS_NUM_2MB_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PMD_SIZE)
+#define SFS_NUM_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PAGE_SIZE)
+
+static DEFINE_MUTEX(sfs_ioctl_mutex);
+
+static struct sfs_misc_dev *misc_dev;
+
+static int send_sfs_cmd(struct sfs_device *sfs_dev, int msg)
+{
+ int ret;
+
+ sfs_dev->command_buf->hdr.status = 0;
+ sfs_dev->command_buf->hdr.sub_cmd_id = msg;
+
+ ret = psp_extended_mailbox_cmd(sfs_dev->psp,
+ SFS_DEFAULT_TIMEOUT,
+ (struct psp_ext_request *)sfs_dev->command_buf);
+ if (ret == -EIO) {
+ dev_dbg(sfs_dev->dev,
+ "msg 0x%x failed with PSP error: 0x%x, extended status: 0x%x\n",
+ msg, sfs_dev->command_buf->hdr.status,
+ *(u32 *)sfs_dev->command_buf->buf);
+ }
+
+ return ret;
+}
+
+static int send_sfs_get_fw_versions(struct sfs_device *sfs_dev)
+{
+ /*
+ * SFS_GET_FW_VERSIONS command needs the output buffer to be
+ * initialized to 0xC7 in every byte.
+ */
+ memset(sfs_dev->command_buf->sfs_buffer, 0xc7, PAGE_SIZE);
+ sfs_dev->command_buf->hdr.payload_size = 2 * PAGE_SIZE;
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_GET_FW_VERSIONS);
+}
+
+static int send_sfs_update_package(struct sfs_device *sfs_dev, const char *payload_name)
+{
+ char payload_path[PAYLOAD_NAME_SIZE + sizeof("amd/")];
+ const struct firmware *firmware;
+ unsigned long package_size;
+ int ret;
+
+ /* Sanitize userspace provided payload name */
+ if (!strnchr(payload_name, PAYLOAD_NAME_SIZE, '\0'))
+ return -EINVAL;
+
+ snprintf(payload_path, sizeof(payload_path), "amd/%s", payload_name);
+
+ ret = firmware_request_nowarn(&firmware, payload_path, sfs_dev->dev);
+ if (ret < 0) {
+ dev_warn_ratelimited(sfs_dev->dev, "firmware request failed for %s (%d)\n",
+ payload_path, ret);
+ return -ENOENT;
+ }
+
+ /*
+	 * The SFS Update Package command's input buffer contains the
+	 * TEE_EXT_CMD_BUFFER followed by the Update Package, and its total
+	 * size must be 64KB aligned.
+ */
+ package_size = ALIGN(firmware->size + PAGE_SIZE, 0x10000U);
+
+ /*
+	 * The SFS command buffer is a pre-allocated 2MB buffer; fail the
+	 * update if the SFS payload is larger than that buffer.
+ */
+ if (package_size > SFS_MAX_PAYLOAD_SIZE) {
+ dev_warn_ratelimited(sfs_dev->dev,
+ "SFS payload size %ld larger than maximum supported payload size of %u\n",
+ package_size, SFS_MAX_PAYLOAD_SIZE);
+ release_firmware(firmware);
+ return -E2BIG;
+ }
+
+ /*
+ * Copy firmware data to a HV_Fixed memory region.
+ */
+ memcpy(sfs_dev->command_buf->sfs_buffer, firmware->data, firmware->size);
+ sfs_dev->command_buf->hdr.payload_size = package_size;
+
+ release_firmware(firmware);
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_UPDATE);
+}
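As a worked example of the sizing above (payload value illustrative): a 100000-byte payload plus the PAGE_SIZE command header gives 104096 bytes, which rounds up to two 64KB units:

	size_t payload = 100000;		/* illustrative */
	unsigned long package_size = ALIGN(payload + PAGE_SIZE, 0x10000U);
	/* ALIGN(104096, 65536) == 131072, well under SFS_MAX_PAYLOAD_SIZE */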
+
+static long sfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct sfs_user_get_fw_versions __user *sfs_get_fw_versions;
+ struct sfs_user_update_package __user *sfs_update_package;
+ struct psp_device *psp_master = psp_get_master_device();
+ char payload_name[PAYLOAD_NAME_SIZE];
+ struct sfs_device *sfs_dev;
+ int ret = 0;
+
+ if (!psp_master || !psp_master->sfs_data)
+ return -ENODEV;
+
+ sfs_dev = psp_master->sfs_data;
+
+ guard(mutex)(&sfs_ioctl_mutex);
+
+ switch (cmd) {
+ case SFSIOCFWVERS:
+ dev_dbg(sfs_dev->dev, "in SFSIOCFWVERS\n");
+
+ sfs_get_fw_versions = (struct sfs_user_get_fw_versions __user *)arg;
+
+ ret = send_sfs_get_fw_versions(sfs_dev);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+ * Return SFS status and extended status back to userspace
+ * if PSP status indicated success or command error.
+ */
+ if (copy_to_user(&sfs_get_fw_versions->blob, sfs_dev->command_buf->sfs_buffer,
+ PAGE_SIZE))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_get_fw_versions->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_get_fw_versions->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ case SFSIOCUPDATEPKG:
+ dev_dbg(sfs_dev->dev, "in SFSIOCUPDATEPKG\n");
+
+ sfs_update_package = (struct sfs_user_update_package __user *)arg;
+
+ if (copy_from_user(payload_name, sfs_update_package->payload_name,
+ PAYLOAD_NAME_SIZE))
+ return -EFAULT;
+
+ ret = send_sfs_update_package(sfs_dev, payload_name);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+ * Return SFS status and extended status back to userspace
+ * if PSP status indicated success or command error.
+ */
+ if (copy_to_user(&sfs_update_package->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_update_package->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_update_package->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_update_package->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct file_operations sfs_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sfs_ioctl,
+};
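Userspace drives both commands through the /dev/sfs character device; a minimal sketch, assuming the uapi layout implied by the copy_to_user() calls above (see include/uapi/linux/psp-sfs.h for the authoritative definitions):

	/* Hypothetical userspace usage of SFSIOCFWVERS. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/psp-sfs.h>

	struct sfs_user_get_fw_versions vers = {};
	int fd = open("/dev/sfs", O_RDWR);

	if (fd >= 0 && !ioctl(fd, SFSIOCFWVERS, &vers))
		printf("sfs status %u\n", vers.sfs_status);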
+
+static void sfs_exit(struct kref *ref)
+{
+ misc_deregister(&misc_dev->misc);
+ kfree(misc_dev);
+ misc_dev = NULL;
+}
+
+void sfs_dev_destroy(struct psp_device *psp)
+{
+ struct sfs_device *sfs_dev = psp->sfs_data;
+
+ if (!sfs_dev)
+ return;
+
+ /*
+ * Change SFS command buffer back to the default "Write-Back" type.
+ */
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+ snp_free_hv_fixed_pages(sfs_dev->page);
+
+ if (sfs_dev->misc)
+ kref_put(&misc_dev->refcount, sfs_exit);
+
+ psp->sfs_data = NULL;
+}
+
+/* Based on sev_misc_init() */
+static int sfs_misc_init(struct sfs_device *sfs)
+{
+ struct device *dev = sfs->dev;
+ int ret;
+
+ /*
+	 * SFS feature support can be detected on multiple devices, but the SFS
+	 * FW commands must be issued on the master. During probe we do not yet
+	 * know the master, hence /dev/sfs is created on the first device probed.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = "sfs";
+ misc->fops = &sfs_fops;
+ misc->mode = 0600;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ sfs->misc = misc_dev;
+ dev_dbg(dev, "registered SFS device\n");
+
+ return 0;
+}
+
+int sfs_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sfs_device *sfs_dev;
+ struct page *page;
+ int ret = -ENOMEM;
+
+ sfs_dev = devm_kzalloc(dev, sizeof(*sfs_dev), GFP_KERNEL);
+ if (!sfs_dev)
+ return -ENOMEM;
+
+ /*
+	 * Pre-allocate a 2MB command buffer for all SFS commands using the
+	 * SNP HV_Fixed page allocator, which also transitions the SFS
+	 * command buffer to the HV_Fixed page state if SNP is enabled.
+ */
+ page = snp_alloc_hv_fixed_pages(SFS_NUM_2MB_PAGES_CMDBUF);
+ if (!page) {
+ dev_dbg(dev, "Command Buffer HV-Fixed page allocation failed\n");
+ goto cleanup_dev;
+ }
+ sfs_dev->page = page;
+ sfs_dev->command_buf = page_address(page);
+
+ dev_dbg(dev, "Command buffer 0x%px to be marked as HV_Fixed\n", sfs_dev->command_buf);
+
+ /*
+ * SFS command buffer must be mapped as non-cacheable.
+ */
+ ret = set_memory_uc((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+ if (ret) {
+ dev_dbg(dev, "Set memory uc failed\n");
+ goto cleanup_cmd_buf;
+ }
+
+ dev_dbg(dev, "Command buffer 0x%px marked uncacheable\n", sfs_dev->command_buf);
+
+ psp->sfs_data = sfs_dev;
+ sfs_dev->dev = dev;
+ sfs_dev->psp = psp;
+
+ ret = sfs_misc_init(sfs_dev);
+ if (ret)
+ goto cleanup_mem_attr;
+
+ dev_notice(sfs_dev->dev, "SFS support is available\n");
+
+ return 0;
+
+cleanup_mem_attr:
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+cleanup_cmd_buf:
+ snp_free_hv_fixed_pages(page);
+
+cleanup_dev:
+ psp->sfs_data = NULL;
+ devm_kfree(dev, sfs_dev);
+
+ return ret;
+}
diff --git a/drivers/crypto/ccp/sfs.h b/drivers/crypto/ccp/sfs.h
new file mode 100644
index 000000000000..97704c210efd
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Seamless Firmware (SFS) Support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#ifndef __SFS_H__
+#define __SFS_H__
+
+#include <uapi/linux/psp-sfs.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-platform-access.h>
+#include <linux/set_memory.h>
+
+#include "psp-dev.h"
+
+struct sfs_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sfs_command {
+ struct psp_ext_req_buffer_hdr hdr;
+ u8 buf[PAGE_SIZE - sizeof(struct psp_ext_req_buffer_hdr)];
+ u8 sfs_buffer[];
+} __packed;
+
+struct sfs_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ struct page *page;
+ struct sfs_command *command_buf;
+
+ struct sfs_misc_dev *misc;
+};
+
+void sfs_dev_destroy(struct psp_device *psp);
+int sfs_dev_init(struct psp_device *psp);
+
+#endif /* __SFS_H__ */
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index 7eb3e4668286..3467f6db4f50 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/ccp.h>
+#include "sev-dev.h"
#include "ccp-dev.h"
#include "sp-dev.h"
@@ -253,8 +254,12 @@ unlock:
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
+ static bool initialized;
int ret;
+ if (initialized)
+ return 0;
+
ret = sp_pci_init();
if (ret)
return ret;
@@ -263,6 +268,8 @@ static int __init sp_mod_init(void)
psp_pci_init();
#endif
+ initialized = true;
+
return 0;
#endif
@@ -279,6 +286,13 @@ static int __init sp_mod_init(void)
return -ENODEV;
}
+#if IS_BUILTIN(CONFIG_KVM_AMD) && IS_ENABLED(CONFIG_KVM_AMD_SEV)
+int __init sev_module_init(void)
+{
+ return sp_mod_init();
+}
+#endif
+
static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 20377e67f65d..1335a83fe052 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -28,6 +28,11 @@
#define CACHE_NONE 0x00
#define CACHE_WB_NO_ALLOC 0xb7
+#define PLATFORM_FEATURE_DBC 0x1
+#define PLATFORM_FEATURE_HSTI 0x2
+
+#define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat)
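Sub-device init code can then gate on the per-platform feature mask; a one-line usage sketch (dbc_dev_init() is assumed from dbc.c in this series):

	if (PSP_FEATURE(psp, DBC))
		ret = dbc_dev_init(psp);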
+
/* Structure to hold CCP device data */
struct ccp_device;
struct ccp_vdata {
@@ -51,14 +56,30 @@ struct tee_vdata {
const unsigned int cmdbuff_addr_hi_reg;
const unsigned int ring_wptr_reg;
const unsigned int ring_rptr_reg;
+ const unsigned int info_reg;
+};
+
+struct platform_access_vdata {
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
+ const unsigned int doorbell_button_reg;
+ const unsigned int doorbell_cmd_reg;
+
};
struct psp_vdata {
const struct sev_vdata *sev;
const struct tee_vdata *tee;
+ const struct platform_access_vdata *platform_access;
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
+ const unsigned int bootloader_info_reg;
+ const unsigned int platform_features;
};
/* Structure to hold SP device data */
@@ -74,7 +95,7 @@ struct sp_device {
struct device *dev;
- struct sp_dev_vdata *dev_vdata;
+ const struct sp_dev_vdata *dev_vdata;
unsigned int ord;
char name[SP_MAX_NAME_LEN];
@@ -117,7 +138,6 @@ struct sp_device *sp_alloc_struct(struct device *dev);
int sp_init(struct sp_device *sp);
void sp_destroy(struct sp_device *sp);
-struct sp_device *sp_get_master(void);
int sp_suspend(struct sp_device *sp);
int sp_resume(struct sp_device *sp);
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 084d052fddcc..8891ceee1d7d 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -8,6 +8,7 @@
* Author: Gary R Hook <gary.hook@amd.com>
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
@@ -23,6 +24,13 @@
#include "ccp-dev.h"
#include "psp-dev.h"
+#include "hsti.h"
+
+/* used for version string AA.BB.CC.DD */
+#define AA GENMASK(31, 24)
+#define BB GENMASK(23, 16)
+#define CC GENMASK(15, 8)
+#define DD GENMASK(7, 0)
#define MSIX_VECTORS 2
@@ -32,64 +40,66 @@ struct sp_pci {
};
static struct sp_device *sp_dev_master;
-#define attribute_show(name, def) \
+#define version_attribute_show(name, _offset) \
static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
char *buf) \
{ \
struct sp_device *sp = dev_get_drvdata(d); \
struct psp_device *psp = sp->psp_data; \
- int bit = PSP_SECURITY_##def << PSP_CAPABILITY_PSP_SECURITY_OFFSET; \
- return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \
+ unsigned int val = ioread32(psp->io_regs + _offset); \
+ return sysfs_emit(buf, "%02lx.%02lx.%02lx.%02lx\n", \
+ FIELD_GET(AA, val), \
+ FIELD_GET(BB, val), \
+ FIELD_GET(CC, val), \
+ FIELD_GET(DD, val)); \
}
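For instance (value illustrative), a register reading 0x0a0b0c0d decodes field by field:

	unsigned int val = 0x0a0b0c0d;
	/*
	 * FIELD_GET(AA, val) == 0x0a ... FIELD_GET(DD, val) == 0x0d,
	 * emitted as "0a.0b.0c.0d".
	 */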
-attribute_show(fused_part, FUSED_PART)
-static DEVICE_ATTR_RO(fused_part);
-attribute_show(debug_lock_on, DEBUG_LOCK_ON)
-static DEVICE_ATTR_RO(debug_lock_on);
-attribute_show(tsme_status, TSME_STATUS)
-static DEVICE_ATTR_RO(tsme_status);
-attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
-static DEVICE_ATTR_RO(anti_rollback_status);
-attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
-static DEVICE_ATTR_RO(rpmc_production_enabled);
-attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
-static DEVICE_ATTR_RO(rpmc_spirom_available);
-attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
-static DEVICE_ATTR_RO(hsp_tpm_available);
-attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
-static DEVICE_ATTR_RO(rom_armor_enforced);
-
-static struct attribute *psp_attrs[] = {
- &dev_attr_fused_part.attr,
- &dev_attr_debug_lock_on.attr,
- &dev_attr_tsme_status.attr,
- &dev_attr_anti_rollback_status.attr,
- &dev_attr_rpmc_production_enabled.attr,
- &dev_attr_rpmc_spirom_available.attr,
- &dev_attr_hsp_tpm_available.attr,
- &dev_attr_rom_armor_enforced.attr,
- NULL
+version_attribute_show(bootloader_version, psp->vdata->bootloader_info_reg)
+static DEVICE_ATTR_RO(bootloader_version);
+version_attribute_show(tee_version, psp->vdata->tee->info_reg)
+static DEVICE_ATTR_RO(tee_version);
+
+static struct attribute *psp_firmware_attrs[] = {
+ &dev_attr_bootloader_version.attr,
+ &dev_attr_tee_version.attr,
+ NULL,
};
-static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct sp_device *sp = dev_get_drvdata(dev);
struct psp_device *psp = sp->psp_data;
+ unsigned int val = 0xffffffff;
+
+ if (!psp)
+ return 0;
- if (psp && (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING))
+ if (attr == &dev_attr_bootloader_version.attr &&
+ psp->vdata->bootloader_info_reg)
+ val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg);
+
+ if (attr == &dev_attr_tee_version.attr && psp->capability.tee &&
+ psp->vdata->tee->info_reg)
+ val = ioread32(psp->io_regs + psp->vdata->tee->info_reg);
+
+ /* If platform disallows accessing this register it will be all f's */
+ if (val != 0xffffffff)
return 0444;
return 0;
}
-static struct attribute_group psp_attr_group = {
- .attrs = psp_attrs,
- .is_visible = psp_security_is_visible,
+static struct attribute_group psp_firmware_attr_group = {
+ .attrs = psp_firmware_attrs,
+ .is_visible = psp_firmware_is_visible,
};
static const struct attribute_group *psp_groups[] = {
- &psp_attr_group,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ &psp_security_attr_group,
+#endif
+ &psp_firmware_attr_group,
NULL,
};
@@ -179,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp)
pdev_new = to_pci_dev(dev_new);
pdev_cur = to_pci_dev(dev_cur);
- if (pdev_new->bus->number < pdev_cur->bus->number)
- return true;
+ if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus))
+ return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus);
- if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
- return true;
+ if (pdev_new->bus->number != pdev_cur->bus->number)
+ return pdev_new->bus->number < pdev_cur->bus->number;
- if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
- return true;
+ if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
+ return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
+
+ if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn))
+ return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);
return false;
}
@@ -342,52 +355,119 @@ static int __maybe_unused sp_pci_resume(struct device *dev)
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
static const struct sev_vdata sevv1 = {
- .cmdresp_reg = 0x10580,
- .cmdbuff_addr_lo_reg = 0x105e0,
- .cmdbuff_addr_hi_reg = 0x105e4,
+ .cmdresp_reg = 0x10580, /* C2PMSG_32 */
+ .cmdbuff_addr_lo_reg = 0x105e0, /* C2PMSG_56 */
+ .cmdbuff_addr_hi_reg = 0x105e4, /* C2PMSG_57 */
};
static const struct sev_vdata sevv2 = {
- .cmdresp_reg = 0x10980,
- .cmdbuff_addr_lo_reg = 0x109e0,
- .cmdbuff_addr_hi_reg = 0x109e4,
+ .cmdresp_reg = 0x10980, /* C2PMSG_32 */
+ .cmdbuff_addr_lo_reg = 0x109e0, /* C2PMSG_56 */
+ .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */
};
static const struct tee_vdata teev1 = {
- .cmdresp_reg = 0x10544,
- .cmdbuff_addr_lo_reg = 0x10548,
- .cmdbuff_addr_hi_reg = 0x1054c,
- .ring_wptr_reg = 0x10550,
- .ring_rptr_reg = 0x10554,
+ .ring_wptr_reg = 0x10550, /* C2PMSG_20 */
+ .ring_rptr_reg = 0x10554, /* C2PMSG_21 */
+ .info_reg = 0x109e8, /* C2PMSG_58 */
+};
+
+static const struct tee_vdata teev2 = {
+ .ring_wptr_reg = 0x10950, /* C2PMSG_20 */
+ .ring_rptr_reg = 0x10954, /* C2PMSG_21 */
+ .info_reg = 0x109e8, /* C2PMSG_58 */
+};
+
+static const struct platform_access_vdata pa_v1 = {
+ .cmdresp_reg = 0x10570, /* C2PMSG_28 */
+ .cmdbuff_addr_lo_reg = 0x10574, /* C2PMSG_29 */
+ .cmdbuff_addr_hi_reg = 0x10578, /* C2PMSG_30 */
+ .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */
+ .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
+};
+
+static const struct platform_access_vdata pa_v2 = {
+ .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */
+ .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */
};
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
- .feature_reg = 0x105fc,
- .inten_reg = 0x10610,
- .intsts_reg = 0x10614,
+ .bootloader_info_reg = 0x105ec, /* C2PMSG_59 */
+ .feature_reg = 0x105fc, /* C2PMSG_63 */
+ .inten_reg = 0x10610, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */
};
static const struct psp_vdata pspv2 = {
.sev = &sevv2,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .platform_access = &pa_v1,
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
+ .platform_features = PLATFORM_FEATURE_HSTI,
};
static const struct psp_vdata pspv3 = {
.tee = &teev1,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .platform_access = &pa_v1,
+ .cmdresp_reg = 0x10544, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
+ .platform_features = PLATFORM_FEATURE_DBC |
+ PLATFORM_FEATURE_HSTI,
};
static const struct psp_vdata pspv4 = {
.sev = &sevv2,
.tee = &teev1,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .cmdresp_reg = 0x10544, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
+};
+
+static const struct psp_vdata pspv5 = {
+ .tee = &teev2,
+ .platform_access = &pa_v2,
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
+static const struct psp_vdata pspv6 = {
+ .sev = &sevv2,
+ .tee = &teev2,
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
+static const struct psp_vdata pspv7 = {
+ .tee = &teev2,
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
};
#endif
@@ -444,6 +524,25 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv3,
#endif
},
+ { /* 7 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv5,
+#endif
+ },
+ { /* 8 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv6,
+#endif
+ },
+ { /* 9 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv7,
+#endif
+ },
+
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
@@ -451,9 +550,14 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
- { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
+ { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] },
+ { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
+ { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x115A), (kernel_ulong_t)&dev_vdata[9] },
/* Last entry must be zero */
{ 0, }
};
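Each ID in this table carries a pointer to one of the dev_vdata entries above via driver_data, which probe casts back to select the per-generation register layout. A hedged sketch of that round-trip (example_pci_probe is hypothetical, not the driver's probe):

/* driver_data round-trip: stored as kernel_ulong_t in the match table,
 * recovered in probe.  Hypothetical function for illustration.
 */
static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	const struct sp_dev_vdata *vdata =
		(const struct sp_dev_vdata *)id->driver_data;

	if (!vdata)
		return -ENODEV;	/* no per-generation data, refuse to bind */

	/* ... map BAR vdata->bar and hand vdata->psp_vdata to psp-dev ... */
	return 0;
}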
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 7d79a8744f9a..3f9843fa7782 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -39,44 +39,27 @@ static const struct sp_dev_vdata dev_vdata[] = {
},
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id sp_acpi_match[] = {
{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
{ },
};
MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id sp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a",
.data = (const void *)&dev_vdata[0] },
{ },
};
MODULE_DEVICE_TABLE(of, sp_of_match);
-#endif
-
-static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
-{
-#ifdef CONFIG_OF
- const struct of_device_id *match;
-
- match = of_match_node(sp_of_match, pdev->dev.of_node);
- if (match && match->data)
- return (struct sp_dev_vdata *)match->data;
-#endif
- return NULL;
-}
-static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+static const struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
{
-#ifdef CONFIG_ACPI
const struct acpi_device_id *match;
match = acpi_match_device(sp_acpi_match, &pdev->dev);
if (match && match->driver_data)
- return (struct sp_dev_vdata *)match->driver_data;
-#endif
+ return (const struct sp_dev_vdata *)match->driver_data;
+
return NULL;
}
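The removed OF helper is subsumed by of_device_get_match_data(), which performs the of_match_node() walk against the driver's of_match_table and returns the matched entry's ->data, or NULL when nothing matches. A minimal sketch of the equivalence (example_of_vdata is hypothetical):

/* Illustrative stand-in for the removed sp_get_of_version(). */
#include <linux/of_device.h>

static const struct sp_dev_vdata *example_of_vdata(struct device *dev)
{
	return of_device_get_match_data(dev);	/* NULL when unmatched */
}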
@@ -129,7 +112,7 @@ static int sp_platform_probe(struct platform_device *pdev)
goto e_err;
sp->dev_specific = sp_platform;
- sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+ sp->dev_vdata = pdev->dev.of_node ? of_device_get_match_data(&pdev->dev)
: sp_get_acpi_version(pdev);
if (!sp->dev_vdata) {
ret = -ENODEV;
@@ -180,7 +163,7 @@ e_err:
return ret;
}
-static int sp_platform_remove(struct platform_device *pdev)
+static void sp_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sp_device *sp = dev_get_drvdata(dev);
@@ -188,8 +171,6 @@ static int sp_platform_remove(struct platform_device *pdev)
sp_destroy(sp);
dev_notice(dev, "disabled\n");
-
- return 0;
}
#ifdef CONFIG_PM
@@ -214,12 +195,8 @@ static int sp_platform_resume(struct platform_device *pdev)
static struct platform_driver sp_platform_driver = {
.driver = {
.name = "ccp",
-#ifdef CONFIG_ACPI
.acpi_match_table = sp_acpi_match,
-#endif
-#ifdef CONFIG_OF
.of_match_table = sp_of_match,
-#endif
},
.probe = sp_platform_probe,
.remove = sp_platform_remove,
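Both cleanups in this file rest on unconditional infrastructure: struct device_driver declares .acpi_match_table and .of_match_table regardless of CONFIG_ACPI/CONFIG_OF (the match helpers compile to NULL-returning stubs when disabled), and the platform core never consumed remove()'s return value, hence the conversion to void. A skeletal sketch of the resulting driver shape, with hypothetical names:

/* Skeleton only: unconditional match-table references plus the
 * void-returning remove() this diff converges on.
 */
#include <linux/platform_device.h>

static int example_platform_probe(struct platform_device *pdev)
{
	return 0;	/* bind unconditionally, for illustration */
}

static void example_platform_remove(struct platform_device *pdev)
{
	/* unwind probe; no status to report back to the core */
}

static struct platform_driver example_platform_driver = {
	.driver = {
		.name = "example",
	},
	.probe = example_platform_probe,
	.remove = example_platform_remove,
};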
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
index 5c9d47f3be37..5e1d80724678 100644
--- a/drivers/crypto/ccp/tee-dev.c
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -8,12 +8,13 @@
* Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/gfp.h>
-#include <linux/psp-sev.h>
+#include <linux/psp.h>
#include <linux/psp-tee.h>
#include "psp-dev.h"
@@ -61,26 +62,6 @@ static void tee_free_ring(struct psp_tee_device *tee)
mutex_destroy(&rb_mgr->mutex);
}
-static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
- unsigned int *reg)
-{
- /* ~10ms sleep per loop => nloop = timeout * 100 */
- int nloop = timeout * 100;
-
- while (--nloop) {
- *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
- if (*reg & PSP_CMDRESP_RESP)
- return 0;
-
- usleep_range(10000, 10100);
- }
-
- dev_err(tee->dev, "tee: command timed out, disabling PSP\n");
- psp_dead = true;
-
- return -ETIMEDOUT;
-}
-
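The open-coded poll removed here is replaced by the shared psp_mailbox_command() helper, which writes the command buffer address and command word into the PSP mailbox and polls the response register against a millisecond budget. A simplified sketch of the polling idea (not the helper's actual body):

/* Simplified poll sketch: spin on the response register until the
 * RESP bit rises or the millisecond budget runs out.
 */
static int example_mailbox_poll(void __iomem *cmdresp_reg,
				unsigned int timeout_msecs,
				unsigned int *reg)
{
	while (timeout_msecs--) {
		*reg = ioread32(cmdresp_reg);
		if (*reg & PSP_CMDRESP_RESP)
			return 0;
		usleep_range(1000, 1100);	/* ~1 ms per iteration */
	}
	return -ETIMEDOUT;
}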
static
struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee)
{
@@ -109,7 +90,6 @@ static int tee_init_ring(struct psp_tee_device *tee)
{
int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd);
struct tee_init_ring_cmd *cmd;
- phys_addr_t cmd_buffer;
unsigned int reg;
int ret;
@@ -129,29 +109,21 @@ static int tee_init_ring(struct psp_tee_device *tee)
return -ENOMEM;
}
- cmd_buffer = __psp_pa((void *)cmd);
-
/* Send command buffer details to Trusted OS by writing to
* CPU-PSP message registers
*/
-
- iowrite32(lower_32_bits(cmd_buffer),
- tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg);
- iowrite32(upper_32_bits(cmd_buffer),
- tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg);
- iowrite32(TEE_RING_INIT_CMD,
- tee->io_regs + tee->vdata->cmdresp_reg);
-
- ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ ret = psp_mailbox_command(tee->psp, PSP_CMD_TEE_RING_INIT, cmd,
+ TEE_DEFAULT_CMD_TIMEOUT, &reg);
if (ret) {
- dev_err(tee->dev, "tee: ring init command timed out\n");
+ dev_err(tee->dev, "tee: ring init command timed out, disabling TEE support\n");
tee_free_ring(tee);
+ psp_dead = true;
goto free_buf;
}
- if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_err(tee->dev, "tee: ring init command failed (%#010x)\n",
- reg & PSP_CMDRESP_ERR_MASK);
+ if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_err(tee->dev, "tee: ring init command failed (%#010lx)\n",
+ FIELD_GET(PSP_CMDRESP_STS, reg));
tee_free_ring(tee);
ret = -EIO;
}
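PSP_CMDRESP_STS is a GENMASK()-defined status field in the response word; FIELD_GET() masks and shifts it in one step and evaluates to unsigned long, which is why the format specifier changes from %#010x to %#010lx. A self-contained illustration with a stand-in mask:

/* Stand-in illustration of the FIELD_GET() idiom adopted above. */
#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_STS	GENMASK(15, 0)	/* stand-in for PSP_CMDRESP_STS */

static unsigned long example_status(unsigned int reg)
{
	return FIELD_GET(EXAMPLE_STS, reg);	/* masked and shifted */
}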
@@ -173,15 +145,14 @@ static void tee_destroy_ring(struct psp_tee_device *tee)
if (psp_dead)
goto free_ring;
- iowrite32(TEE_RING_DESTROY_CMD,
- tee->io_regs + tee->vdata->cmdresp_reg);
-
- ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ ret = psp_mailbox_command(tee->psp, PSP_CMD_TEE_RING_DESTROY, NULL,
+ TEE_DEFAULT_CMD_TIMEOUT, &reg);
if (ret) {
- dev_err(tee->dev, "tee: ring destroy command timed out\n");
- } else if (reg & PSP_CMDRESP_ERR_MASK) {
- dev_err(tee->dev, "tee: ring destroy command failed (%#010x)\n",
- reg & PSP_CMDRESP_ERR_MASK);
+ dev_err(tee->dev, "tee: ring destroy command timed out, disabling TEE support\n");
+ psp_dead = true;
+ } else if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ dev_err(tee->dev, "tee: ring destroy command failed (%#010lx)\n",
+ FIELD_GET(PSP_CMDRESP_STS, reg));
}
free_ring:
@@ -369,7 +340,7 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
if (ret)
return ret;
- ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+ ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_RING_TIMEOUT);
if (ret) {
resp->flag = CMD_RESPONSE_TIMEDOUT;
return ret;
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
index 49d26158b71e..ea9a2b7c05f5 100644
--- a/drivers/crypto/ccp/tee-dev.h
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -17,22 +17,11 @@
#include <linux/device.h>
#include <linux/mutex.h>
-#define TEE_DEFAULT_TIMEOUT 10
+#define TEE_DEFAULT_CMD_TIMEOUT (10 * MSEC_PER_SEC)
+#define TEE_DEFAULT_RING_TIMEOUT 10
#define MAX_BUFFER_SIZE 988
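The old single TEE_DEFAULT_TIMEOUT splits by unit: the command timeout is now expressed in milliseconds to match psp_mailbox_command()'s timeout parameter, while the ring timeout keeps the old second-based value consumed by tee_wait_cmd_completion(). An illustrative restatement of the two budgets:

/* Illustrative restatement of the two timeout budgets above. */
#include <linux/time64.h>	/* MSEC_PER_SEC */

#define EXAMPLE_CMD_TIMEOUT_MS	(10 * MSEC_PER_SEC)	/* 10,000 ms */
#define EXAMPLE_RING_TIMEOUT_S	10			/* seconds */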
/**
- * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
- * @TEE_RING_INIT_CMD: Initialize ring buffer
- * @TEE_RING_DESTROY_CMD: Destroy ring buffer
- * @TEE_RING_MAX_CMD: Maximum command id
- */
-enum tee_ring_cmd_id {
- TEE_RING_INIT_CMD = 0x00010000,
- TEE_RING_DESTROY_CMD = 0x00020000,
- TEE_RING_MAX_CMD = 0x000F0000,
-};
-
-/**
* struct tee_init_ring_cmd - Command to init TEE ring buffer
* @low_addr: bits [31:0] of the physical address of ring buffer
* @hi_addr: bits [63:32] of the physical address of ring buffer