Diffstat (limited to 'drivers/virt/coco')
-rw-r--r-- | drivers/virt/coco/Kconfig | 4
-rw-r--r-- | drivers/virt/coco/Makefile | 2
-rw-r--r-- | drivers/virt/coco/arm-cca-guest/Kconfig | 10
-rw-r--r-- | drivers/virt/coco/arm-cca-guest/Makefile | 2
-rw-r--r-- | drivers/virt/coco/arm-cca-guest/arm-cca-guest.c | 232
-rw-r--r-- | drivers/virt/coco/efi_secret/efi_secret.c | 3
-rw-r--r-- | drivers/virt/coco/pkvm-guest/Kconfig | 10
-rw-r--r-- | drivers/virt/coco/pkvm-guest/Makefile | 2
-rw-r--r-- | drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c | 123
-rw-r--r-- | drivers/virt/coco/sev-guest/Kconfig | 3
-rw-r--r-- | drivers/virt/coco/sev-guest/sev-guest.c | 962
-rw-r--r-- | drivers/virt/coco/sev-guest/sev-guest.h | 63
-rw-r--r-- | drivers/virt/coco/tdx-guest/tdx-guest.c | 31
-rw-r--r-- | drivers/virt/coco/tsm.c | 177
14 files changed, 869 insertions, 755 deletions
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig index 87d142c1f932..ff869d883d95 100644 --- a/drivers/virt/coco/Kconfig +++ b/drivers/virt/coco/Kconfig @@ -9,6 +9,10 @@ config TSM_REPORTS source "drivers/virt/coco/efi_secret/Kconfig" +source "drivers/virt/coco/pkvm-guest/Kconfig" + source "drivers/virt/coco/sev-guest/Kconfig" source "drivers/virt/coco/tdx-guest/Kconfig" + +source "drivers/virt/coco/arm-cca-guest/Kconfig" diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile index 18c1aba5edb7..c3d07cfc087e 100644 --- a/drivers/virt/coco/Makefile +++ b/drivers/virt/coco/Makefile @@ -4,5 +4,7 @@ # obj-$(CONFIG_TSM_REPORTS) += tsm.o obj-$(CONFIG_EFI_SECRET) += efi_secret/ +obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/ obj-$(CONFIG_SEV_GUEST) += sev-guest/ obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/ +obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/ diff --git a/drivers/virt/coco/arm-cca-guest/Kconfig b/drivers/virt/coco/arm-cca-guest/Kconfig new file mode 100644 index 000000000000..3f0f013f03f1 --- /dev/null +++ b/drivers/virt/coco/arm-cca-guest/Kconfig @@ -0,0 +1,10 @@ +config ARM_CCA_GUEST + tristate "Arm CCA Guest driver" + depends on ARM64 + select TSM_REPORTS + help + The driver provides userspace interface to request and + attestation report from the Realm Management Monitor(RMM). + + If you choose 'M' here, this module will be called + arm-cca-guest. diff --git a/drivers/virt/coco/arm-cca-guest/Makefile b/drivers/virt/coco/arm-cca-guest/Makefile new file mode 100644 index 000000000000..69eeba08e98a --- /dev/null +++ b/drivers/virt/coco/arm-cca-guest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest.o diff --git a/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c new file mode 100644 index 000000000000..87f162736b2e --- /dev/null +++ b/drivers/virt/coco/arm-cca-guest/arm-cca-guest.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 ARM Ltd. + */ + +#include <linux/arm-smccc.h> +#include <linux/cc_platform.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/smp.h> +#include <linux/tsm.h> +#include <linux/types.h> + +#include <asm/rsi.h> + +/** + * struct arm_cca_token_info - a descriptor for the token buffer. + * @challenge: Pointer to the challenge data + * @challenge_size: Size of the challenge data + * @granule: PA of the granule to which the token will be written + * @offset: Offset within granule to start of buffer in bytes + * @result: result of rsi_attestation_token_continue operation + */ +struct arm_cca_token_info { + void *challenge; + unsigned long challenge_size; + phys_addr_t granule; + unsigned long offset; + unsigned long result; +}; + +static void arm_cca_attestation_init(void *param) +{ + struct arm_cca_token_info *info; + + info = (struct arm_cca_token_info *)param; + + info->result = rsi_attestation_token_init(info->challenge, + info->challenge_size); +} + +/** + * arm_cca_attestation_continue - Retrieve the attestation token data. + * + * @param: pointer to the arm_cca_token_info + * + * Attestation token generation is a long running operation and therefore + * the token data may not be retrieved in a single call. Moreover, the + * token retrieval operation must be requested on the same CPU on which the + * attestation token generation was initialised. 
+ * This helper function is therefore scheduled on the same CPU multiple + * times until the entire token data is retrieved. + */ +static void arm_cca_attestation_continue(void *param) +{ + unsigned long len; + unsigned long size; + struct arm_cca_token_info *info; + + info = (struct arm_cca_token_info *)param; + + size = RSI_GRANULE_SIZE - info->offset; + info->result = rsi_attestation_token_continue(info->granule, + info->offset, size, &len); + info->offset += len; +} + +/** + * arm_cca_report_new - Generate a new attestation token. + * + * @report: pointer to the TSM report context information. + * @data: pointer to the context specific data for this module. + * + * Initialise the attestation token generation using the challenge data + * passed in the TSM descriptor. Allocate memory for the attestation token + * and schedule calls to retrieve the attestation token on the same CPU + * on which the attestation token generation was initialised. + * + * The challenge data must be at least 32 bytes and no more than 64 bytes. If + * less than 64 bytes are provided it will be zero padded to 64 bytes. + * + * Return: + * * %0 - Attestation token generated successfully. + * * %-EINVAL - A parameter was not valid. + * * %-ENOMEM - Out of memory. + * * %-EFAULT - Failed to get IPA for memory page(s). + * * A negative status code as returned by smp_call_function_single(). + */ +static int arm_cca_report_new(struct tsm_report *report, void *data) +{ + int ret; + int cpu; + long max_size; + unsigned long token_size = 0; + struct arm_cca_token_info info; + void *buf; + u8 *token __free(kvfree) = NULL; + struct tsm_desc *desc = &report->desc; + + if (desc->inblob_len < 32 || desc->inblob_len > 64) + return -EINVAL; + + /* + * The attestation token 'init' and 'continue' calls must be + * performed on the same CPU. smp_call_function_single() is used + * instead of simply calling get_cpu() because of the need to + * allocate outblob based on the returned value from the 'init' + * call and that cannot be done in an atomic context. + */ + cpu = smp_processor_id(); + + info.challenge = desc->inblob; + info.challenge_size = desc->inblob_len; + + ret = smp_call_function_single(cpu, arm_cca_attestation_init, + &info, true); + if (ret) + return ret; + max_size = info.result; + + if (max_size <= 0) + return -EINVAL; + + /* Allocate outblob */ + token = kvzalloc(max_size, GFP_KERNEL); + if (!token) + return -ENOMEM; + + /* + * Since the outblob may not be physically contiguous, use a page + * to bounce the buffer from RMM. + */ + buf = alloc_pages_exact(RSI_GRANULE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Get the PA of the memory page(s) that were allocated */ + info.granule = (unsigned long)virt_to_phys(buf); + + /* Loop until the token is ready or there is an error */ + do { + /* Retrieve one RSI_GRANULE_SIZE data per loop iteration */ + info.offset = 0; + do { + /* + * Schedule a call to retrieve a sub-granule chunk + * of data per loop iteration. + */ + ret = smp_call_function_single(cpu, + arm_cca_attestation_continue, + (void *)&info, true); + if (ret != 0) { + token_size = 0; + goto exit_free_granule_page; + } + } while (info.result == RSI_INCOMPLETE && + info.offset < RSI_GRANULE_SIZE); + + if (info.result != RSI_SUCCESS) { + ret = -ENXIO; + token_size = 0; + goto exit_free_granule_page; + } + + /* + * Copy the retrieved token data from the granule + * to the token buffer, ensuring that the RMM doesn't + * overflow the buffer. 
+ */ + if (WARN_ON(token_size + info.offset > max_size)) + break; + memcpy(&token[token_size], buf, info.offset); + token_size += info.offset; + } while (info.result == RSI_INCOMPLETE); + + report->outblob = no_free_ptr(token); +exit_free_granule_page: + report->outblob_len = token_size; + free_pages_exact(buf, RSI_GRANULE_SIZE); + return ret; +} + +static const struct tsm_ops arm_cca_tsm_ops = { + .name = KBUILD_MODNAME, + .report_new = arm_cca_report_new, +}; + +/** + * arm_cca_guest_init - Register with the Trusted Security Module (TSM) + * interface. + * + * Return: + * * %0 - Registered successfully with the TSM interface. + * * %-ENODEV - The execution context is not an Arm Realm. + * * %-EBUSY - Already registered. + */ +static int __init arm_cca_guest_init(void) +{ + int ret; + + if (!is_realm_world()) + return -ENODEV; + + ret = tsm_register(&arm_cca_tsm_ops, NULL); + if (ret < 0) + pr_err("Error %d registering with TSM\n", ret); + + return ret; +} +module_init(arm_cca_guest_init); + +/** + * arm_cca_guest_exit - unregister with the Trusted Security Module (TSM) + * interface. + */ +static void __exit arm_cca_guest_exit(void) +{ + tsm_unregister(&arm_cca_tsm_ops); +} +module_exit(arm_cca_guest_exit); + +/* modalias, so userspace can autoload this module when RSI is available */ +static const struct platform_device_id arm_cca_match[] __maybe_unused = { + { RSI_PDEV_NAME, 0}, + { } +}; + +MODULE_DEVICE_TABLE(platform, arm_cca_match); +MODULE_AUTHOR("Sami Mujawar <sami.mujawar@arm.com>"); +MODULE_DESCRIPTION("Arm CCA Guest TSM Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c index e700a5ef7043..1864f9f80617 100644 --- a/drivers/virt/coco/efi_secret/efi_secret.c +++ b/drivers/virt/coco/efi_secret/efi_secret.c @@ -326,11 +326,10 @@ err_unmap: return ret; } -static int efi_secret_remove(struct platform_device *dev) +static void efi_secret_remove(struct platform_device *dev) { efi_secret_securityfs_teardown(dev); efi_secret_unmap_area(); - return 0; } static struct platform_driver efi_secret_driver = { diff --git a/drivers/virt/coco/pkvm-guest/Kconfig b/drivers/virt/coco/pkvm-guest/Kconfig new file mode 100644 index 000000000000..d2f344f1f98f --- /dev/null +++ b/drivers/virt/coco/pkvm-guest/Kconfig @@ -0,0 +1,10 @@ +config ARM_PKVM_GUEST + bool "Arm pKVM protected guest driver" + depends on ARM64 + help + Protected guests running under the pKVM hypervisor on arm64 + are isolated from the host and must issue hypercalls to enable + interaction with virtual devices. This driver implements + support for probing and issuing these hypercalls. + + If unsure, say 'N'. diff --git a/drivers/virt/coco/pkvm-guest/Makefile b/drivers/virt/coco/pkvm-guest/Makefile new file mode 100644 index 000000000000..4bee24579423 --- /dev/null +++ b/drivers/virt/coco/pkvm-guest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_ARM_PKVM_GUEST) += arm-pkvm-guest.o diff --git a/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c b/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c new file mode 100644 index 000000000000..4230b817a80b --- /dev/null +++ b/drivers/virt/coco/pkvm-guest/arm-pkvm-guest.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support for the hypercall interface exposed to protected guests by + * pKVM. 
+ * + * Author: Will Deacon <will@kernel.org> + * Copyright (C) 2024 Google LLC + */ + +#include <linux/arm-smccc.h> +#include <linux/array_size.h> +#include <linux/io.h> +#include <linux/mem_encrypt.h> +#include <linux/mm.h> +#include <linux/pgtable.h> + +#include <asm/hypervisor.h> + +static size_t pkvm_granule; + +static int arm_smccc_do_one_page(u32 func_id, phys_addr_t phys) +{ + phys_addr_t end = phys + PAGE_SIZE; + + while (phys < end) { + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(func_id, phys, 0, 0, &res); + if (res.a0 != SMCCC_RET_SUCCESS) + return -EPERM; + + phys += pkvm_granule; + } + + return 0; +} + +static int __set_memory_range(u32 func_id, unsigned long start, int numpages) +{ + void *addr = (void *)start, *end = addr + numpages * PAGE_SIZE; + + while (addr < end) { + int err; + + err = arm_smccc_do_one_page(func_id, virt_to_phys(addr)); + if (err) + return err; + + addr += PAGE_SIZE; + } + + return 0; +} + +static int pkvm_set_memory_encrypted(unsigned long addr, int numpages) +{ + return __set_memory_range(ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID, + addr, numpages); +} + +static int pkvm_set_memory_decrypted(unsigned long addr, int numpages) +{ + return __set_memory_range(ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID, + addr, numpages); +} + +static const struct arm64_mem_crypt_ops pkvm_crypt_ops = { + .encrypt = pkvm_set_memory_encrypted, + .decrypt = pkvm_set_memory_decrypted, +}; + +static int mmio_guard_ioremap_hook(phys_addr_t phys, size_t size, + pgprot_t *prot) +{ + phys_addr_t end; + pteval_t protval = pgprot_val(*prot); + + /* + * We only expect MMIO emulation for regions mapped with device + * attributes. + */ + if (protval != PROT_DEVICE_nGnRE && protval != PROT_DEVICE_nGnRnE) + return 0; + + phys = PAGE_ALIGN_DOWN(phys); + end = phys + PAGE_ALIGN(size); + + while (phys < end) { + const int func_id = ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID; + + WARN_ON_ONCE(arm_smccc_do_one_page(func_id, phys)); + phys += PAGE_SIZE; + } + + return 0; +} + +void pkvm_init_hyp_services(void) +{ + int i; + struct arm_smccc_res res; + const u32 funcs[] = { + ARM_SMCCC_KVM_FUNC_HYP_MEMINFO, + ARM_SMCCC_KVM_FUNC_MEM_SHARE, + ARM_SMCCC_KVM_FUNC_MEM_UNSHARE, + }; + + for (i = 0; i < ARRAY_SIZE(funcs); ++i) { + if (!kvm_arm_hyp_service_available(funcs[i])) + return; + } + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID, + 0, 0, 0, &res); + if (res.a0 > PAGE_SIZE) /* Includes error codes */ + return; + + pkvm_granule = res.a0; + arm64_mem_crypt_ops_register(&pkvm_crypt_ops); + + if (kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD)) + arm64_ioremap_prot_hook_register(&mmio_guard_ioremap_hook); +} diff --git a/drivers/virt/coco/sev-guest/Kconfig b/drivers/virt/coco/sev-guest/Kconfig index 1cffc72c41cb..a6405ab6c2c3 100644 --- a/drivers/virt/coco/sev-guest/Kconfig +++ b/drivers/virt/coco/sev-guest/Kconfig @@ -2,9 +2,6 @@ config SEV_GUEST tristate "AMD SEV Guest driver" default m depends on AMD_MEM_ENCRYPT - select CRYPTO - select CRYPTO_AEAD2 - select CRYPTO_GCM select TSM_REPORTS help SEV-SNP firmware provides the guest a mechanism to communicate with diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index 87f241825bc3..70fbc9a3e703 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -2,7 +2,7 @@ /* * AMD Secure Encrypted Virtualization (SEV) guest driver interface * - * Copyright (C) 2021 Advanced Micro Devices, Inc. 
+ * Copyright (C) 2021-2024 Advanced Micro Devices, Inc. * * Author: Brijesh Singh <brijesh.singh@amd.com> */ @@ -17,142 +17,39 @@ #include <linux/set_memory.h> #include <linux/fs.h> #include <linux/tsm.h> -#include <crypto/aead.h> -#include <linux/scatterlist.h> +#include <crypto/gcm.h> #include <linux/psp-sev.h> #include <linux/sockptr.h> #include <linux/cleanup.h> #include <linux/uuid.h> +#include <linux/configfs.h> #include <uapi/linux/sev-guest.h> #include <uapi/linux/psp-sev.h> #include <asm/svm.h> #include <asm/sev.h> -#include "sev-guest.h" - #define DEVICE_NAME "sev-guest" -#define AAD_LEN 48 -#define MSG_HDR_VER 1 - -#define SNP_REQ_MAX_RETRY_DURATION (60*HZ) -#define SNP_REQ_RETRY_DELAY (2*HZ) -struct snp_guest_crypto { - struct crypto_aead *tfm; - u8 *iv, *authtag; - int iv_len, a_len; -}; +#define SVSM_MAX_RETRIES 3 struct snp_guest_dev { struct device *dev; struct miscdevice misc; - void *certs_data; - struct snp_guest_crypto *crypto; - /* request and response are in unencrypted memory */ - struct snp_guest_msg *request, *response; - - /* - * Avoid information leakage by double-buffering shared messages - * in fields that are in regular encrypted memory. - */ - struct snp_guest_msg secret_request, secret_response; - - struct snp_secrets_page_layout *layout; - struct snp_req_data input; - union { - struct snp_report_req report; - struct snp_derived_key_req derived_key; - struct snp_ext_report_req ext_report; - } req; - u32 *os_area_msg_seqno; - u8 *vmpck; + struct snp_msg_desc *msg_desc; }; -static u32 vmpck_id; -module_param(vmpck_id, uint, 0444); -MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP."); - -/* Mutex to serialize the shared buffer access and command handling. */ -static DEFINE_MUTEX(snp_cmd_mutex); - -static bool is_vmpck_empty(struct snp_guest_dev *snp_dev) -{ - char zero_key[VMPCK_KEY_LEN] = {0}; - - if (snp_dev->vmpck) - return !memcmp(snp_dev->vmpck, zero_key, VMPCK_KEY_LEN); - - return true; -} - /* - * If an error is received from the host or AMD Secure Processor (ASP) there - * are two options. Either retry the exact same encrypted request or discontinue - * using the VMPCK. - * - * This is because in the current encryption scheme GHCB v2 uses AES-GCM to - * encrypt the requests. The IV for this scheme is the sequence number. GCM - * cannot tolerate IV reuse. - * - * The ASP FW v1.51 only increments the sequence numbers on a successful - * guest<->ASP back and forth and only accepts messages at its exact sequence - * number. - * - * So if the sequence number were to be reused the encryption scheme is - * vulnerable. If the sequence number were incremented for a fresh IV the ASP - * will reject the request. + * The VMPCK ID represents the key used by the SNP guest to communicate with the + * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key + * used will be the key associated with the VMPL at which the guest is running. + * Should the default key be wiped (see snp_disable_vmpck()), this parameter + * allows for using one of the remaining VMPCKs. 
*/ -static void snp_disable_vmpck(struct snp_guest_dev *snp_dev) -{ - dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n", - vmpck_id); - memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN); - snp_dev->vmpck = NULL; -} - -static inline u64 __snp_get_msg_seqno(struct snp_guest_dev *snp_dev) -{ - u64 count; - - lockdep_assert_held(&snp_cmd_mutex); - - /* Read the current message sequence counter from secrets pages */ - count = *snp_dev->os_area_msg_seqno; - - return count + 1; -} - -/* Return a non-zero on success */ -static u64 snp_get_msg_seqno(struct snp_guest_dev *snp_dev) -{ - u64 count = __snp_get_msg_seqno(snp_dev); - - /* - * The message sequence counter for the SNP guest request is a 64-bit - * value but the version 2 of GHCB specification defines a 32-bit storage - * for it. If the counter exceeds the 32-bit value then return zero. - * The caller should check the return value, but if the caller happens to - * not check the value and use it, then the firmware treats zero as an - * invalid number and will fail the message request. - */ - if (count >= UINT_MAX) { - dev_err(snp_dev->dev, "request message sequence counter overflow\n"); - return 0; - } - - return count; -} - -static void snp_inc_msg_seqno(struct snp_guest_dev *snp_dev) -{ - /* - * The counter is also incremented by the PSP, so increment it by 2 - * and save in secrets page. - */ - *snp_dev->os_area_msg_seqno += 2; -} +static int vmpck_id = -1; +module_param(vmpck_id, int, 0444); +MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP."); static inline struct snp_guest_dev *to_snp_dev(struct file *file) { @@ -161,324 +58,6 @@ static inline struct snp_guest_dev *to_snp_dev(struct file *file) return container_of(dev, struct snp_guest_dev, misc); } -static struct snp_guest_crypto *init_crypto(struct snp_guest_dev *snp_dev, u8 *key, size_t keylen) -{ - struct snp_guest_crypto *crypto; - - crypto = kzalloc(sizeof(*crypto), GFP_KERNEL_ACCOUNT); - if (!crypto) - return NULL; - - crypto->tfm = crypto_alloc_aead("gcm(aes)", 0, 0); - if (IS_ERR(crypto->tfm)) - goto e_free; - - if (crypto_aead_setkey(crypto->tfm, key, keylen)) - goto e_free_crypto; - - crypto->iv_len = crypto_aead_ivsize(crypto->tfm); - crypto->iv = kmalloc(crypto->iv_len, GFP_KERNEL_ACCOUNT); - if (!crypto->iv) - goto e_free_crypto; - - if (crypto_aead_authsize(crypto->tfm) > MAX_AUTHTAG_LEN) { - if (crypto_aead_setauthsize(crypto->tfm, MAX_AUTHTAG_LEN)) { - dev_err(snp_dev->dev, "failed to set authsize to %d\n", MAX_AUTHTAG_LEN); - goto e_free_iv; - } - } - - crypto->a_len = crypto_aead_authsize(crypto->tfm); - crypto->authtag = kmalloc(crypto->a_len, GFP_KERNEL_ACCOUNT); - if (!crypto->authtag) - goto e_free_iv; - - return crypto; - -e_free_iv: - kfree(crypto->iv); -e_free_crypto: - crypto_free_aead(crypto->tfm); -e_free: - kfree(crypto); - - return NULL; -} - -static void deinit_crypto(struct snp_guest_crypto *crypto) -{ - crypto_free_aead(crypto->tfm); - kfree(crypto->iv); - kfree(crypto->authtag); - kfree(crypto); -} - -static int enc_dec_message(struct snp_guest_crypto *crypto, struct snp_guest_msg *msg, - u8 *src_buf, u8 *dst_buf, size_t len, bool enc) -{ - struct snp_guest_msg_hdr *hdr = &msg->hdr; - struct scatterlist src[3], dst[3]; - DECLARE_CRYPTO_WAIT(wait); - struct aead_request *req; - int ret; - - req = aead_request_alloc(crypto->tfm, GFP_KERNEL); - if (!req) - return -ENOMEM; - - /* - * AEAD memory operations: - * +------ AAD -------+------- DATA -----+---- AUTHTAG----+ - * | msg header | plaintext | 
hdr->authtag | - * | bytes 30h - 5Fh | or | | - * | | cipher | | - * +------------------+------------------+----------------+ - */ - sg_init_table(src, 3); - sg_set_buf(&src[0], &hdr->algo, AAD_LEN); - sg_set_buf(&src[1], src_buf, hdr->msg_sz); - sg_set_buf(&src[2], hdr->authtag, crypto->a_len); - - sg_init_table(dst, 3); - sg_set_buf(&dst[0], &hdr->algo, AAD_LEN); - sg_set_buf(&dst[1], dst_buf, hdr->msg_sz); - sg_set_buf(&dst[2], hdr->authtag, crypto->a_len); - - aead_request_set_ad(req, AAD_LEN); - aead_request_set_tfm(req, crypto->tfm); - aead_request_set_callback(req, 0, crypto_req_done, &wait); - - aead_request_set_crypt(req, src, dst, len, crypto->iv); - ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req), &wait); - - aead_request_free(req); - return ret; -} - -static int __enc_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, - void *plaintext, size_t len) -{ - struct snp_guest_crypto *crypto = snp_dev->crypto; - struct snp_guest_msg_hdr *hdr = &msg->hdr; - - memset(crypto->iv, 0, crypto->iv_len); - memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); - - return enc_dec_message(crypto, msg, plaintext, msg->payload, len, true); -} - -static int dec_payload(struct snp_guest_dev *snp_dev, struct snp_guest_msg *msg, - void *plaintext, size_t len) -{ - struct snp_guest_crypto *crypto = snp_dev->crypto; - struct snp_guest_msg_hdr *hdr = &msg->hdr; - - /* Build IV with response buffer sequence number */ - memset(crypto->iv, 0, crypto->iv_len); - memcpy(crypto->iv, &hdr->msg_seqno, sizeof(hdr->msg_seqno)); - - return enc_dec_message(crypto, msg, msg->payload, plaintext, len, false); -} - -static int verify_and_dec_payload(struct snp_guest_dev *snp_dev, void *payload, u32 sz) -{ - struct snp_guest_crypto *crypto = snp_dev->crypto; - struct snp_guest_msg *resp = &snp_dev->secret_response; - struct snp_guest_msg *req = &snp_dev->secret_request; - struct snp_guest_msg_hdr *req_hdr = &req->hdr; - struct snp_guest_msg_hdr *resp_hdr = &resp->hdr; - - dev_dbg(snp_dev->dev, "response [seqno %lld type %d version %d sz %d]\n", - resp_hdr->msg_seqno, resp_hdr->msg_type, resp_hdr->msg_version, resp_hdr->msg_sz); - - /* Copy response from shared memory to encrypted memory. */ - memcpy(resp, snp_dev->response, sizeof(*resp)); - - /* Verify that the sequence counter is incremented by 1 */ - if (unlikely(resp_hdr->msg_seqno != (req_hdr->msg_seqno + 1))) - return -EBADMSG; - - /* Verify response message type and version number. */ - if (resp_hdr->msg_type != (req_hdr->msg_type + 1) || - resp_hdr->msg_version != req_hdr->msg_version) - return -EBADMSG; - - /* - * If the message size is greater than our buffer length then return - * an error. 
- */ - if (unlikely((resp_hdr->msg_sz + crypto->a_len) > sz)) - return -EBADMSG; - - /* Decrypt the payload */ - return dec_payload(snp_dev, resp, payload, resp_hdr->msg_sz + crypto->a_len); -} - -static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8 type, - void *payload, size_t sz) -{ - struct snp_guest_msg *req = &snp_dev->secret_request; - struct snp_guest_msg_hdr *hdr = &req->hdr; - - memset(req, 0, sizeof(*req)); - - hdr->algo = SNP_AEAD_AES_256_GCM; - hdr->hdr_version = MSG_HDR_VER; - hdr->hdr_sz = sizeof(*hdr); - hdr->msg_type = type; - hdr->msg_version = version; - hdr->msg_seqno = seqno; - hdr->msg_vmpck = vmpck_id; - hdr->msg_sz = sz; - - /* Verify the sequence number is non-zero */ - if (!hdr->msg_seqno) - return -ENOSR; - - dev_dbg(snp_dev->dev, "request [seqno %lld type %d version %d sz %d]\n", - hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz); - - return __enc_payload(snp_dev, req, payload, sz); -} - -static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, - struct snp_guest_request_ioctl *rio) -{ - unsigned long req_start = jiffies; - unsigned int override_npages = 0; - u64 override_err = 0; - int rc; - -retry_request: - /* - * Call firmware to process the request. In this function the encrypted - * message enters shared memory with the host. So after this call the - * sequence number must be incremented or the VMPCK must be deleted to - * prevent reuse of the IV. - */ - rc = snp_issue_guest_request(exit_code, &snp_dev->input, rio); - switch (rc) { - case -ENOSPC: - /* - * If the extended guest request fails due to having too - * small of a certificate data buffer, retry the same - * guest request without the extended data request in - * order to increment the sequence number and thus avoid - * IV reuse. - */ - override_npages = snp_dev->input.data_npages; - exit_code = SVM_VMGEXIT_GUEST_REQUEST; - - /* - * Override the error to inform callers the given extended - * request buffer size was too small and give the caller the - * required buffer size. - */ - override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN); - - /* - * If this call to the firmware succeeds, the sequence number can - * be incremented allowing for continued use of the VMPCK. If - * there is an error reflected in the return value, this value - * is checked further down and the result will be the deletion - * of the VMPCK and the error code being propagated back to the - * user as an ioctl() return code. - */ - goto retry_request; - - /* - * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been - * throttled. Retry in the driver to avoid returning and reusing the - * message sequence number on a different message. - */ - case -EAGAIN: - if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) { - rc = -ETIMEDOUT; - break; - } - schedule_timeout_killable(SNP_REQ_RETRY_DELAY); - goto retry_request; - } - - /* - * Increment the message sequence number. There is no harm in doing - * this now because decryption uses the value stored in the response - * structure and any failure will wipe the VMPCK, preventing further - * use anyway. - */ - snp_inc_msg_seqno(snp_dev); - - if (override_err) { - rio->exitinfo2 = override_err; - - /* - * If an extended guest request was issued and the supplied certificate - * buffer was not large enough, a standard guest request was issued to - * prevent IV reuse. If the standard request was successful, return -EIO - * back to the caller as would have originally been returned. 
- */ - if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN)) - rc = -EIO; - } - - if (override_npages) - snp_dev->input.data_npages = override_npages; - - return rc; -} - -static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, - struct snp_guest_request_ioctl *rio, u8 type, - void *req_buf, size_t req_sz, void *resp_buf, - u32 resp_sz) -{ - u64 seqno; - int rc; - - /* Get message sequence and verify that its a non-zero */ - seqno = snp_get_msg_seqno(snp_dev); - if (!seqno) - return -EIO; - - /* Clear shared memory's response for the host to populate. */ - memset(snp_dev->response, 0, sizeof(struct snp_guest_msg)); - - /* Encrypt the userspace provided payload in snp_dev->secret_request. */ - rc = enc_payload(snp_dev, seqno, rio->msg_version, type, req_buf, req_sz); - if (rc) - return rc; - - /* - * Write the fully encrypted request to the shared unencrypted - * request page. - */ - memcpy(snp_dev->request, &snp_dev->secret_request, - sizeof(snp_dev->secret_request)); - - rc = __handle_guest_request(snp_dev, exit_code, rio); - if (rc) { - if (rc == -EIO && - rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN)) - return rc; - - dev_alert(snp_dev->dev, - "Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n", - rc, rio->exitinfo2); - - snp_disable_vmpck(snp_dev); - return rc; - } - - rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz); - if (rc) { - dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc); - snp_disable_vmpck(snp_dev); - return rc; - } - - return 0; -} - struct snp_req_resp { sockptr_t req_data; sockptr_t resp_data; @@ -486,17 +65,20 @@ struct snp_req_resp { static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { - struct snp_guest_crypto *crypto = snp_dev->crypto; - struct snp_report_req *req = &snp_dev->req.report; - struct snp_report_resp *resp; + struct snp_report_req *report_req __free(kfree) = NULL; + struct snp_msg_desc *mdesc = snp_dev->msg_desc; + struct snp_report_resp *report_resp; + struct snp_guest_req req = {}; int rc, resp_len; - lockdep_assert_held(&snp_cmd_mutex); - if (!arg->req_data || !arg->resp_data) return -EINVAL; - if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req))) + report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT); + if (!report_req) + return -ENOMEM; + + if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req))) return -EFAULT; /* @@ -504,36 +86,42 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io * response payload. Make sure that it has enough space to cover the * authtag. 
*/ - resp_len = sizeof(resp->data) + crypto->a_len; - resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); - if (!resp) + resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize; + report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); + if (!report_resp) return -ENOMEM; - rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, - SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data, - resp_len); + req.msg_version = arg->msg_version; + req.msg_type = SNP_MSG_REPORT_REQ; + req.vmpck_id = mdesc->vmpck_id; + req.req_buf = report_req; + req.req_sz = sizeof(*report_req); + req.resp_buf = report_resp->data; + req.resp_sz = resp_len; + req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; + + rc = snp_send_guest_request(mdesc, &req, arg); if (rc) goto e_free; - if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp))) + if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp))) rc = -EFAULT; e_free: - kfree(resp); + kfree(report_resp); return rc; } static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { - struct snp_derived_key_req *req = &snp_dev->req.derived_key; - struct snp_guest_crypto *crypto = snp_dev->crypto; - struct snp_derived_key_resp resp = {0}; + struct snp_derived_key_req *derived_key_req __free(kfree) = NULL; + struct snp_derived_key_resp derived_key_resp = {0}; + struct snp_msg_desc *mdesc = snp_dev->msg_desc; + struct snp_guest_req req = {}; int rc, resp_len; /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ u8 buf[64 + 16]; - lockdep_assert_held(&snp_cmd_mutex); - if (!arg->req_data || !arg->resp_data) return -EINVAL; @@ -542,25 +130,39 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque * response payload. Make sure that it has enough space to cover the * authtag. */ - resp_len = sizeof(resp.data) + crypto->a_len; + resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize; if (sizeof(buf) < resp_len) return -ENOMEM; - if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req))) + derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT); + if (!derived_key_req) + return -ENOMEM; + + if (copy_from_user(derived_key_req, (void __user *)arg->req_data, + sizeof(*derived_key_req))) return -EFAULT; - rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg, - SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len); + req.msg_version = arg->msg_version; + req.msg_type = SNP_MSG_KEY_REQ; + req.vmpck_id = mdesc->vmpck_id; + req.req_buf = derived_key_req; + req.req_sz = sizeof(*derived_key_req); + req.resp_buf = buf; + req.resp_sz = resp_len; + req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; + + rc = snp_send_guest_request(mdesc, &req, arg); if (rc) return rc; - memcpy(resp.data, buf, sizeof(resp.data)); - if (copy_to_user((void __user *)arg->resp_data, &resp, sizeof(resp))) + memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data)); + if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp, + sizeof(derived_key_resp))) rc = -EFAULT; /* The response buffer contains the sensitive data, explicitly clear it. 
*/ memzero_explicit(buf, sizeof(buf)); - memzero_explicit(&resp, sizeof(resp)); + memzero_explicit(&derived_key_resp, sizeof(derived_key_resp)); return rc; } @@ -568,33 +170,37 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques struct snp_req_resp *io) { - struct snp_ext_report_req *req = &snp_dev->req.ext_report; - struct snp_guest_crypto *crypto = snp_dev->crypto; - struct snp_report_resp *resp; + struct snp_ext_report_req *report_req __free(kfree) = NULL; + struct snp_msg_desc *mdesc = snp_dev->msg_desc; + struct snp_report_resp *report_resp; + struct snp_guest_req req = {}; int ret, npages = 0, resp_len; sockptr_t certs_address; - - lockdep_assert_held(&snp_cmd_mutex); + struct page *page; if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data)) return -EINVAL; - if (copy_from_sockptr(req, io->req_data, sizeof(*req))) + report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT); + if (!report_req) + return -ENOMEM; + + if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req))) return -EFAULT; /* caller does not want certificate data */ - if (!req->certs_len || !req->certs_address) + if (!report_req->certs_len || !report_req->certs_address) goto cmd; - if (req->certs_len > SEV_FW_BLOB_MAX_SIZE || - !IS_ALIGNED(req->certs_len, PAGE_SIZE)) + if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE || + !IS_ALIGNED(report_req->certs_len, PAGE_SIZE)) return -EINVAL; if (sockptr_is_kernel(io->resp_data)) { - certs_address = KERNEL_SOCKPTR((void *)req->certs_address); + certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address); } else { - certs_address = USER_SOCKPTR((void __user *)req->certs_address); - if (!access_ok(certs_address.user, req->certs_len)) + certs_address = USER_SOCKPTR((void __user *)report_req->certs_address); + if (!access_ok(certs_address.user, report_req->certs_len)) return -EFAULT; } @@ -604,45 +210,74 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques * the host. If host does not supply any certs in it, then copy * zeros to indicate that certificate data was not provided. */ - memset(snp_dev->certs_data, 0, req->certs_len); - npages = req->certs_len >> PAGE_SHIFT; + npages = report_req->certs_len >> PAGE_SHIFT; + page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, + get_order(report_req->certs_len)); + if (!page) + return -ENOMEM; + + req.certs_data = page_address(page); + ret = set_memory_decrypted((unsigned long)req.certs_data, npages); + if (ret) { + pr_err("failed to mark page shared, ret=%d\n", ret); + __free_pages(page, get_order(report_req->certs_len)); + return -EFAULT; + } + cmd: /* * The intermediate response buffer is used while decrypting the * response payload. Make sure that it has enough space to cover the * authtag. 
*/ - resp_len = sizeof(resp->data) + crypto->a_len; - resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); - if (!resp) - return -ENOMEM; + resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize; + report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); + if (!report_resp) { + ret = -ENOMEM; + goto e_free_data; + } + + req.input.data_npages = npages; + + req.msg_version = arg->msg_version; + req.msg_type = SNP_MSG_REPORT_REQ; + req.vmpck_id = mdesc->vmpck_id; + req.req_buf = &report_req->data; + req.req_sz = sizeof(report_req->data); + req.resp_buf = report_resp->data; + req.resp_sz = resp_len; + req.exit_code = SVM_VMGEXIT_EXT_GUEST_REQUEST; - snp_dev->input.data_npages = npages; - ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg, - SNP_MSG_REPORT_REQ, &req->data, - sizeof(req->data), resp->data, resp_len); + ret = snp_send_guest_request(mdesc, &req, arg); /* If certs length is invalid then copy the returned length */ if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) { - req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT; + report_req->certs_len = req.input.data_npages << PAGE_SHIFT; - if (copy_to_sockptr(io->req_data, req, sizeof(*req))) + if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req))) ret = -EFAULT; } if (ret) goto e_free; - if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, req->certs_len)) { + if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) { ret = -EFAULT; goto e_free; } - if (copy_to_sockptr(io->resp_data, resp, sizeof(*resp))) + if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp))) ret = -EFAULT; e_free: - kfree(resp); + kfree(report_resp); +e_free_data: + if (npages) { + if (set_memory_encrypted((unsigned long)req.certs_data, npages)) + WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n"); + else + __free_pages(page, get_order(report_req->certs_len)); + } return ret; } @@ -663,15 +298,6 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long if (!input.msg_version) return -EINVAL; - mutex_lock(&snp_cmd_mutex); - - /* Check if the VMPCK is not empty */ - if (is_vmpck_empty(snp_dev)) { - dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n"); - mutex_unlock(&snp_cmd_mutex); - return -ENOTTY; - } - switch (ioctl) { case SNP_GET_REPORT: ret = get_report(snp_dev, &input); @@ -693,95 +319,163 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long break; } - mutex_unlock(&snp_cmd_mutex); - if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input))) return -EFAULT; return ret; } -static void free_shared_pages(void *buf, size_t sz) +static const struct file_operations snp_guest_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = snp_guest_ioctl, +}; + +struct snp_msg_report_resp_hdr { + u32 status; + u32 report_size; + u8 rsvd[24]; +}; + +struct snp_msg_cert_entry { + guid_t guid; + u32 offset; + u32 length; +}; + +static int sev_svsm_report_new(struct tsm_report *report, void *data) { - unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; + unsigned int rep_len, man_len, certs_len; + struct tsm_desc *desc = &report->desc; + struct svsm_attest_call ac = {}; + unsigned int retry_count; + void *rep, *man, *certs; + struct svsm_call call; + unsigned int size; + bool try_again; + void *buffer; + u64 call_id; int ret; - if (!buf) - return; + /* + * Allocate pages for the request: + * - Report blob (4K) + * - Manifest blob (4K) + * - Certificate blob (16K) + * + * Above addresses must be 4K aligned + */ + 
rep_len = SZ_4K; + man_len = SZ_4K; + certs_len = SEV_FW_BLOB_MAX_SIZE; - ret = set_memory_encrypted((unsigned long)buf, npages); - if (ret) { - WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n"); - return; + if (guid_is_null(&desc->service_guid)) { + call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES); + } else { + export_guid(ac.service_guid, &desc->service_guid); + ac.service_manifest_ver = desc->service_manifest_version; + + call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE); } - __free_pages(virt_to_page(buf), get_order(sz)); -} + retry_count = 0; -static void *alloc_shared_pages(struct device *dev, size_t sz) -{ - unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; - struct page *page; - int ret; +retry: + memset(&call, 0, sizeof(call)); - page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz)); - if (!page) - return NULL; + size = rep_len + man_len + certs_len; + buffer = alloc_pages_exact(size, __GFP_ZERO); + if (!buffer) + return -ENOMEM; - ret = set_memory_decrypted((unsigned long)page_address(page), npages); + rep = buffer; + ac.report_buf.pa = __pa(rep); + ac.report_buf.len = rep_len; + + man = rep + rep_len; + ac.manifest_buf.pa = __pa(man); + ac.manifest_buf.len = man_len; + + certs = man + man_len; + ac.certificates_buf.pa = __pa(certs); + ac.certificates_buf.len = certs_len; + + ac.nonce.pa = __pa(desc->inblob); + ac.nonce.len = desc->inblob_len; + + ret = snp_issue_svsm_attest_req(call_id, &call, &ac); if (ret) { - dev_err(dev, "failed to mark page shared, ret=%d\n", ret); - __free_pages(page, get_order(sz)); - return NULL; + free_pages_exact(buffer, size); + + switch (call.rax_out) { + case SVSM_ERR_INVALID_PARAMETER: + try_again = false; + + if (ac.report_buf.len > rep_len) { + rep_len = PAGE_ALIGN(ac.report_buf.len); + try_again = true; + } + + if (ac.manifest_buf.len > man_len) { + man_len = PAGE_ALIGN(ac.manifest_buf.len); + try_again = true; + } + + if (ac.certificates_buf.len > certs_len) { + certs_len = PAGE_ALIGN(ac.certificates_buf.len); + try_again = true; + } + + /* If one of the buffers wasn't large enough, retry the request */ + if (try_again && retry_count < SVSM_MAX_RETRIES) { + retry_count++; + goto retry; + } + + return -EINVAL; + default: + pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n", + ret, call.rax_out); + return -EINVAL; + } } - return page_address(page); -} + /* + * Allocate all the blob memory buffers at once so that the cleanup is + * done for errors that occur after the first allocation (i.e. before + * using no_free_ptr()). + */ + rep_len = ac.report_buf.len; + void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL); -static const struct file_operations snp_guest_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = snp_guest_ioctl, -}; + man_len = ac.manifest_buf.len; + void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL); -static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno) -{ - u8 *key = NULL; + certs_len = ac.certificates_buf.len; + void *cbuf __free(kvfree) = certs_len ? 
kvzalloc(certs_len, GFP_KERNEL) : NULL; - switch (id) { - case 0: - *seqno = &layout->os_area.msg_seqno_0; - key = layout->vmpck0; - break; - case 1: - *seqno = &layout->os_area.msg_seqno_1; - key = layout->vmpck1; - break; - case 2: - *seqno = &layout->os_area.msg_seqno_2; - key = layout->vmpck2; - break; - case 3: - *seqno = &layout->os_area.msg_seqno_3; - key = layout->vmpck3; - break; - default: - break; + if (!rbuf || !mbuf || (certs_len && !cbuf)) { + free_pages_exact(buffer, size); + return -ENOMEM; } - return key; -} + memcpy(rbuf, rep, rep_len); + report->outblob = no_free_ptr(rbuf); + report->outblob_len = rep_len; -struct snp_msg_report_resp_hdr { - u32 status; - u32 report_size; - u8 rsvd[24]; -}; + memcpy(mbuf, man, man_len); + report->manifestblob = no_free_ptr(mbuf); + report->manifestblob_len = man_len; -struct snp_msg_cert_entry { - guid_t guid; - u32 offset; - u32 length; -}; + if (certs_len) { + memcpy(cbuf, certs, certs_len); + report->auxblob = no_free_ptr(cbuf); + report->auxblob_len = certs_len; + } + + free_pages_exact(buffer, size); + + return 0; +} static int sev_report_new(struct tsm_report *report, void *data) { @@ -797,18 +491,17 @@ static int sev_report_new(struct tsm_report *report, void *data) if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE) return -EINVAL; + if (desc->service_provider) { + if (strcmp(desc->service_provider, "svsm")) + return -EINVAL; + + return sev_svsm_report_new(report, data); + } + void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; - guard(mutex)(&snp_cmd_mutex); - - /* Check if the VMPCK is not empty */ - if (is_vmpck_empty(snp_dev)) { - dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n"); - return -ENOTTY; - } - cert_table = buf + report_size; struct snp_ext_report_req ext_req = { .data = { .vmpl = desc->privlevel }, @@ -885,9 +578,42 @@ static int sev_report_new(struct tsm_report *report, void *data) return 0; } -static const struct tsm_ops sev_tsm_ops = { +static bool sev_report_attr_visible(int n) +{ + switch (n) { + case TSM_REPORT_GENERATION: + case TSM_REPORT_PROVIDER: + case TSM_REPORT_PRIVLEVEL: + case TSM_REPORT_PRIVLEVEL_FLOOR: + return true; + case TSM_REPORT_SERVICE_PROVIDER: + case TSM_REPORT_SERVICE_GUID: + case TSM_REPORT_SERVICE_MANIFEST_VER: + return snp_vmpl; + } + + return false; +} + +static bool sev_report_bin_attr_visible(int n) +{ + switch (n) { + case TSM_REPORT_INBLOB: + case TSM_REPORT_OUTBLOB: + case TSM_REPORT_AUXBLOB: + return true; + case TSM_REPORT_MANIFESTBLOB: + return snp_vmpl; + } + + return false; +} + +static struct tsm_ops sev_tsm_ops = { .name = KBUILD_MODNAME, .report_new = sev_report_new, + .report_attr_visible = sev_report_attr_visible, + .report_bin_attr_visible = sev_report_bin_attr_visible, }; static void unregister_sev_tsm(void *data) @@ -897,100 +623,60 @@ static void unregister_sev_tsm(void *data) static int __init sev_guest_probe(struct platform_device *pdev) { - struct snp_secrets_page_layout *layout; - struct sev_guest_platform_data *data; struct device *dev = &pdev->dev; struct snp_guest_dev *snp_dev; + struct snp_msg_desc *mdesc; struct miscdevice *misc; - void __iomem *mapping; int ret; - if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) - return -ENODEV; + BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE); - if (!dev->platform_data) - return -ENODEV; - - data = (struct sev_guest_platform_data *)dev->platform_data; - mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE); - if (!mapping) + if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return 
-ENODEV; - layout = (__force void *)mapping; - - ret = -ENOMEM; snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL); if (!snp_dev) - goto e_unmap; + return -ENOMEM; - ret = -EINVAL; - snp_dev->vmpck = get_vmpck(vmpck_id, layout, &snp_dev->os_area_msg_seqno); - if (!snp_dev->vmpck) { - dev_err(dev, "invalid vmpck id %d\n", vmpck_id); - goto e_unmap; - } + mdesc = snp_msg_alloc(); + if (IS_ERR_OR_NULL(mdesc)) + return -ENOMEM; - /* Verify that VMPCK is not zero. */ - if (is_vmpck_empty(snp_dev)) { - dev_err(dev, "vmpck id %d is null\n", vmpck_id); - goto e_unmap; - } + ret = snp_msg_init(mdesc, vmpck_id); + if (ret) + goto e_msg_init; platform_set_drvdata(pdev, snp_dev); snp_dev->dev = dev; - snp_dev->layout = layout; - - /* Allocate the shared page used for the request and response message. */ - snp_dev->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); - if (!snp_dev->request) - goto e_unmap; - - snp_dev->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); - if (!snp_dev->response) - goto e_free_request; - - snp_dev->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE); - if (!snp_dev->certs_data) - goto e_free_response; - - ret = -EIO; - snp_dev->crypto = init_crypto(snp_dev, snp_dev->vmpck, VMPCK_KEY_LEN); - if (!snp_dev->crypto) - goto e_free_cert_data; misc = &snp_dev->misc; misc->minor = MISC_DYNAMIC_MINOR; misc->name = DEVICE_NAME; misc->fops = &snp_guest_fops; - /* initial the input address for guest request */ - snp_dev->input.req_gpa = __pa(snp_dev->request); - snp_dev->input.resp_gpa = __pa(snp_dev->response); - snp_dev->input.data_gpa = __pa(snp_dev->certs_data); + /* Set the privlevel_floor attribute based on the vmpck_id */ + sev_tsm_ops.privlevel_floor = mdesc->vmpck_id; - ret = tsm_register(&sev_tsm_ops, snp_dev, &tsm_report_extra_type); + ret = tsm_register(&sev_tsm_ops, snp_dev); if (ret) - goto e_free_cert_data; + goto e_msg_init; ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL); if (ret) - goto e_free_cert_data; + goto e_msg_init; ret = misc_register(misc); if (ret) - goto e_free_cert_data; + goto e_msg_init; - dev_info(dev, "Initialized SEV guest driver (using vmpck_id %d)\n", vmpck_id); + snp_dev->msg_desc = mdesc; + dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", + mdesc->vmpck_id); return 0; -e_free_cert_data: - free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); -e_free_response: - free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); -e_free_request: - free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); -e_unmap: - iounmap(mapping); +e_msg_init: + snp_msg_free(mdesc); + return ret; } @@ -998,10 +684,7 @@ static void __exit sev_guest_remove(struct platform_device *pdev) { struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev); - free_shared_pages(snp_dev->certs_data, SEV_FW_BLOB_MAX_SIZE); - free_shared_pages(snp_dev->response, sizeof(struct snp_guest_msg)); - free_shared_pages(snp_dev->request, sizeof(struct snp_guest_msg)); - deinit_crypto(snp_dev->crypto); + snp_msg_free(snp_dev->msg_desc); misc_deregister(&snp_dev->misc); } @@ -1009,9 +692,14 @@ static void __exit sev_guest_remove(struct platform_device *pdev) * This driver is meant to be a common SEV guest interface driver and to * support any SEV guest API. As such, even though it has been introduced * with the SEV-SNP support, it is named "sev-guest". + * + * sev_guest_remove() lives in .exit.text. 
For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound + * at runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. */ -static struct platform_driver sev_guest_driver = { - .remove_new = __exit_p(sev_guest_remove), +static struct platform_driver sev_guest_driver __refdata = { + .remove = __exit_p(sev_guest_remove), .driver = { .name = "sev-guest", }, diff --git a/drivers/virt/coco/sev-guest/sev-guest.h b/drivers/virt/coco/sev-guest/sev-guest.h deleted file mode 100644 index 21bda26fdb95..000000000000 --- a/drivers/virt/coco/sev-guest/sev-guest.h +++ /dev/null @@ -1,63 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2021 Advanced Micro Devices, Inc. - * - * Author: Brijesh Singh <brijesh.singh@amd.com> - * - * SEV-SNP API spec is available at https://developer.amd.com/sev - */ - -#ifndef __VIRT_SEVGUEST_H__ -#define __VIRT_SEVGUEST_H__ - -#include <linux/types.h> - -#define MAX_AUTHTAG_LEN 32 - -/* See SNP spec SNP_GUEST_REQUEST section for the structure */ -enum msg_type { - SNP_MSG_TYPE_INVALID = 0, - SNP_MSG_CPUID_REQ, - SNP_MSG_CPUID_RSP, - SNP_MSG_KEY_REQ, - SNP_MSG_KEY_RSP, - SNP_MSG_REPORT_REQ, - SNP_MSG_REPORT_RSP, - SNP_MSG_EXPORT_REQ, - SNP_MSG_EXPORT_RSP, - SNP_MSG_IMPORT_REQ, - SNP_MSG_IMPORT_RSP, - SNP_MSG_ABSORB_REQ, - SNP_MSG_ABSORB_RSP, - SNP_MSG_VMRK_REQ, - SNP_MSG_VMRK_RSP, - - SNP_MSG_TYPE_MAX -}; - -enum aead_algo { - SNP_AEAD_INVALID, - SNP_AEAD_AES_256_GCM, -}; - -struct snp_guest_msg_hdr { - u8 authtag[MAX_AUTHTAG_LEN]; - u64 msg_seqno; - u8 rsvd1[8]; - u8 algo; - u8 hdr_version; - u16 hdr_sz; - u8 msg_type; - u8 msg_version; - u16 msg_sz; - u32 rsvd2; - u8 msg_vmpck; - u8 rsvd3[35]; -} __packed; - -struct snp_guest_msg { - struct snp_guest_msg_hdr hdr; - u8 payload[4000]; -} __packed; - -#endif /* __VIRT_SEVGUEST_H__ */ diff --git a/drivers/virt/coco/tdx-guest/tdx-guest.c b/drivers/virt/coco/tdx-guest/tdx-guest.c index 1253bf76b570..224e7dde9cde 100644 --- a/drivers/virt/coco/tdx-guest/tdx-guest.c +++ b/drivers/virt/coco/tdx-guest/tdx-guest.c @@ -124,10 +124,8 @@ static void *alloc_quote_buf(void) if (!addr) return NULL; - if (set_memory_decrypted((unsigned long)addr, count)) { - free_pages_exact(addr, len); + if (set_memory_decrypted((unsigned long)addr, count)) return NULL; - } return addr; } @@ -249,6 +247,28 @@ done: return ret; } +static bool tdx_report_attr_visible(int n) +{ + switch (n) { + case TSM_REPORT_GENERATION: + case TSM_REPORT_PROVIDER: + return true; + } + + return false; +} + +static bool tdx_report_bin_attr_visible(int n) +{ + switch (n) { + case TSM_REPORT_INBLOB: + case TSM_REPORT_OUTBLOB: + return true; + } + + return false; +} + static long tdx_guest_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -263,7 +283,6 @@ static long tdx_guest_ioctl(struct file *file, unsigned int cmd, static const struct file_operations tdx_guest_fops = { .owner = THIS_MODULE, .unlocked_ioctl = tdx_guest_ioctl, - .llseek = no_llseek, }; static struct miscdevice tdx_misc_dev = { @@ -281,6 +300,8 @@ MODULE_DEVICE_TABLE(x86cpu, tdx_guest_ids); static const struct tsm_ops tdx_tsm_ops = { .name = KBUILD_MODNAME, .report_new = tdx_report_new, + .report_attr_visible = tdx_report_attr_visible, + .report_bin_attr_visible = tdx_report_bin_attr_visible, }; static int __init tdx_guest_init(void) @@ -301,7 +322,7 @@ static int __init tdx_guest_init(void) goto free_misc; } - ret = tsm_register(&tdx_tsm_ops, NULL, NULL); + ret = 
tsm_register(&tdx_tsm_ops, NULL); if (ret) goto free_quote; diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c index d1c2db83a8ca..9432d4e303f1 100644 --- a/drivers/virt/coco/tsm.c +++ b/drivers/virt/coco/tsm.c @@ -14,7 +14,6 @@ static struct tsm_provider { const struct tsm_ops *ops; - const struct config_item_type *type; void *data; } provider; static DECLARE_RWSEM(tsm_rwsem); @@ -35,7 +34,7 @@ static DECLARE_RWSEM(tsm_rwsem); * The attestation report format is TSM provider specific, when / if a standard * materializes that can be published instead of the vendor layout. Until then * the 'provider' attribute indicates the format of 'outblob', and optionally - * 'auxblob'. + * 'auxblob' and 'manifestblob'. */ struct tsm_report_state { @@ -48,6 +47,7 @@ struct tsm_report_state { enum tsm_data_select { TSM_REPORT, TSM_CERTS, + TSM_MANIFEST, }; static struct tsm_report *to_tsm_report(struct config_item *cfg) @@ -119,6 +119,74 @@ static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg, } CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor); +static ssize_t tsm_report_service_provider_store(struct config_item *cfg, + const char *buf, size_t len) +{ + struct tsm_report *report = to_tsm_report(cfg); + size_t sp_len; + char *sp; + int rc; + + guard(rwsem_write)(&tsm_rwsem); + rc = try_advance_write_generation(report); + if (rc) + return rc; + + sp_len = (buf[len - 1] != '\n') ? len : len - 1; + + sp = kstrndup(buf, sp_len, GFP_KERNEL); + if (!sp) + return -ENOMEM; + kfree(report->desc.service_provider); + + report->desc.service_provider = sp; + + return len; +} +CONFIGFS_ATTR_WO(tsm_report_, service_provider); + +static ssize_t tsm_report_service_guid_store(struct config_item *cfg, + const char *buf, size_t len) +{ + struct tsm_report *report = to_tsm_report(cfg); + int rc; + + guard(rwsem_write)(&tsm_rwsem); + rc = try_advance_write_generation(report); + if (rc) + return rc; + + report->desc.service_guid = guid_null; + + rc = guid_parse(buf, &report->desc.service_guid); + if (rc) + return rc; + + return len; +} +CONFIGFS_ATTR_WO(tsm_report_, service_guid); + +static ssize_t tsm_report_service_manifest_version_store(struct config_item *cfg, + const char *buf, size_t len) +{ + struct tsm_report *report = to_tsm_report(cfg); + unsigned int val; + int rc; + + rc = kstrtouint(buf, 0, &val); + if (rc) + return rc; + + guard(rwsem_write)(&tsm_rwsem); + rc = try_advance_write_generation(report); + if (rc) + return rc; + report->desc.service_manifest_version = val; + + return len; +} +CONFIGFS_ATTR_WO(tsm_report_, service_manifest_version); + static ssize_t tsm_report_inblob_write(struct config_item *cfg, const void *buf, size_t count) { @@ -163,6 +231,9 @@ static ssize_t __read_report(struct tsm_report *report, void *buf, size_t count, if (select == TSM_REPORT) { out = report->outblob; len = report->outblob_len; + } else if (select == TSM_MANIFEST) { + out = report->manifestblob; + len = report->manifestblob_len; } else { out = report->auxblob; len = report->auxblob_len; @@ -188,7 +259,7 @@ static ssize_t read_cached_report(struct tsm_report *report, void *buf, /* * A given TSM backend always fills in ->outblob regardless of - * whether the report includes an auxblob or not. + * whether the report includes an auxblob/manifestblob or not. 
*/ if (!report->outblob || state->read_generation != state->write_generation) @@ -224,8 +295,10 @@ static ssize_t tsm_report_read(struct tsm_report *report, void *buf, kvfree(report->outblob); kvfree(report->auxblob); + kvfree(report->manifestblob); report->outblob = NULL; report->auxblob = NULL; + report->manifestblob = NULL; rc = ops->report_new(report, provider.data); if (rc < 0) return rc; @@ -252,34 +325,31 @@ static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf, } CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_OUTBLOB_MAX); -#define TSM_DEFAULT_ATTRS() \ - &tsm_report_attr_generation, \ - &tsm_report_attr_provider +static ssize_t tsm_report_manifestblob_read(struct config_item *cfg, void *buf, + size_t count) +{ + struct tsm_report *report = to_tsm_report(cfg); -static struct configfs_attribute *tsm_report_attrs[] = { - TSM_DEFAULT_ATTRS(), - NULL, -}; + return tsm_report_read(report, buf, count, TSM_MANIFEST); +} +CONFIGFS_BIN_ATTR_RO(tsm_report_, manifestblob, NULL, TSM_OUTBLOB_MAX); -static struct configfs_attribute *tsm_report_extra_attrs[] = { - TSM_DEFAULT_ATTRS(), - &tsm_report_attr_privlevel, - &tsm_report_attr_privlevel_floor, +static struct configfs_attribute *tsm_report_attrs[] = { + [TSM_REPORT_GENERATION] = &tsm_report_attr_generation, + [TSM_REPORT_PROVIDER] = &tsm_report_attr_provider, + [TSM_REPORT_PRIVLEVEL] = &tsm_report_attr_privlevel, + [TSM_REPORT_PRIVLEVEL_FLOOR] = &tsm_report_attr_privlevel_floor, + [TSM_REPORT_SERVICE_PROVIDER] = &tsm_report_attr_service_provider, + [TSM_REPORT_SERVICE_GUID] = &tsm_report_attr_service_guid, + [TSM_REPORT_SERVICE_MANIFEST_VER] = &tsm_report_attr_service_manifest_version, NULL, }; -#define TSM_DEFAULT_BIN_ATTRS() \ - &tsm_report_attr_inblob, \ - &tsm_report_attr_outblob - static struct configfs_bin_attribute *tsm_report_bin_attrs[] = { - TSM_DEFAULT_BIN_ATTRS(), - NULL, -}; - -static struct configfs_bin_attribute *tsm_report_bin_extra_attrs[] = { - TSM_DEFAULT_BIN_ATTRS(), - &tsm_report_attr_auxblob, + [TSM_REPORT_INBLOB] = &tsm_report_attr_inblob, + [TSM_REPORT_OUTBLOB] = &tsm_report_attr_outblob, + [TSM_REPORT_AUXBLOB] = &tsm_report_attr_auxblob, + [TSM_REPORT_MANIFESTBLOB] = &tsm_report_attr_manifestblob, NULL, }; @@ -288,8 +358,10 @@ static void tsm_report_item_release(struct config_item *cfg) struct tsm_report *report = to_tsm_report(cfg); struct tsm_report_state *state = to_state(report); + kvfree(report->manifestblob); kvfree(report->auxblob); kvfree(report->outblob); + kfree(report->desc.service_provider); kfree(state); } @@ -297,21 +369,44 @@ static struct configfs_item_operations tsm_report_item_ops = { .release = tsm_report_item_release, }; -const struct config_item_type tsm_report_default_type = { - .ct_owner = THIS_MODULE, - .ct_bin_attrs = tsm_report_bin_attrs, - .ct_attrs = tsm_report_attrs, - .ct_item_ops = &tsm_report_item_ops, +static bool tsm_report_is_visible(struct config_item *item, + struct configfs_attribute *attr, int n) +{ + guard(rwsem_read)(&tsm_rwsem); + if (!provider.ops) + return false; + + if (!provider.ops->report_attr_visible) + return true; + + return provider.ops->report_attr_visible(n); +} + +static bool tsm_report_is_bin_visible(struct config_item *item, + struct configfs_bin_attribute *attr, int n) +{ + guard(rwsem_read)(&tsm_rwsem); + if (!provider.ops) + return false; + + if (!provider.ops->report_bin_attr_visible) + return true; + + return provider.ops->report_bin_attr_visible(n); +} + +static struct configfs_group_operations tsm_report_attr_group_ops = { + 
.is_visible = tsm_report_is_visible, + .is_bin_visible = tsm_report_is_bin_visible, }; -EXPORT_SYMBOL_GPL(tsm_report_default_type); -const struct config_item_type tsm_report_extra_type = { +static const struct config_item_type tsm_report_type = { .ct_owner = THIS_MODULE, - .ct_bin_attrs = tsm_report_bin_extra_attrs, - .ct_attrs = tsm_report_extra_attrs, + .ct_bin_attrs = tsm_report_bin_attrs, + .ct_attrs = tsm_report_attrs, .ct_item_ops = &tsm_report_item_ops, + .ct_group_ops = &tsm_report_attr_group_ops, }; -EXPORT_SYMBOL_GPL(tsm_report_extra_type); static struct config_item *tsm_report_make_item(struct config_group *group, const char *name) @@ -326,7 +421,7 @@ static struct config_item *tsm_report_make_item(struct config_group *group, if (!state) return ERR_PTR(-ENOMEM); - config_item_init_type_name(&state->cfg, name, provider.type); + config_item_init_type_name(&state->cfg, name, &tsm_report_type); return &state->cfg; } @@ -353,16 +448,10 @@ static struct configfs_subsystem tsm_configfs = { .su_mutex = __MUTEX_INITIALIZER(tsm_configfs.su_mutex), }; -int tsm_register(const struct tsm_ops *ops, void *priv, - const struct config_item_type *type) +int tsm_register(const struct tsm_ops *ops, void *priv) { const struct tsm_ops *conflict; - if (!type) - type = &tsm_report_default_type; - if (!(type == &tsm_report_default_type || type == &tsm_report_extra_type)) - return -EINVAL; - guard(rwsem_write)(&tsm_rwsem); conflict = provider.ops; if (conflict) { @@ -372,7 +461,6 @@ int tsm_register(const struct tsm_ops *ops, void *priv, provider.ops = ops; provider.data = priv; - provider.type = type; return 0; } EXPORT_SYMBOL_GPL(tsm_register); @@ -384,7 +472,6 @@ int tsm_unregister(const struct tsm_ops *ops) return -EBUSY; provider.ops = NULL; provider.data = NULL; - provider.type = NULL; return 0; } EXPORT_SYMBOL_GPL(tsm_unregister); |
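
A minimal userspace sketch, not part of the commit, of the configfs-tsm flow that the reworked tsm.c serves: a report item is created under /sys/kernel/config/tsm/report/, the challenge is written to inblob, and reading outblob triggers the registered provider's ->report_new() callback. The directory name "report0", the all-zero nonce and the buffer sizes are arbitrary example values; the service_provider write applies only to SEV-SNP guests running under an SVSM and is simply ignored here if the attribute is not visible.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define TSM_DIR "/sys/kernel/config/tsm/report/report0"

/* Write one configfs attribute; returns 0 on success. */
static int write_attr(const char *attr, const void *buf, size_t len)
{
    char path[256];
    ssize_t n;
    int fd;

    snprintf(path, sizeof(path), TSM_DIR "/%s", attr);
    fd = open(path, O_WRONLY);
    if (fd < 0)
        return -1;
    n = write(fd, buf, len);
    close(fd);
    return n == (ssize_t)len ? 0 : -1;
}

int main(void)
{
    unsigned char nonce[64] = { 0 };   /* caller-chosen challenge */
    unsigned char outblob[16384];      /* large enough for typical reports */
    ssize_t n;
    int fd;

    /* Creating the directory instantiates a tsm_report item. */
    if (mkdir(TSM_DIR, 0700) && errno != EEXIST)
        return 1;

    /* Optional: route the request to an SVSM service (SNP + SVSM only). */
    write_attr("service_provider", "svsm", 4);

    /* The challenge lands in report->desc.inblob (32..64 bytes for CCA, 64 for SNP/TDX). */
    if (write_attr("inblob", nonce, sizeof(nonce)))
        return 1;

    /* Reading outblob invokes the provider's ->report_new(). */
    fd = open(TSM_DIR "/outblob", O_RDONLY);
    if (fd < 0)
        return 1;
    n = read(fd, outblob, sizeof(outblob));
    close(fd);

    printf("attestation report: %zd bytes\n", n);
    return n > 0 ? 0 : 1;
}

The same flow covers the TDX and Arm CCA providers touched by this series: all of them register through tsm_register(), and they differ only in which attributes their report_attr_visible()/report_bin_attr_visible() callbacks expose (for example, manifestblob is only visible for SNP guests with snp_vmpl set).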
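For the ioctl path that sev-guest.c keeps alongside the TSM interface, a similarly hedged sketch using the structures from <linux/sev-guest.h>: get_report() in the driver copies snp_report_req in, sends an SNP_MSG_REPORT_REQ through snp_send_guest_request(), and copies snp_report_resp back out. The 0xaa nonce is an arbitrary example value and error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/sev-guest.h>

int main(void)
{
    struct snp_report_req req = { .vmpl = 0 };
    struct snp_report_resp resp = { 0 };
    struct snp_guest_request_ioctl io = {
        .msg_version = 1,
        .req_data = (__u64)(unsigned long)&req,
        .resp_data = (__u64)(unsigned long)&resp,
    };
    int fd, ret;

    memset(req.user_data, 0xaa, sizeof(req.user_data)); /* arbitrary nonce */

    fd = open("/dev/sev-guest", O_RDWR);
    if (fd < 0)
        return 1;

    /* get_report() encrypts the request, issues the guest request and decrypts the response. */
    ret = ioctl(fd, SNP_GET_REPORT, &io);
    close(fd);

    if (ret) {
        fprintf(stderr, "SNP_GET_REPORT failed, exitinfo2=0x%llx\n",
                (unsigned long long)io.exitinfo2);
        return 1;
    }

    printf("response buffer (header + report): %zu bytes\n", sizeof(resp.data));
    return 0;
}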