Diffstat (limited to 'arch/x86/virt/vmx/tdx/tdx.c')
-rw-r--r--  arch/x86/virt/vmx/tdx/tdx.c  521
1 file changed, 451 insertions(+), 70 deletions(-)
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index 49a1c6890b55..c7a9a087ccaf 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -5,6 +5,7 @@
* Intel Trusted Domain Extensions (TDX) support
*/
+#include "asm/page_types.h"
#define pr_fmt(fmt) "virt/tdx: " fmt
#include <linux/types.h>
@@ -27,13 +28,14 @@
#include <linux/log2.h>
#include <linux/acpi.h>
#include <linux/suspend.h>
+#include <linux/idr.h>
#include <asm/page.h>
#include <asm/special_insns.h>
#include <asm/msr-index.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>
#include <asm/tdx.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include "tdx.h"
@@ -42,6 +44,8 @@ static u32 tdx_global_keyid __ro_after_init;
static u32 tdx_guest_keyid_start __ro_after_init;
static u32 tdx_nr_guest_keyids __ro_after_init;
+static DEFINE_IDA(tdx_guest_keyid_pool);
+
static DEFINE_PER_CPU(bool, tdx_lp_initialized);
static struct tdmr_info_list tdx_tdmr_list;
@@ -52,6 +56,8 @@ static DEFINE_MUTEX(tdx_module_lock);
/* All TDX-usable memory regions. Protected by mem_hotplug_lock. */
static LIST_HEAD(tdx_memlist);
+static struct tdx_sys_info tdx_sysinfo;
+
typedef void (*sc_err_func_t)(u64 fn, u64 err, struct tdx_module_args *args);
static inline void seamcall_err(u64 fn, u64 err, struct tdx_module_args *args)
@@ -69,8 +75,9 @@ static inline void seamcall_err_ret(u64 fn, u64 err,
args->r9, args->r10, args->r11);
}
-static inline int sc_retry_prerr(sc_func_t func, sc_err_func_t err_func,
- u64 fn, struct tdx_module_args *args)
+static __always_inline int sc_retry_prerr(sc_func_t func,
+ sc_err_func_t err_func,
+ u64 fn, struct tdx_module_args *args)
{
u64 sret = sc_retry(func, fn, args);
@@ -270,57 +277,15 @@ static int read_sys_metadata_field(u64 field_id, u64 *data)
return 0;
}
-static int read_sys_metadata_field16(u64 field_id,
- int offset,
- struct tdx_tdmr_sysinfo *ts)
-{
- u16 *ts_member = ((void *)ts) + offset;
- u64 tmp;
- int ret;
-
- if (WARN_ON_ONCE(MD_FIELD_ID_ELE_SIZE_CODE(field_id) !=
- MD_FIELD_ID_ELE_SIZE_16BIT))
- return -EINVAL;
-
- ret = read_sys_metadata_field(field_id, &tmp);
- if (ret)
- return ret;
-
- *ts_member = tmp;
-
- return 0;
-}
-
-struct field_mapping {
- u64 field_id;
- int offset;
-};
+#include "tdx_global_metadata.c"
-#define TD_SYSINFO_MAP(_field_id, _offset) \
- { .field_id = MD_FIELD_ID_##_field_id, \
- .offset = offsetof(struct tdx_tdmr_sysinfo, _offset) }
-
-/* Map TD_SYSINFO fields into 'struct tdx_tdmr_sysinfo': */
-static const struct field_mapping fields[] = {
- TD_SYSINFO_MAP(MAX_TDMRS, max_tdmrs),
- TD_SYSINFO_MAP(MAX_RESERVED_PER_TDMR, max_reserved_per_tdmr),
- TD_SYSINFO_MAP(PAMT_4K_ENTRY_SIZE, pamt_entry_size[TDX_PS_4K]),
- TD_SYSINFO_MAP(PAMT_2M_ENTRY_SIZE, pamt_entry_size[TDX_PS_2M]),
- TD_SYSINFO_MAP(PAMT_1G_ENTRY_SIZE, pamt_entry_size[TDX_PS_1G]),
-};
-
-static int get_tdx_tdmr_sysinfo(struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+static int check_features(struct tdx_sys_info *sysinfo)
{
- int ret;
- int i;
+ u64 tdx_features0 = sysinfo->features.tdx_features0;
- /* Populate 'tdmr_sysinfo' fields using the mapping structure above: */
- for (i = 0; i < ARRAY_SIZE(fields); i++) {
- ret = read_sys_metadata_field16(fields[i].field_id,
- fields[i].offset,
- tdmr_sysinfo);
- if (ret)
- return ret;
+ if (!(tdx_features0 & TDX_FEATURES0_NO_RBP_MOD)) {
+ pr_err("frame pointer (RBP) clobber bug present, upgrade TDX module\n");
+ return -EINVAL;
}
return 0;
@@ -342,13 +307,13 @@ static int tdmr_size_single(u16 max_reserved_per_tdmr)
}
static int alloc_tdmr_list(struct tdmr_info_list *tdmr_list,
- struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+ struct tdx_sys_info_tdmr *sysinfo_tdmr)
{
size_t tdmr_sz, tdmr_array_sz;
void *tdmr_array;
- tdmr_sz = tdmr_size_single(tdmr_sysinfo->max_reserved_per_tdmr);
- tdmr_array_sz = tdmr_sz * tdmr_sysinfo->max_tdmrs;
+ tdmr_sz = tdmr_size_single(sysinfo_tdmr->max_reserved_per_tdmr);
+ tdmr_array_sz = tdmr_sz * sysinfo_tdmr->max_tdmrs;
/*
* To keep things simple, allocate all TDMRs together.
@@ -367,7 +332,7 @@ static int alloc_tdmr_list(struct tdmr_info_list *tdmr_list,
* at a given index in the TDMR list.
*/
tdmr_list->tdmr_sz = tdmr_sz;
- tdmr_list->max_tdmrs = tdmr_sysinfo->max_tdmrs;
+ tdmr_list->max_tdmrs = sysinfo_tdmr->max_tdmrs;
tdmr_list->nr_consumed_tdmrs = 0;
return 0;
@@ -921,25 +886,29 @@ static int tdmrs_populate_rsvd_areas_all(struct tdmr_info_list *tdmr_list,
/*
* Construct a list of TDMRs on the preallocated space in @tdmr_list
* to cover all TDX memory regions in @tmb_list based on the TDX module
- * TDMR global information in @tdmr_sysinfo.
+ * TDMR global information in @sysinfo_tdmr.
*/
static int construct_tdmrs(struct list_head *tmb_list,
struct tdmr_info_list *tdmr_list,
- struct tdx_tdmr_sysinfo *tdmr_sysinfo)
+ struct tdx_sys_info_tdmr *sysinfo_tdmr)
{
+ u16 pamt_entry_size[TDX_PS_NR] = {
+ sysinfo_tdmr->pamt_4k_entry_size,
+ sysinfo_tdmr->pamt_2m_entry_size,
+ sysinfo_tdmr->pamt_1g_entry_size,
+ };
int ret;
ret = fill_out_tdmrs(tmb_list, tdmr_list);
if (ret)
return ret;
- ret = tdmrs_set_up_pamt_all(tdmr_list, tmb_list,
- tdmr_sysinfo->pamt_entry_size);
+ ret = tdmrs_set_up_pamt_all(tdmr_list, tmb_list, pamt_entry_size);
if (ret)
return ret;
ret = tdmrs_populate_rsvd_areas_all(tdmr_list, tmb_list,
- tdmr_sysinfo->max_reserved_per_tdmr);
+ sysinfo_tdmr->max_reserved_per_tdmr);
if (ret)
tdmrs_free_pamt_all(tdmr_list);
@@ -1098,9 +1067,17 @@ static int init_tdmrs(struct tdmr_info_list *tdmr_list)
static int init_tdx_module(void)
{
- struct tdx_tdmr_sysinfo tdmr_sysinfo;
int ret;
+ ret = get_tdx_sys_info(&tdx_sysinfo);
+ if (ret)
+ return ret;
+
+ /* Check whether the kernel can support this module */
+ ret = check_features(&tdx_sysinfo);
+ if (ret)
+ return ret;
+
/*
* To keep things simple, assume that all TDX-protected memory
* will come from the page allocator. Make sure all pages in the
@@ -1117,17 +1094,13 @@ static int init_tdx_module(void)
if (ret)
goto out_put_tdxmem;
- ret = get_tdx_tdmr_sysinfo(&tdmr_sysinfo);
- if (ret)
- goto err_free_tdxmem;
-
/* Allocate enough space for constructing TDMRs */
- ret = alloc_tdmr_list(&tdx_tdmr_list, &tdmr_sysinfo);
+ ret = alloc_tdmr_list(&tdx_tdmr_list, &tdx_sysinfo.tdmr);
if (ret)
goto err_free_tdxmem;
/* Cover all TDX-usable memory regions in TDMRs */
- ret = construct_tdmrs(&tdx_memlist, &tdx_tdmr_list, &tdmr_sysinfo);
+ ret = construct_tdmrs(&tdx_memlist, &tdx_tdmr_list, &tdx_sysinfo.tdmr);
if (ret)
goto err_free_tdmrs;
@@ -1426,9 +1399,9 @@ static void __init check_tdx_erratum(void)
* private memory poisons that memory, and a subsequent read of
* that memory triggers #MC.
*/
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
- case INTEL_FAM6_EMERALDRAPIDS_X:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_SAPPHIRERAPIDS_X:
+ case INTEL_EMERALDRAPIDS_X:
setup_force_cpu_bug(X86_BUG_TDX_PW_MCE);
}
}
@@ -1489,3 +1462,411 @@ void __init tdx_init(void)
check_tdx_erratum();
}
+
+const struct tdx_sys_info *tdx_get_sysinfo(void)
+{
+ const struct tdx_sys_info *p = NULL;
+
+ /* Make sure all fields in @tdx_sysinfo have been populated */
+ mutex_lock(&tdx_module_lock);
+ if (tdx_module_status == TDX_MODULE_INITIALIZED)
+ p = (const struct tdx_sys_info *)&tdx_sysinfo;
+ mutex_unlock(&tdx_module_lock);
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(tdx_get_sysinfo);
+
+u32 tdx_get_nr_guest_keyids(void)
+{
+ return tdx_nr_guest_keyids;
+}
+EXPORT_SYMBOL_GPL(tdx_get_nr_guest_keyids);
+
+int tdx_guest_keyid_alloc(void)
+{
+ return ida_alloc_range(&tdx_guest_keyid_pool, tdx_guest_keyid_start,
+ tdx_guest_keyid_start + tdx_nr_guest_keyids - 1,
+ GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(tdx_guest_keyid_alloc);
+
+void tdx_guest_keyid_free(unsigned int keyid)
+{
+ ida_free(&tdx_guest_keyid_pool, keyid);
+}
+EXPORT_SYMBOL_GPL(tdx_guest_keyid_free);
+
+static inline u64 tdx_tdr_pa(struct tdx_td *td)
+{
+ return page_to_phys(td->tdr_page);
+}
+
+static inline u64 tdx_tdvpr_pa(struct tdx_vp *td)
+{
+ return page_to_phys(td->tdvpr_page);
+}
+
+/*
+ * The TDX module exposes a CLFLUSH_BEFORE_ALLOC bit to specify whether
+ * a CLFLUSH of pages is required before handing them to the TDX module.
+ * Be conservative and make the code simpler by doing the CLFLUSH
+ * unconditionally.
+ */
+static void tdx_clflush_page(struct page *page)
+{
+ clflush_cache_range(page_to_virt(page), PAGE_SIZE);
+}
+
+noinstr __flatten u64 tdh_vp_enter(struct tdx_vp *td, struct tdx_module_args *args)
+{
+ args->rcx = tdx_tdvpr_pa(td);
+
+ return __seamcall_saved_ret(TDH_VP_ENTER, args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_enter);
+
+u64 tdh_mng_addcx(struct tdx_td *td, struct page *tdcs_page)
+{
+ struct tdx_module_args args = {
+ .rcx = page_to_phys(tdcs_page),
+ .rdx = tdx_tdr_pa(td),
+ };
+
+ tdx_clflush_page(tdcs_page);
+ return seamcall(TDH_MNG_ADDCX, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_addcx);
+
+u64 tdh_mem_page_add(struct tdx_td *td, u64 gpa, struct page *page, struct page *source, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa,
+ .rdx = tdx_tdr_pa(td),
+ .r8 = page_to_phys(page),
+ .r9 = page_to_phys(source),
+ };
+ u64 ret;
+
+ tdx_clflush_page(page);
+ ret = seamcall_ret(TDH_MEM_PAGE_ADD, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_page_add);
+
+u64 tdh_mem_sept_add(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdx_tdr_pa(td),
+ .r8 = page_to_phys(page),
+ };
+ u64 ret;
+
+ tdx_clflush_page(page);
+ ret = seamcall_ret(TDH_MEM_SEPT_ADD, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_sept_add);
+
+u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page)
+{
+ struct tdx_module_args args = {
+ .rcx = page_to_phys(tdcx_page),
+ .rdx = tdx_tdvpr_pa(vp),
+ };
+
+ tdx_clflush_page(tdcx_page);
+ return seamcall(TDH_VP_ADDCX, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_addcx);
+
+u64 tdh_mem_page_aug(struct tdx_td *td, u64 gpa, int level, struct page *page, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdx_tdr_pa(td),
+ .r8 = page_to_phys(page),
+ };
+ u64 ret;
+
+ tdx_clflush_page(page);
+ ret = seamcall_ret(TDH_MEM_PAGE_AUG, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_page_aug);
+
+u64 tdh_mem_range_block(struct tdx_td *td, u64 gpa, int level, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdx_tdr_pa(td),
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MEM_RANGE_BLOCK, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_range_block);
+
+u64 tdh_mng_key_config(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MNG_KEY_CONFIG, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_key_config);
+
+u64 tdh_mng_create(struct tdx_td *td, u16 hkid)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ .rdx = hkid,
+ };
+
+ tdx_clflush_page(td->tdr_page);
+ return seamcall(TDH_MNG_CREATE, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_create);
+
+u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ .rdx = tdx_tdr_pa(td),
+ };
+
+ tdx_clflush_page(vp->tdvpr_page);
+ return seamcall(TDH_VP_CREATE, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_create);
+
+u64 tdh_mng_rd(struct tdx_td *td, u64 field, u64 *data)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ .rdx = field,
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MNG_RD, &args);
+
+ /* R8: Content of the field, or 0 in case of error. */
+ *data = args.r8;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mng_rd);
+
+u64 tdh_mr_extend(struct tdx_td *td, u64 gpa, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa,
+ .rdx = tdx_tdr_pa(td),
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MR_EXTEND, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mr_extend);
+
+u64 tdh_mr_finalize(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MR_FINALIZE, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mr_finalize);
+
+u64 tdh_vp_flush(struct tdx_vp *vp)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ };
+
+ return seamcall(TDH_VP_FLUSH, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_flush);
+
+u64 tdh_mng_vpflushdone(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MNG_VPFLUSHDONE, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_vpflushdone);
+
+u64 tdh_mng_key_freeid(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MNG_KEY_FREEID, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mng_key_freeid);
+
+u64 tdh_mng_init(struct tdx_td *td, u64 td_params, u64 *extended_err)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ .rdx = td_params,
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MNG_INIT, &args);
+
+ *extended_err = args.rcx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mng_init);
+
+u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ .rdx = field,
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_VP_RD, &args);
+
+ /* R8: Content of the field, or 0 in case of error. */
+ *data = args.r8;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_vp_rd);
+
+u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ .rdx = field,
+ .r8 = data,
+ .r9 = mask,
+ };
+
+ return seamcall(TDH_VP_WR, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_wr);
+
+u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdvpr_pa(vp),
+ .rdx = initial_rcx,
+ .r8 = x2apicid,
+ };
+
+ /* apicid requires version == 1. */
+ return seamcall(TDH_VP_INIT | (1ULL << TDX_VERSION_SHIFT), &args);
+}
+EXPORT_SYMBOL_GPL(tdh_vp_init);
+
+/*
+ * TDX ABI defines output operands as PT, OWNER and SIZE. These are TDX-defined formats.
+ * So despite the names, they must be interpreted specially as described by the spec.
+ * Return them only for error reporting purposes.
+ */
+u64 tdh_phymem_page_reclaim(struct page *page, u64 *tdx_pt, u64 *tdx_owner, u64 *tdx_size)
+{
+ struct tdx_module_args args = {
+ .rcx = page_to_phys(page),
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_PHYMEM_PAGE_RECLAIM, &args);
+
+ *tdx_pt = args.rcx;
+ *tdx_owner = args.rdx;
+ *tdx_size = args.r8;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_page_reclaim);
+
+u64 tdh_mem_track(struct tdx_td *td)
+{
+ struct tdx_module_args args = {
+ .rcx = tdx_tdr_pa(td),
+ };
+
+ return seamcall(TDH_MEM_TRACK, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_mem_track);
+
+u64 tdh_mem_page_remove(struct tdx_td *td, u64 gpa, u64 level, u64 *ext_err1, u64 *ext_err2)
+{
+ struct tdx_module_args args = {
+ .rcx = gpa | level,
+ .rdx = tdx_tdr_pa(td),
+ };
+ u64 ret;
+
+ ret = seamcall_ret(TDH_MEM_PAGE_REMOVE, &args);
+
+ *ext_err1 = args.rcx;
+ *ext_err2 = args.rdx;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tdh_mem_page_remove);
+
+u64 tdh_phymem_cache_wb(bool resume)
+{
+ struct tdx_module_args args = {
+ .rcx = resume ? 1 : 0,
+ };
+
+ return seamcall(TDH_PHYMEM_CACHE_WB, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_cache_wb);
+
+u64 tdh_phymem_page_wbinvd_tdr(struct tdx_td *td)
+{
+ struct tdx_module_args args = {};
+
+ args.rcx = mk_keyed_paddr(tdx_global_keyid, td->tdr_page);
+
+ return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_tdr);
+
+u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
+{
+ struct tdx_module_args args = {};
+
+ args.rcx = mk_keyed_paddr(hkid, page);
+
+ return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
+}
+EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
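
A minimal usage sketch of the exported helpers above (not part of the patch): it shows how a
hypothetical in-kernel consumer, e.g. a hypervisor, might reserve a guest KeyID and bind it to a
new TD using tdx_guest_keyid_alloc(), tdh_mng_create() and tdh_mng_key_config(). It assumes the
wrapper declarations are visible via <asm/tdx.h>, the TDX module has been initialized, the CPU is
in VMX operation, and td->tdr_page is already allocated; example_td_key_setup() is a hypothetical
name, and the error handling (including the per-package TDH.MNG.KEY.CONFIG requirement and
TDX_RND_NO_ENTROPY retries) is deliberately simplified.

#include <asm/tdx.h>

/* Hypothetical caller, shown for illustration only. */
static int example_td_key_setup(struct tdx_td *td)
{
	u64 err;
	int keyid;

	/* Reserve a private host KeyID from the pool set up by tdx_init(). */
	keyid = tdx_guest_keyid_alloc();
	if (keyid < 0)
		return keyid;

	/* Bind the KeyID to this TD's TDR page. */
	err = tdh_mng_create(td, (u16)keyid);
	if (err)
		goto free_keyid;

	/*
	 * Program the key on the hardware. The real bring-up flow issues
	 * TDH.MNG.KEY.CONFIG once per package; a single call is shown here
	 * for brevity.
	 */
	err = tdh_mng_key_config(td);
	if (err)
		goto free_keyid;

	return keyid;

free_keyid:
	tdx_guest_keyid_free(keyid);
	return -EIO;
}

On teardown, the KeyID would be released on the module side with tdh_mng_key_freeid() (after the
relevant TD state has been flushed) and then returned to the pool with tdx_guest_keyid_free().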