Diffstat (limited to 'arch/x86/virt')
-rw-r--r--  arch/x86/virt/svm/sev.c      7
-rw-r--r--  arch/x86/virt/vmx/tdx/tdx.c  80
2 files changed, 52 insertions(+), 35 deletions(-)
diff --git a/arch/x86/virt/svm/sev.c b/arch/x86/virt/svm/sev.c
index 942372e69b4d..ee643a6cd691 100644
--- a/arch/x86/virt/svm/sev.c
+++ b/arch/x86/virt/svm/sev.c
@@ -1029,7 +1029,7 @@ int rmp_make_shared(u64 pfn, enum pg_level level)
}
EXPORT_SYMBOL_GPL(rmp_make_shared);
-void snp_leak_pages(u64 pfn, unsigned int npages)
+void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp)
{
struct page *page = pfn_to_page(pfn);
@@ -1052,14 +1052,15 @@ void snp_leak_pages(u64 pfn, unsigned int npages)
(PageHead(page) && compound_nr(page) <= npages))
list_add_tail(&page->buddy_list, &snp_leaked_pages_list);
- dump_rmpentry(pfn);
+ if (dump_rmp)
+ dump_rmpentry(pfn);
snp_nr_leaked_pages++;
pfn++;
page++;
}
spin_unlock(&snp_leaked_pages_list_lock);
}
-EXPORT_SYMBOL_GPL(snp_leak_pages);
+EXPORT_SYMBOL_GPL(__snp_leak_pages);
void kdump_sev_callback(void)
{
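The rename gives the export a dump_rmp switch while keeping the existing behavior reachable. Presumably the old snp_leak_pages() name survives as a thin wrapper next to the declaration; a minimal sketch, assuming a header placement such as asm/sev.h (not part of this hunk):

/* Sketch of the assumed compatibility wrapper, e.g. in asm/sev.h. */
void __snp_leak_pages(u64 pfn, unsigned int npages, bool dump_rmp);

static inline void snp_leak_pages(u64 pfn, unsigned int npages)
{
	/* Keep the historical behavior: dump each page's RMP entry. */
	__snp_leak_pages(pfn, npages, true);
}

Callers that leak pages in bulk can then pass dump_rmp=false to avoid flooding the log with per-page RMP dumps.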
diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index c7a9a087ccaf..eac403248462 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -633,15 +633,19 @@ err:
}
/*
- * Convert TDX private pages back to normal by using MOVDIR64B to
- * clear these pages. Note this function doesn't flush cache of
- * these TDX private pages. The caller should make sure of that.
+ * Convert TDX private pages back to normal by using MOVDIR64B to clear these
+ * pages. Typically, any write to the page will convert it from TDX private back
+ * to normal kernel memory. Systems with the X86_BUG_TDX_PW_MCE erratum need to
+ * do the conversion explicitly via MOVDIR64B.
*/
-static void reset_tdx_pages(unsigned long base, unsigned long size)
+static void tdx_quirk_reset_paddr(unsigned long base, unsigned long size)
{
const void *zero_page = (const void *)page_address(ZERO_PAGE(0));
unsigned long phys, end;
+ if (!boot_cpu_has_bug(X86_BUG_TDX_PW_MCE))
+ return;
+
end = base + size;
for (phys = base; phys < end; phys += 64)
movdir64b(__va(phys), zero_page);
@@ -654,17 +658,23 @@ static void reset_tdx_pages(unsigned long base, unsigned long size)
mb();
}
-static void tdmr_reset_pamt(struct tdmr_info *tdmr)
+void tdx_quirk_reset_page(struct page *page)
+{
+ tdx_quirk_reset_paddr(page_to_phys(page), PAGE_SIZE);
+}
+EXPORT_SYMBOL_GPL(tdx_quirk_reset_page);
+
+static void tdmr_quirk_reset_pamt(struct tdmr_info *tdmr)
{
- tdmr_do_pamt_func(tdmr, reset_tdx_pages);
+ tdmr_do_pamt_func(tdmr, tdx_quirk_reset_paddr);
}
-static void tdmrs_reset_pamt_all(struct tdmr_info_list *tdmr_list)
+static void tdmrs_quirk_reset_pamt_all(struct tdmr_info_list *tdmr_list)
{
int i;
for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++)
- tdmr_reset_pamt(tdmr_entry(tdmr_list, i));
+ tdmr_quirk_reset_pamt(tdmr_entry(tdmr_list, i));
}
static unsigned long tdmrs_count_pamt_kb(struct tdmr_info_list *tdmr_list)
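With tdx_quirk_reset_page() exported, code that reclaims a formerly TDX-private page can scrub it before reuse. A minimal usage sketch (the freeing helper is illustrative, not from this patch):

/*
 * Illustrative only: scrub a page that was TDX private before
 * returning it to the page allocator. On systems without the
 * X86_BUG_TDX_PW_MCE erratum the reset is a no-op.
 */
static void example_reclaim_private_page(struct page *page)
{
	tdx_quirk_reset_page(page);
	__free_page(page);
}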
@@ -1136,15 +1146,7 @@ err_reset_pamts:
* to the kernel.
*/
wbinvd_on_all_cpus();
- /*
- * According to the TDX hardware spec, if the platform
- * doesn't have the "partial write machine check"
- * erratum, any kernel read/write will never cause #MC
- * in kernel space, thus it's OK to not convert PAMTs
- * back to normal. But do the conversion anyway here
- * as suggested by the TDX spec.
- */
- tdmrs_reset_pamt_all(&tdx_tdmr_list);
+ tdmrs_quirk_reset_pamt_all(&tdx_tdmr_list);
err_free_pamts:
tdmrs_free_pamt_all(&tdx_tdmr_list);
err_free_tdmrs:
@@ -1266,7 +1268,7 @@ static bool paddr_is_tdx_private(unsigned long phys)
return false;
/* Get page type from the TDX module */
- sret = __seamcall_ret(TDH_PHYMEM_PAGE_RDMD, &args);
+ sret = __seamcall_dirty_cache(__seamcall_ret, TDH_PHYMEM_PAGE_RDMD, &args);
/*
* The SEAMCALL will not return success unless there is a
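__seamcall_dirty_cache() is not defined in this file. Judging from the cache_state_incoherent flag cleared in tdx_cpu_flush_cache_for_kexec() below, it presumably marks this CPU's caches as potentially holding dirty private cachelines before issuing the SEAMCALL. A sketch under that assumption (the real definition may differ):

/* Assumed shape of the wrapper; the real definition lives elsewhere. */
#define __seamcall_dirty_cache(seamcall_func, fn, args)			\
({									\
	lockdep_assert_preemption_disabled();				\
									\
	/* SEAMCALLs may dirty cachelines with the private HKID. */	\
	this_cpu_write(cache_state_incoherent, true);			\
									\
	/* Order the flag write before the SEAMCALL itself. */		\
	barrier();							\
									\
	seamcall_func((fn), (args));					\
})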
@@ -1502,11 +1504,6 @@ static inline u64 tdx_tdr_pa(struct tdx_td *td)
return page_to_phys(td->tdr_page);
}
-static inline u64 tdx_tdvpr_pa(struct tdx_vp *td)
-{
- return page_to_phys(td->tdvpr_page);
-}
-
/*
* The TDX module exposes a CLFLUSH_BEFORE_ALLOC bit to specify whether
* a CLFLUSH of pages is required before handing them to the TDX module.
@@ -1518,11 +1515,11 @@ static void tdx_clflush_page(struct page *page)
clflush_cache_range(page_to_virt(page), PAGE_SIZE);
}
-noinstr __flatten u64 tdh_vp_enter(struct tdx_vp *td, struct tdx_module_args *args)
+noinstr u64 tdh_vp_enter(struct tdx_vp *td, struct tdx_module_args *args)
{
- args->rcx = tdx_tdvpr_pa(td);
+ args->rcx = td->tdvpr_pa;
- return __seamcall_saved_ret(TDH_VP_ENTER, args);
+ return __seamcall_dirty_cache(__seamcall_saved_ret, TDH_VP_ENTER, args);
}
EXPORT_SYMBOL_GPL(tdh_vp_enter);
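Reading a cached tdvpr_pa field replaces the page_to_phys() call, which is not noinstr-safe inside tdh_vp_enter(); with no out-of-line call left to fold in, the __flatten annotation can go as well. The field would presumably be filled once at vCPU setup, along these lines:

/* Illustrative: tdvpr_pa is assumed to be a new u64 member of
 * struct tdx_vp, initialized wherever tdvpr_page is installed. */
static void example_vp_setup(struct tdx_vp *vp, struct page *tdvpr_page)
{
	vp->tdvpr_page = tdvpr_page;
	vp->tdvpr_pa = page_to_phys(tdvpr_page);	/* cached once */
}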
@@ -1581,7 +1578,7 @@ u64 tdh_vp_addcx(struct tdx_vp *vp, struct page *tdcx_page)
{
struct tdx_module_args args = {
.rcx = page_to_phys(tdcx_page),
- .rdx = tdx_tdvpr_pa(vp),
+ .rdx = vp->tdvpr_pa,
};
tdx_clflush_page(tdcx_page);
@@ -1650,7 +1647,7 @@ EXPORT_SYMBOL_GPL(tdh_mng_create);
u64 tdh_vp_create(struct tdx_td *td, struct tdx_vp *vp)
{
struct tdx_module_args args = {
- .rcx = tdx_tdvpr_pa(vp),
+ .rcx = vp->tdvpr_pa,
.rdx = tdx_tdr_pa(td),
};
@@ -1706,7 +1703,7 @@ EXPORT_SYMBOL_GPL(tdh_mr_finalize);
u64 tdh_vp_flush(struct tdx_vp *vp)
{
struct tdx_module_args args = {
- .rcx = tdx_tdvpr_pa(vp),
+ .rcx = vp->tdvpr_pa,
};
return seamcall(TDH_VP_FLUSH, &args);
@@ -1752,7 +1749,7 @@ EXPORT_SYMBOL_GPL(tdh_mng_init);
u64 tdh_vp_rd(struct tdx_vp *vp, u64 field, u64 *data)
{
struct tdx_module_args args = {
- .rcx = tdx_tdvpr_pa(vp),
+ .rcx = vp->tdvpr_pa,
.rdx = field,
};
u64 ret;
@@ -1769,7 +1766,7 @@ EXPORT_SYMBOL_GPL(tdh_vp_rd);
u64 tdh_vp_wr(struct tdx_vp *vp, u64 field, u64 data, u64 mask)
{
struct tdx_module_args args = {
- .rcx = tdx_tdvpr_pa(vp),
+ .rcx = vp->tdvpr_pa,
.rdx = field,
.r8 = data,
.r9 = mask,
@@ -1782,7 +1779,7 @@ EXPORT_SYMBOL_GPL(tdh_vp_wr);
u64 tdh_vp_init(struct tdx_vp *vp, u64 initial_rcx, u32 x2apicid)
{
struct tdx_module_args args = {
- .rcx = tdx_tdvpr_pa(vp),
+ .rcx = vp->tdvpr_pa,
.rdx = initial_rcx,
.r8 = x2apicid,
};
@@ -1870,3 +1867,22 @@ u64 tdh_phymem_page_wbinvd_hkid(u64 hkid, struct page *page)
return seamcall(TDH_PHYMEM_PAGE_WBINVD, &args);
}
EXPORT_SYMBOL_GPL(tdh_phymem_page_wbinvd_hkid);
+
+#ifdef CONFIG_KEXEC_CORE
+void tdx_cpu_flush_cache_for_kexec(void)
+{
+ lockdep_assert_preemption_disabled();
+
+ if (!this_cpu_read(cache_state_incoherent))
+ return;
+
+ /*
+ * Private memory cachelines need to be clean at the time of
+ * kexec. Write them back now, as the caller promises that
+ * there should be no more SEAMCALLs on this CPU.
+ */
+ wbinvd();
+ this_cpu_write(cache_state_incoherent, false);
+}
+EXPORT_SYMBOL_GPL(tdx_cpu_flush_cache_for_kexec);
+#endif
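The new helper is meant to run once per CPU late in the kexec teardown, after the last SEAMCALL on that CPU and with preemption already disabled. A hedged sketch of a call site (the surrounding function is illustrative; the real hook sits in the reboot/kexec path):

/* Illustrative caller only. */
static void example_stop_this_cpu(void)
{
	/* The caller guarantees no further SEAMCALLs on this CPU. */
	tdx_cpu_flush_cache_for_kexec();

	native_halt();
}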