author    Jan Beulich <jbeulich@suse.com>               2021-09-30 14:36:16 +0200
committer Boris Ostrovsky <boris.ostrovsky@oracle.com>  2021-11-02 07:45:43 -0500
commit    d2a3ef44c2a2e49fc08270a9d33f0e4918ee2cb9
tree      6d4d50e39e3e97a738f19fecb2e78ea3db5ed3f2
parent    4c360db6ccdb12d82c5c255c90c249970f5d1956
xen/x86: adjust handling of the L3 user vsyscall special page table
Marking the page table as pinned without ever actually pinning it was probably an oversight in the first place. The main reason for the change is more subtle, though: the write of the one present entry each, here and in the subsequently allocated L2 table, engages a code path in the hypervisor which exists only for thought-to-be-broken guests: an mmu-update operation to a page which is neither a page table nor marked writable. The hypervisor merely assumes (or should I say "hopes") that the fact that a writable reference to the page can be obtained means it is okay to actually write to that page in response to such a hypercall.

While there, make all involved code and data dependent upon X86_VSYSCALL_EMULATION (some code was already).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: https://lore.kernel.org/r/1048f5b8-b726-dcc1-1216-9d5ac328ce82@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
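
For context, the "mmu-update operation" the message refers to is the PV guest's way of having Xen write a page-table entry on its behalf. A minimal sketch of such a call, using the real HYPERVISOR_mmu_update() interface but with a hypothetical helper name, might look roughly like this:

	#include <xen/interface/xen.h>    /* struct mmu_update, DOMID_SELF */
	#include <asm/xen/hypercall.h>    /* HYPERVISOR_mmu_update() */
	#include <asm/xen/page.h>         /* virt_to_machine(), pte_val_ma() */

	/*
	 * Sketch only: ask Xen to store a new PTE value at the machine
	 * address of the given slot.  The code path criticized in the
	 * commit message is the hypervisor accepting such a request for
	 * a page that is neither a page table nor marked writable.
	 */
	static void sketch_mmu_update_pte(pte_t *ptep, pte_t pteval)
	{
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr;	/* target slot */
		u.val = pte_val_ma(pteval);		/* new entry */

		if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
			BUG();
	}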
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/mmu_pv.c | 12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index ad0fbc7e4872..325a67910855 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -86,8 +86,10 @@
#include "mmu.h"
#include "debugfs.h"
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+#endif
/*
* Protects atomic reservation decrease/increase against concurrent increases.
@@ -791,7 +793,9 @@ static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
static void __init xen_after_bootmem(void)
{
static_branch_enable(&xen_struct_pages_ready);
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
SetPagePinned(virt_to_page(level3_user_vsyscall));
+#endif
xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}
@@ -1762,7 +1766,6 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
@@ -1779,6 +1782,13 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* Unpin Xen-provided one */
pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+ /* Pin user vsyscall L3 */
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+ pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
+ PFN_DOWN(__pa_symbol(level3_user_vsyscall)));
+#endif
+
/*
* At this stage there can be no user pgd, and no page structure to
* attach it to, so make sure we just set kernel pgd.
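
For reference, the pin_pagetable_pfn() helper used in the new hunk is a pre-existing wrapper in mmu_pv.c around the MMUEXT pin/unpin hypercall; it looks essentially like this (a sketch of the existing helper, not part of this patch):

	static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
	{
		struct mmuext_op op;

		op.cmd = cmd;			/* e.g. MMUEXT_PIN_L3_TABLE */
		op.arg1.mfn = pfn_to_mfn(pfn);
		if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
			BUG();
	}

Note the ordering in the hunk above: the table is remapped read-only via set_page_prot() before the pin request, since Xen will not pin a page table to which the guest still holds writable mappings.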