From 6c690ee1039b251e583fc65b28da30e97d6a7385 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Mon, 12 Jun 2017 10:26:14 -0700
Subject: x86/mm: Split read_cr3() into read_cr3_pa() and __read_cr3()

The kernel has several code paths that read CR3. Most of them assume that
CR3 contains the PGD's physical address, whereas some of them awkwardly
use PHYSICAL_PAGE_MASK to mask off low bits.

Add explicit mask macros for CR3 and convert all of the CR3 readers.
This will keep them from breaking when PCID is enabled.

Signed-off-by: Andy Lutomirski
Cc: Boris Ostrovsky
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Juergen Gross
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Tom Lendacky
Cc: xen-devel
Link: http://lkml.kernel.org/r/883f8fb121f4616c1c1427ad87350bb2f5ffeca1.1497288170.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/boot/compressed/pagetable.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/boot/compressed')

diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 1d78f1739087..8e69df96492e 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -92,7 +92,7 @@ void initialize_identity_maps(void)
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
	 */
-	level4p = read_cr3();
+	level4p = read_cr3_pa();
	if (level4p == (unsigned long)_pgtable) {
		debug_putstr("booted via startup_32()\n");
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
-- cgit
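The hunk above only shows the boot/compressed caller being converted; the new mask macros and the accessor pair land outside this directory. As a rough, self-contained sketch of the idea behind the split (the names match the commit, the exact definitions and their file locations are abbreviated here): once PCID is in use, CR3 is no longer just the PGD address, so callers that want the PGD must mask the low bits off explicitly.

/* Sketch only -- the real definitions live under arch/x86/include/asm/. */
#define CR3_ADDR_MASK	0x7FFFFFFFFFFFF000ull	/* PGD physical address   */
#define CR3_PCID_MASK	0xFFFull		/* PCID in the low 12 bits */

/* Raw CR3 value: PGD address plus the PCID bits. */
static inline unsigned long __read_cr3(void)
{
	unsigned long val;

	asm volatile("mov %%cr3, %0" : "=r" (val));
	return val;
}

/* What most callers actually want: only the PGD physical address. */
static inline unsigned long read_cr3_pa(void)
{
	return __read_cr3() & CR3_ADDR_MASK;
}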
From f8fceacbd1eaebe67997798867d177b36a0a6219 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 6 Jun 2017 14:31:22 +0300
Subject: x86/boot/efi: Cleanup initialization of GDT entries

This is preparation for the following patches; it does not change the
semantics of the code.

Signed-off-by: Kirill A. Shutemov
Reviewed-by: Matt Fleming
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Dave Hansen
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-4-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/boot/compressed/eboot.c | 39 +++++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 18 deletions(-)

(limited to 'arch/x86/boot/compressed')

diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index cbf4b87f55b9..1d5093d1f319 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1049,6 +1049,7 @@ struct boot_params *efi_main(struct efi_config *c,
	/* The first GDT is a dummy and the second is unused. */
	desc += 2;

+	/* __KERNEL_CS */
	desc->limit0 = 0xffff;
	desc->base0 = 0x0000;
	desc->base1 = 0x0000;
@@ -1062,8 +1063,9 @@ struct boot_params *efi_main(struct efi_config *c,
	desc->d = SEG_OP_SIZE_32BIT;
	desc->g = SEG_GRANULARITY_4KB;
	desc->base2 = 0x00;
-
	desc++;
+
+	/* __KERNEL_DS */
	desc->limit0 = 0xffff;
	desc->base0 = 0x0000;
	desc->base1 = 0x0000;
@@ -1077,24 +1079,25 @@ struct boot_params *efi_main(struct efi_config *c,
	desc->d = SEG_OP_SIZE_32BIT;
	desc->g = SEG_GRANULARITY_4KB;
	desc->base2 = 0x00;
-
-#ifdef CONFIG_X86_64
-	/* Task segment value */
	desc++;
-	desc->limit0 = 0x0000;
-	desc->base0 = 0x0000;
-	desc->base1 = 0x0000;
-	desc->type = SEG_TYPE_TSS;
-	desc->s = 0;
-	desc->dpl = 0;
-	desc->p = 1;
-	desc->limit = 0x0;
-	desc->avl = 0;
-	desc->l = 0;
-	desc->d = 0;
-	desc->g = SEG_GRANULARITY_4KB;
-	desc->base2 = 0x00;
-#endif /* CONFIG_X86_64 */
+
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		/* Task segment value */
+		desc->limit0 = 0x0000;
+		desc->base0 = 0x0000;
+		desc->base1 = 0x0000;
+		desc->type = SEG_TYPE_TSS;
+		desc->s = 0;
+		desc->dpl = 0;
+		desc->p = 1;
+		desc->limit = 0x0;
+		desc->avl = 0;
+		desc->l = 0;
+		desc->d = 0;
+		desc->g = SEG_GRANULARITY_4KB;
+		desc->base2 = 0x00;
+		desc++;
+	}

	asm volatile("cli");
	asm volatile ("lgdt %0" : : "m" (*gdt));
-- cgit

From 4c94117c7fa1bea36a15157022dbe5efee474340 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 6 Jun 2017 14:31:23 +0300
Subject: x86/boot/efi: Fix __KERNEL_CS definition of GDT entry on 64-bit configurations

Define the __KERNEL_CS GDT entry as a long mode code segment (.L=1, .D=0)
on 64-bit configurations.

Signed-off-by: Kirill A. Shutemov
Reviewed-by: Matt Fleming
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Dave Hansen
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-5-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/boot/compressed/eboot.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'arch/x86/boot/compressed')

diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 1d5093d1f319..b75c20f779ea 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1059,8 +1059,13 @@ struct boot_params *efi_main(struct efi_config *c,
	desc->p = 1;
	desc->limit = 0xf;
	desc->avl = 0;
-	desc->l = 0;
-	desc->d = SEG_OP_SIZE_32BIT;
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		desc->l = 1;
+		desc->d = 0;
+	} else {
+		desc->l = 0;
+		desc->d = SEG_OP_SIZE_32BIT;
+	}
	desc->g = SEG_GRANULARITY_4KB;
	desc->base2 = 0x00;
	desc++;
-- cgit
From 919a02d1280a3845bac70e6b7891ca070f21a60a Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 6 Jun 2017 14:31:24 +0300
Subject: x86/boot/efi: Define __KERNEL32_CS GDT on 64-bit configurations

We will need to switch temporarily to compatibility mode while booting
with 5-level paging enabled. That requires a 32-bit code segment
descriptor.

Signed-off-by: Kirill A. Shutemov
Reviewed-by: Matt Fleming
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Dave Hansen
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-6-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/boot/compressed/eboot.c | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

(limited to 'arch/x86/boot/compressed')

diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index b75c20f779ea..c3e869eaef0c 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1046,8 +1046,29 @@ struct boot_params *efi_main(struct efi_config *c,
	memset((char *)gdt->address, 0x0, gdt->size);
	desc = (struct desc_struct *)gdt->address;

-	/* The first GDT is a dummy and the second is unused. */
-	desc += 2;
+	/* The first GDT is a dummy. */
+	desc++;
+
+	if (IS_ENABLED(CONFIG_X86_64)) {
+		/* __KERNEL32_CS */
+		desc->limit0 = 0xffff;
+		desc->base0 = 0x0000;
+		desc->base1 = 0x0000;
+		desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+		desc->s = DESC_TYPE_CODE_DATA;
+		desc->dpl = 0;
+		desc->p = 1;
+		desc->limit = 0xf;
+		desc->avl = 0;
+		desc->l = 0;
+		desc->d = SEG_OP_SIZE_32BIT;
+		desc->g = SEG_GRANULARITY_4KB;
+		desc->base2 = 0x00;
+		desc++;
+	} else {
+		/* Second entry is unused on 32-bit */
+		desc++;
+	}

	/* __KERNEL_CS */
	desc->limit0 = 0xffff;
-- cgit
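The bit fields filled in by the EFI stub above and the raw .quad descriptors in the head_64.S GDT of the next patch are two spellings of the same thing. A standalone, illustrative-only sketch (it mirrors the bit-field layout of the kernel's struct desc_struct as GCC packs it on x86; the struct name and the program itself are not kernel code, and 0xa is what SEG_TYPE_CODE | SEG_TYPE_EXEC_READ evaluates to) shows how the assignments collapse into the familiar 8-byte literals:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct desc {
	uint16_t limit0;
	uint16_t base0;
	unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
	unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
};

/* Pack a flat 4 GiB code segment, varying only the L and D bits. */
static uint64_t pack_code_desc(int l, int d)
{
	struct desc desc = {
		.limit0 = 0xffff, .base0 = 0x0000, .base1 = 0x00,
		.type = 0xa,	/* code, execute/read */
		.s = 1, .dpl = 0, .p = 1,
		.limit = 0xf, .avl = 0, .l = l, .d = d,
		.g = 1,		/* 4 KiB granularity */
		.base2 = 0x00,
	};
	uint64_t v;

	memcpy(&v, &desc, sizeof(v));
	return v;
}

int main(void)
{
	/* Long mode code segment (L=1, D=0) vs. 32-bit flat code (L=0, D=1) */
	printf("__KERNEL_CS   = 0x%016llx\n", (unsigned long long)pack_code_desc(1, 0));
	printf("__KERNEL32_CS = 0x%016llx\n", (unsigned long long)pack_code_desc(0, 1));
	return 0;
}

Running it prints 0x00af9a000000ffff and 0x00cf9a000000ffff, the same values head_64.S uses for __KERNEL_CS and __KERNEL32_CS in the next patch: a quick cross-check that the only difference between the long mode and compatibility mode code segments is the L/D bit pair.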
From 34bbb0009f3b7a5eef1ab34f14e5dbf7b8fc389c Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 6 Jun 2017 14:31:25 +0300
Subject: x86/boot/compressed: Enable 5-level paging during decompression stage

We need to cover two basic cases: when the bootloader left us in 32-bit
mode and when the bootloader enabled long mode.

The patch implements a unified code path that enables 5-level paging for
both cases. When we start in 32-bit mode, we first enable long mode with
4-level paging and then switch over to 5-level paging.

Switching from 4-level to 5-level paging is not trivial. We cannot do it
directly: setting LA57 while in long mode would trigger #GP. So we need
to switch off long mode first and then re-enable it with 5-level paging.

NOTE: Having to switch off long mode means we are in trouble if the
bootloader put us above the 4G boundary. If a bootloader wants to boot a
5-level paging kernel, it has to put the kernel below 4G or enable
5-level paging on its own, so we can avoid this step.

Signed-off-by: Kirill A. Shutemov
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Dave Hansen
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170606113133.22974-7-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/boot/compressed/head_64.S | 86 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 85 insertions(+), 1 deletion(-)

(limited to 'arch/x86/boot/compressed')

diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d2ae1f821e0c..fbf4c32d0b62 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -346,6 +346,48 @@ preferred_addr:
	/* Set up the stack */
	leaq	boot_stack_end(%rbx), %rsp

+#ifdef CONFIG_X86_5LEVEL
+	/* Check if 5-level paging has already enabled */
+	movq	%cr4, %rax
+	testl	$X86_CR4_LA57, %eax
+	jnz	lvl5
+
+	/*
+	 * At this point we are in long mode with 4-level paging enabled,
+	 * but we want to enable 5-level paging.
+	 *
+	 * The problem is that we cannot do it directly. Setting LA57 in
+	 * long mode would trigger #GP. So we need to switch off long mode
+	 * first.
+	 *
+	 * NOTE: This is not going to work if bootloader put us above 4G
+	 * limit.
+	 *
+	 * The first step is go into compatibility mode.
+	 */
+
+	/* Clear additional page table */
+	leaq	lvl5_pgtable(%rbx), %rdi
+	xorq	%rax, %rax
+	movq	$(PAGE_SIZE/8), %rcx
+	rep	stosq
+
+	/*
+	 * Setup current CR3 as the first and only entry in a new top level
+	 * page table.
+	 */
+	movq	%cr3, %rdi
+	leaq	0x7 (%rdi), %rax
+	movq	%rax, lvl5_pgtable(%rbx)
+
+	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
+	pushq	$__KERNEL32_CS
+	leaq	compatible_mode(%rip), %rax
+	pushq	%rax
+	lretq
+lvl5:
+#endif
+
	/* Zero EFLAGS */
	pushq	$0
	popfq
@@ -429,6 +471,44 @@ relocated:
	jmp	*%rax

	.code32
+#ifdef CONFIG_X86_5LEVEL
+compatible_mode:
+	/* Setup data and stack segments */
+	movl	$__KERNEL_DS, %eax
+	movl	%eax, %ds
+	movl	%eax, %ss
+
+	/* Disable paging */
+	movl	%cr0, %eax
+	btrl	$X86_CR0_PG_BIT, %eax
+	movl	%eax, %cr0
+
+	/* Point CR3 to 5-level paging */
+	leal	lvl5_pgtable(%ebx), %eax
+	movl	%eax, %cr3
+
+	/* Enable PAE and LA57 mode */
+	movl	%cr4, %eax
+	orl	$(X86_CR4_PAE | X86_CR4_LA57), %eax
+	movl	%eax, %cr4
+
+	/* Calculate address we are running at */
+	call	1f
+1:	popl	%edi
+	subl	$1b, %edi
+
+	/* Prepare stack for far return to Long Mode */
+	pushl	$__KERNEL_CS
+	leal	lvl5(%edi), %eax
+	push	%eax
+
+	/* Enable paging back */
+	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
+	movl	%eax, %cr0
+
+	lret
+#endif
+
 no_longmode:
	/* This isn't an x86-64 CPU so hang */
 1:
@@ -442,7 +522,7 @@ gdt:
	.word	gdt_end - gdt
	.long	gdt
	.word	0
-	.quad	0x0000000000000000	/* NULL descriptor */
+	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
@@ -486,3 +566,7 @@ boot_stack_end:
	.balign	4096
 pgtable:
	.fill BOOT_PGT_SIZE, 1, 0
+#ifdef CONFIG_X86_5LEVEL
+lvl5_pgtable:
+	.fill PAGE_SIZE, 1, 0
+#endif
-- cgit
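The only genuinely new page-table content in the trampoline above is the single entry written into lvl5_pgtable. A hedged, C-flavoured sketch of that step may make the "old PGD becomes the first p4d" trick easier to see than the leaq/movq pair; the 0x7 flag value and the lvl5_pgtable name come from the asm, while the function, its signature, and the below-512 GiB assumption are illustrative only.

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE	4096
#define PTRS_PER_PGD	512

/* One zeroed page, like the lvl5_pgtable area reserved in head_64.S. */
uint64_t lvl5_pgtable[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));

uint64_t build_lvl5_top_level(uint64_t old_cr3)
{
	memset(lvl5_pgtable, 0, sizeof(lvl5_pgtable));

	/*
	 * The existing 4-level PGD becomes the p4d referenced by entry 0
	 * of the new top-level table (0x7 = present + writable + user, as
	 * in "leaq 0x7 (%rdi), %rax"; CR3 is page-aligned here, so simply
	 * adding the flags is enough). Every identity-mapped address
	 * below 512 GiB then resolves exactly as before, just through one
	 * extra translation level.
	 */
	lvl5_pgtable[0] = old_cr3 + 0x7;

	/* This is the value loaded into CR3 once LA57 is set. */
	return (uint64_t)(uintptr_t)lvl5_pgtable;
}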
From a24261d70e00e4ce03cf45bbf18398f52a7b9229 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Wed, 28 Jun 2017 15:17:30 +0300
Subject: x86/KASLR: Fix detection 32/64 bit bootloaders for 5-level paging

KASLR uses a hack to detect whether we booted via startup_32() or
startup_64(): it checks what is loaded into CR3 and compares it to
_pgtables. _pgtables is the array of page tables from which the early
code allocates page tables.

KASLR expects CR3 to point to _pgtables if we booted via startup_32(),
but that's not true if we booted with 5-level paging enabled. In that
case the top-level page table is allocated separately and only the first
p4d page table is allocated from the array.

Modify the check to cover both the 4- and 5-level paging cases.

The patch also renames 'level4p' to 'top_level_pgt', as it can now hold
the page table for either the 4th or the 5th level, depending on the
configuration.

Signed-off-by: Kirill A. Shutemov
Acked-by: Kees Cook
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-arch@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/20170628121730.43079-1-kirill.shutemov@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/boot/compressed/pagetable.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

(limited to 'arch/x86/boot/compressed')

diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 8e69df96492e..28029be47fbb 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -63,7 +63,7 @@ static void *alloc_pgt_page(void *context)
 static struct alloc_pgt_data pgt_data;

 /* The top level page table entry pointer. */
-static unsigned long level4p;
+static unsigned long top_level_pgt;

 /*
  * Mapping information structure passed to kernel_ident_mapping_init().
@@ -91,9 +91,15 @@ void initialize_identity_maps(void)
	 * If we came here via startup_32(), cr3 will be _pgtable already
	 * and we must append to the existing area instead of entirely
	 * overwriting it.
+	 *
+	 * With 5-level paging, we use '_pgtable' to allocate the p4d page table,
+	 * the top-level page table is allocated separately.
+	 *
+	 * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
+	 * cases. On 4-level paging it's equal to 'top_level_pgt'.
	 */
-	level4p = read_cr3_pa();
-	if (level4p == (unsigned long)_pgtable) {
+	top_level_pgt = read_cr3_pa();
+	if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
		debug_putstr("booted via startup_32()\n");
		pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
@@ -103,7 +109,7 @@ void initialize_identity_maps(void)
		pgt_data.pgt_buf = _pgtable;
		pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
		memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
-		level4p = (unsigned long)alloc_pgt_page(&pgt_data);
+		top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
	}
 }

@@ -123,7 +129,7 @@ void add_identity_map(unsigned long start, unsigned long size)
		return;

	/* Build the mapping. */
-	kernel_ident_mapping_init(&mapping_info, (pgd_t *)level4p,
+	kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
				  start, end);
 }

@@ -134,5 +140,5 @@ void add_identity_map(unsigned long start, unsigned long size)
  */
 void finalize_identity_maps(void)
 {
-	write_cr3(level4p);
+	write_cr3(top_level_pgt);
 }
-- cgit
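The new check works because p4d_offset() behaves differently depending on whether the p4d level is real or folded. A simplified, illustrative-only model of the helper (this is not the kernel's implementation; the folding decision is really the compile-time CONFIG_X86_5LEVEL choice, shown here as a runtime flag for brevity):

#include <stdint.h>

typedef struct { uint64_t pgd; } pgd_t;
typedef struct { uint64_t p4d; } p4d_t;

#define PAGE_MASK	(~0xfffull)

/* Stand-in for the CONFIG_X86_5LEVEL compile-time choice. */
static int pgtable_l5_enabled;

static p4d_t *p4d_offset(pgd_t *pgd, unsigned long addr)
{
	/* 4-level: the p4d level is folded, so a pgd entry *is* the p4d. */
	if (!pgtable_l5_enabled)
		return (p4d_t *)pgd;

	/* 5-level: follow the pgd entry down to the referenced p4d page. */
	return (p4d_t *)(uintptr_t)(pgd->pgd & PAGE_MASK) + ((addr >> 39) & 511);
}

With 4-level paging the call therefore degenerates to the old 'top_level_pgt == _pgtable' comparison, while with 5-level paging it follows pgd entry 0 and lands on the page table that startup_32() built at _pgtable, which now serves as the first p4d. The same condition thus detects both boot paths.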