From e6fc454b7794fc45c27364c7896b8f03094635ee Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Sun, 15 Oct 2017 17:02:03 +0100
Subject: x86/cpu/intel_cacheinfo: Remove redundant assignment to 'this_leaf'

The 'this_leaf' variable is assigned a value that is never read: it is
overwritten a little later with a newer value, so the redundant
assignment can be removed.

Cleans up the following Clang warning:

  Value stored to 'this_leaf' is never read

Signed-off-by: Colin Ian King
Reviewed-by: Borislav Petkov
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: kernel-janitors@vger.kernel.org
Link: http://lkml.kernel.org/r/20171015160203.12332-1-colin.king@canonical.com
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/intel_cacheinfo.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 24f749324c0f..9990a71e311f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
 	} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		unsigned int apicid, nshared, first, last;
 
-		this_leaf = this_cpu_ci->info_list + index;
 		nshared = base->eax.split.num_threads_sharing + 1;
 		apicid = cpu_data(cpu).apicid;
 		first = apicid - (apicid % nshared);
-- cgit
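The warning in question comes from the Clang static analyzer's dead-store
check, which flags a value written to a variable and then unconditionally
overwritten before any read. A minimal, hypothetical reproducer -- not
kernel code -- looks like this:

  #include <stdio.h>

  static int last_of(const int *values, int n)
  {
          int v;

          v = values[0];          /* dead store: overwritten before any read */
          v = values[n - 1];      /* only this value is ever used */
          return v;
  }

  int main(void)
  {
          int a[] = { 3, 1, 4 };

          printf("%d\n", last_of(a, 3));  /* prints 4 */
          return 0;
  }

Running scan-build over such a file reports "Value stored to 'v' is never
read" -- the same diagnostic class the patch above silences by deleting the
dead write rather than the live one.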
From 9c48c0965b97e14ddcf75490a754e84e05aaa062 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 16 Oct 2017 12:12:16 +0200
Subject: x86/idt: Initialize early IDT before cr4_init_shadow()

Moving the early IDT setup out of assembly code breaks the boot on first
generation 486 systems. The reason is that the call to
idt_setup_early_handler(), which sets up the early handlers, was added
after the call to cr4_init_shadow().

cr4_init_shadow() tries to read CR4, which is not available on those
systems. The accessor function uses an extable fixup to handle the
resulting fault. As the IDT is not set up yet, the CR4 read exception
causes an instantaneous reboot for obvious reasons.

Call idt_setup_early_handler() before cr4_init_shadow() so the IDT is set
up before the first exception hits.

Fixes: 87e81786b13b ("x86/idt: Move early IDT setup out of 32-bit asm")
Reported-and-tested-by: Matthew Whitehead
Signed-off-by: Thomas Gleixner
Cc: Peter Zijlstra
Cc: Andy Lutomirski
Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1710161210290.1973@nanos
---
 arch/x86/kernel/head32.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index cf2ce063f65a..2902ca4d5993 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void)
 
 asmlinkage __visible void __init i386_start_kernel(void)
 {
-	cr4_init_shadow();
-
+	/* Make sure IDT is set up before any exception happens */
 	idt_setup_early_handler();
 
+	cr4_init_shadow();
+
 	sanitize_boot_params(&boot_params);
 
 	x86_early_init_platform_quirks();
-- cgit
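For readers unfamiliar with the fixup machinery the message refers to: the
CR4 accessor wraps the register read in an exception-table entry, so that
on a CPU without CR4 the faulting MOV is skipped and a default value of
zero is returned. A simplified sketch of that pattern follows, under a
hypothetical name (the real accessor is native_read_cr4() in
arch/x86/include/asm/special_insns.h, and its details differ):

  /*
   * Extable-protected CR4 read, sketched.  If the MOV at label 1 faults
   * because the CPU has no CR4, the fault handler looks the faulting
   * address up in the exception table and resumes at label 2, leaving
   * val at 0.  That recovery path only works once an IDT is installed,
   * which is exactly why idt_setup_early_handler() must run first.
   */
  static inline unsigned long read_cr4_safe(void)
  {
          unsigned long val = 0;

          asm volatile("1: mov %%cr4, %0\n"
                       "2:\n"
                       _ASM_EXTABLE(1b, 2b)
                       : "+r" (val));
          return val;
  }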
From 723f2828a98c8ca19842042f418fb30dd8cfc0f7 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Wed, 18 Oct 2017 13:12:25 +0200
Subject: x86/microcode/intel: Disable late loading on model 79

Blacklist Broadwell X model 79 for late loading due to an erratum.

Signed-off-by: Borislav Petkov
Acked-by: Tony Luck
Cc:
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/20171018111225.25635-1-bp@alien8.de
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/cpu/microcode/intel.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 8f7a9bbad514..7dbcb7adf797 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -34,6 +34,7 @@
 #include
 
 #include
+#include <asm/intel-family.h>
 #include
 #include
 #include
@@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
 	return 0;
 }
 
+static bool is_blacklisted(unsigned int cpu)
+{
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) {
+		pr_err_once("late loading on model 79 is disabled.\n");
+		return true;
+	}
+
+	return false;
+}
+
 static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 					     bool refresh_fw)
 {
@@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 	const struct firmware *firmware;
 	enum ucode_state ret;
 
+	if (is_blacklisted(cpu))
+		return UCODE_NFOUND;
+
 	sprintf(name, "intel-ucode/%02x-%02x-%02x",
 		c->x86, c->x86_model, c->x86_mask);
 
@@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
 static enum ucode_state request_microcode_user(int cpu,
 					       const void __user *buf, size_t size)
 {
+	if (is_blacklisted(cpu))
+		return UCODE_NFOUND;
+
 	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }
-- cgit
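The check merged above is deliberately minimal: a single family/model pair,
open-coded. If more models ever needed the same treatment, a table-driven
variant would be the natural extension. The following is a hypothetical
sketch of that direction, not what the patch merges:

  /* Hypothetical table-driven form of is_blacklisted(); sketch only. */
  struct ucode_blacklist_entry {
          u8 family;
          u8 model;
  };

  static const struct ucode_blacklist_entry ucode_blacklist[] = {
          { 6, INTEL_FAM6_BROADWELL_X },  /* erratum: late loading unsafe */
  };

  static bool is_blacklisted(unsigned int cpu)
  {
          struct cpuinfo_x86 *c = &cpu_data(cpu);
          int i;

          for (i = 0; i < ARRAY_SIZE(ucode_blacklist); i++) {
                  if (c->x86 == ucode_blacklist[i].family &&
                      c->x86_model == ucode_blacklist[i].model) {
                          pr_err_once("late loading disabled on model 0x%x\n",
                                      c->x86_model);
                          return true;
                  }
          }

          return false;
  }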
From e8b9b0cc8269c85d8167aaee024bfcbb4976c031 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Sat, 14 Oct 2017 09:59:49 -0700
Subject: x86/mm/64: Remove the last VM_BUG_ON() from the TLB code

Let's avoid hard-to-diagnose crashes in the future: downgrade the
consistency check from VM_BUG_ON() to VM_WARN_ON(), so a mismatched
context ID is logged with a stack trace instead of killing the machine.

Signed-off-by: Andy Lutomirski
Cc: Borislav Petkov
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/f423bbc97864089fbdeb813f1ea126c6eaed844a.1508000261.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/tlb.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 658bf0090565..7db23f9f804e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -147,8 +147,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	this_cpu_write(cpu_tlbstate.is_lazy, false);
 
 	if (real_prev == next) {
-		VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
-			  next->context.ctx_id);
+		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+			   next->context.ctx_id);
 
 		/*
 		 * We don't currently support having a real mm loaded without
-- cgit
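The one-line change is about failure mode, not detection: with
CONFIG_DEBUG_VM enabled, a VM_BUG_ON() that fires stops the machine, while
VM_WARN_ON() prints a stack trace and keeps running, so the inconsistency
lands in the logs where it can be diagnosed. A user-space analogue of the
two behaviours, purely for illustration (these are not the kernel macros,
which live in include/linux/mmdebug.h):

  #include <assert.h>
  #include <stdio.h>

  /* Log the condition and keep running, like VM_WARN_ON(). */
  #define WARN_ON(cond)                                           \
          do {                                                    \
                  if (cond)                                       \
                          fprintf(stderr, "WARN: %s:%d: %s\n",    \
                                  __FILE__, __LINE__, #cond);     \
          } while (0)

  /* Abort on the spot, like VM_BUG_ON() under CONFIG_DEBUG_VM. */
  #define BUG_ON(cond)    assert(!(cond))

  int main(void)
  {
          WARN_ON(2 + 2 == 4);    /* prints a warning, continues */
          BUG_ON(2 + 2 == 4);     /* terminates the process here */
          return 0;
  }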
From 4e57b94664fef55aa71cac33b4632fdfdd52b695 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Sat, 14 Oct 2017 09:59:50 -0700
Subject: x86/mm: Tidy up "x86/mm: Flush more aggressively in lazy TLB mode"

Due to timezones, commit:

  b956575bed91 ("x86/mm: Flush more aggressively in lazy TLB mode")

was applied in an outdated version: one that was well tested and fixed
the bug, but that didn't address Borislav's review comments.

Tidy it up:

 - The name "tlb_use_lazy_mode()" was highly confusing. Change it to
   "tlb_defer_switch_to_init_mm()", which describes what it actually means.

 - Move the static_branch crap into a helper.

 - Improve comments.

Actually removing the debugfs option is in the next patch.

Reported-by: Borislav Petkov
Signed-off-by: Andy Lutomirski
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Fixes: b956575bed91 ("x86/mm: Flush more aggressively in lazy TLB mode")
Link: http://lkml.kernel.org/r/154ef95428d4592596b6e98b0af1d2747d6cfbf8.1508000261.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/tlbflush.h |  7 ++++++-
 arch/x86/mm/tlb.c               | 30 ++++++++++++++++++------------
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d362161d3291..0d4a1bb7e303 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -87,7 +87,12 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
  * to init_mm when we switch to a kernel thread (e.g. the idle thread). If
  * it's false, then we immediately switch CR3 when entering a kernel thread.
  */
-DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+DECLARE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);
+
+static inline bool tlb_defer_switch_to_init_mm(void)
+{
+	return static_branch_unlikely(&__tlb_defer_switch_to_init_mm);
+}
 
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 7db23f9f804e..5ee3b59baa85 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,7 +30,7 @@
 
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
-DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
+DEFINE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);
 
 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 			    u16 *new_asid, bool *need_flush)
@@ -213,6 +213,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 }
 
 /*
+ * Please ignore the name of this function.  It should be called
+ * switch_to_kernel_thread().
+ *
  * enter_lazy_tlb() is a hint from the scheduler that we are entering a
  * kernel thread or other context without an mm.  Acceptable implementations
  * include doing nothing whatsoever, switching to init_mm, or various clever
@@ -227,7 +230,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
 		return;
 
-	if (static_branch_unlikely(&tlb_use_lazy_mode)) {
+	if (tlb_defer_switch_to_init_mm()) {
 		/*
 		 * There's a significant optimization that may be possible
 		 * here.  We have accurate enough TLB flush tracking that we
@@ -632,7 +635,8 @@ static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
 {
 	char buf[2];
 
-	buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0';
+	buf[0] = static_branch_likely(&__tlb_defer_switch_to_init_mm)
+			? '1' : '0';
 	buf[1] = '\n';
 
 	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
@@ -647,9 +651,9 @@ static ssize_t tlblazy_write_file(struct file *file,
 		return -EINVAL;
 
 	if (val)
-		static_branch_enable(&tlb_use_lazy_mode);
+		static_branch_enable(&__tlb_defer_switch_to_init_mm);
 	else
-		static_branch_disable(&tlb_use_lazy_mode);
+		static_branch_disable(&__tlb_defer_switch_to_init_mm);
 
 	return count;
 }
@@ -660,23 +664,25 @@ static const struct file_operations fops_tlblazy = {
 	.llseek = default_llseek,
 };
 
-static int __init init_tlb_use_lazy_mode(void)
+static int __init init_tlblazy(void)
 {
 	if (boot_cpu_has(X86_FEATURE_PCID)) {
 		/*
-		 * Heuristic: with PCID on, switching to and from
-		 * init_mm is reasonably fast, but remote flush IPIs
-		 * as expensive as ever, so turn off lazy TLB mode.
+		 * If we have PCID, then switching to init_mm is reasonably
+		 * fast.  If we don't have PCID, then switching to init_mm is
+		 * quite slow, so we default to trying to defer it in the
+		 * hopes that we can avoid it entirely.  The latter approach
+		 * runs the risk of receiving otherwise unnecessary IPIs.
 		 *
 		 * We can't do this in setup_pcid() because static keys
 		 * haven't been initialized yet, and it would blow up
 		 * badly.
 		 */
-		static_branch_disable(&tlb_use_lazy_mode);
+		static_branch_disable(&__tlb_defer_switch_to_init_mm);
 	}
 
-	debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR,
+	debugfs_create_file("tlb_defer_switch_to_init_mm", S_IRUSR | S_IWUSR,
 			    arch_debugfs_dir, NULL, &fops_tlblazy);
 	return 0;
 }
-late_initcall(init_tlb_use_lazy_mode);
+late_initcall(init_tlblazy);
-- cgit
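Beyond the rename, the tlbflush.h hunk shows a small API-hygiene pattern:
the raw static key gets a double-underscore name and is hidden behind an
inline predicate, so call sites ask a question rather than open-coding a
branch hint. In the abstract, with hypothetical names:

  #include <linux/jump_label.h>

  /* The key itself is an implementation detail... */
  DEFINE_STATIC_KEY_TRUE(__feature_x_enabled);

  /* ...and callers only ever see the predicate. */
  static inline bool feature_x_enabled(void)
  {
          return static_branch_unlikely(&__feature_x_enabled);
  }

When the policy changes later -- as the next patch does, replacing the key
with a plain static_cpu_has() check -- only the helper body needs to
change, not the call sites.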
From 7ac7f2c315ef76437f5119df354d334448534fb5 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski
Date: Sat, 14 Oct 2017 09:59:51 -0700
Subject: x86/mm: Remove debug/x86/tlb_defer_switch_to_init_mm

Borislav thinks that we don't need this knob in a released kernel.
Get rid of it.

Requested-by: Borislav Petkov
Signed-off-by: Andy Lutomirski
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Fixes: b956575bed91 ("x86/mm: Flush more aggressively in lazy TLB mode")
Link: http://lkml.kernel.org/r/1fa72431924e81e86c164ff7881bf9240d1f1a6c.1508000261.git.luto@kernel.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/tlbflush.h | 20 ++++++++------
 arch/x86/mm/tlb.c               | 58 -----------------------------------------
 2 files changed, 12 insertions(+), 66 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 0d4a1bb7e303..c4aed0de565e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -82,16 +82,20 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
 #define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
 #endif
 
-/*
- * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point
- * to init_mm when we switch to a kernel thread (e.g. the idle thread). If
- * it's false, then we immediately switch CR3 when entering a kernel thread.
- */
-DECLARE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);
-
 static inline bool tlb_defer_switch_to_init_mm(void)
 {
-	return static_branch_unlikely(&__tlb_defer_switch_to_init_mm);
+	/*
+	 * If we have PCID, then switching to init_mm is reasonably
+	 * fast.  If we don't have PCID, then switching to init_mm is
+	 * quite slow, so we try to defer it in the hopes that we can
+	 * avoid it entirely.  The latter approach runs the risk of
+	 * receiving otherwise unnecessary IPIs.
+	 *
+	 * This choice is just a heuristic.  The tlb code can handle this
+	 * function returning true or false regardless of whether we have
+	 * PCID.
+	 */
+	return !static_cpu_has(X86_FEATURE_PCID);
 }
 
 /*
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5ee3b59baa85..0f3d0cea4d00 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -30,7 +30,6 @@
 
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
-DEFINE_STATIC_KEY_TRUE(__tlb_defer_switch_to_init_mm);
 
 static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 			    u16 *new_asid, bool *need_flush)
@@ -629,60 +628,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
 	return 0;
 }
 late_initcall(create_tlb_single_page_flush_ceiling);
-
-static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf,
-				 size_t count, loff_t *ppos)
-{
-	char buf[2];
-
-	buf[0] = static_branch_likely(&__tlb_defer_switch_to_init_mm)
-			? '1' : '0';
-	buf[1] = '\n';
-
-	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
-}
-
-static ssize_t tlblazy_write_file(struct file *file,
-		 const char __user *user_buf, size_t count, loff_t *ppos)
-{
-	bool val;
-
-	if (kstrtobool_from_user(user_buf, count, &val))
-		return -EINVAL;
-
-	if (val)
-		static_branch_enable(&__tlb_defer_switch_to_init_mm);
-	else
-		static_branch_disable(&__tlb_defer_switch_to_init_mm);
-
-	return count;
-}
-
-static const struct file_operations fops_tlblazy = {
-	.read = tlblazy_read_file,
-	.write = tlblazy_write_file,
-	.llseek = default_llseek,
-};
-
-static int __init init_tlblazy(void)
-{
-	if (boot_cpu_has(X86_FEATURE_PCID)) {
-		/*
-		 * If we have PCID, then switching to init_mm is reasonably
-		 * fast.  If we don't have PCID, then switching to init_mm is
-		 * quite slow, so we default to trying to defer it in the
-		 * hopes that we can avoid it entirely.  The latter approach
-		 * runs the risk of receiving otherwise unnecessary IPIs.
-		 *
-		 * We can't do this in setup_pcid() because static keys
-		 * haven't been initialized yet, and it would blow up
-		 * badly.
-		 */
-		static_branch_disable(&__tlb_defer_switch_to_init_mm);
-	}
-
-	debugfs_create_file("tlb_defer_switch_to_init_mm", S_IRUSR | S_IWUSR,
-			    arch_debugfs_dir, NULL, &fops_tlblazy);
-	return 0;
-}
-late_initcall(init_tlblazy);
-- cgit
From ce56a86e2ade45d052b3228cdfebe913a1ae7381 Mon Sep 17 00:00:00 2001
From: Craig Bergstrom
Date: Thu, 19 Oct 2017 13:28:56 -0600
Subject: x86/mm: Limit mmap() of /dev/mem to valid physical addresses

Currently, it is possible to mmap() any offset from /dev/mem. If a
program mmap()s an offset of /dev/mem outside of the addressable limits
of a system, the page table can be corrupted by setting reserved bits.

For example, if you mmap() offset 0x0001000000000000 of /dev/mem on an
x86_64 system with a 48-bit physical address bus, the page fault handler
will be called with error_code set to RSVD. The kernel then crashes with
a page table corruption error.

This change prevents this page table corruption on x86 by refusing to
mmap offsets higher than the highest valid address in the system.

Signed-off-by: Craig Bergstrom
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Luis R. Rodriguez
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Toshi Kani
Cc: dsafonov@virtuozzo.com
Cc: kirill.shutemov@linux.intel.com
Cc: mhocko@suse.com
Cc: oleg@redhat.com
Link: http://lkml.kernel.org/r/20171019192856.39672-1-craigb@google.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/io.h |  4 ++++
 arch/x86/mm/mmap.c        | 12 ++++++++++++
 2 files changed, 16 insertions(+)

diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index c40a95c33bb8..322d25ae23ab 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -110,6 +110,10 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
 
 #endif
 
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
 /**
  * virt_to_phys - map virtual addresses to physical
  * @address: address to remap
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index a99679826846..320c6237e1d1 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -174,3 +174,15 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 		return "[mpx]";
 	return NULL;
 }
+
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+	return addr + count <= __pa(high_memory);
+}
+
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+	phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
+
+	return valid_phys_addr_range(addr, count);
+}
-- cgit
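The effect of the new range check is visible from user space: mapping an
offset beyond the highest valid physical address now fails at mmap() time
(with EINVAL, via the valid_mmap_phys_addr_range() hook consulted in
drivers/char/mem.c) instead of planting reserved bits in the page tables.
A hypothetical demonstration, assuming an x86_64 machine with a 48-bit
physical address bus and root privileges:

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          off_t bogus = (off_t)1 << 48;   /* just past a 48-bit bus */
          int fd = open("/dev/mem", O_RDONLY);
          void *p;

          if (fd < 0) {
                  perror("open /dev/mem");
                  return 1;
          }

          p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, bogus);
          if (p == MAP_FAILED)
                  perror("mmap");         /* expected to fail with this patch */
          else
                  puts("mmap unexpectedly succeeded");

          close(fd);
          return 0;
  }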