-rw-r--r-- | Documentation/admin-guide/hw-vuln/attack_vector_controls.rst | 5
-rw-r--r-- | MAINTAINERS | 2
-rw-r--r-- | Makefile | 2
-rw-r--r-- | arch/arm/include/asm/stacktrace.h | 3
-rw-r--r-- | arch/arm64/include/asm/mmu.h | 7
-rw-r--r-- | arch/arm64/kernel/cpufeature.c | 5
-rw-r--r-- | arch/arm64/mm/mmu.c | 7
-rw-r--r-- | arch/x86/kernel/cpu/bugs.c | 9
-rw-r--r-- | arch/x86/kernel/cpu/intel.c | 2
-rw-r--r-- | arch/x86/kernel/cpu/microcode/amd.c | 22
-rw-r--r-- | arch/x86/kernel/cpu/topology_amd.c | 23
-rw-r--r-- | drivers/gpio/gpio-timberdale.c | 2
-rw-r--r-- | drivers/irqchip/irq-atmel-aic.c | 2
-rw-r--r-- | drivers/irqchip/irq-atmel-aic5.c | 2
-rw-r--r-- | drivers/irqchip/irq-gic-v5-irs.c | 9
-rw-r--r-- | drivers/irqchip/irq-mvebu-gicp.c | 2
-rw-r--r-- | init/Kconfig | 9
-rw-r--r-- | kernel/sched/deadline.c | 18
-rw-r--r-- | kernel/sched/debug.c | 6
-rw-r--r-- | lib/ubsan.c | 6
-rw-r--r-- | tools/testing/selftests/arm64/fp/fp-ptrace.c | 5
21 files changed, 94 insertions, 54 deletions
diff --git a/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst b/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
index 6dd0800146f6..5964901d66e3 100644
--- a/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
+++ b/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
@@ -215,7 +215,7 @@ Spectre_v2      X                           X
 Spectre_v2_user                X                          X              *            (Note 1)
 SRBDS           X              X            X             X
 SRSO            X              X            X             X
-SSB                                                                                    (Note 4)
+SSB                            X
 TAA             X              X            X             X              *            (Note 2)
 TSA             X              X            X             X
 =============== ============== ============ ============= ============== ============ ========
@@ -229,9 +229,6 @@ Notes:
    3 --  Disables SMT if cross-thread mitigations are fully enabled, the CPU
          is vulnerable, and STIBP is not supported
 
-   4 --  Speculative store bypass is always enabled by default (no kernel
-         mitigation applied) unless overridden with spec_store_bypass_disable option
-
 When an attack-vector is disabled, all mitigations for the vulnerabilities
 listed in the above table are disabled, unless mitigation is required for a
 different enabled attack-vector or a mitigation is explicitly selected via a
diff --git a/MAINTAINERS b/MAINTAINERS
index e94d68c980c5..6dcfbd11efef 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -931,7 +931,7 @@ F:	Documentation/devicetree/bindings/dma/altr,msgdma.yaml
 F:	drivers/dma/altera-msgdma.c
 
 ALTERA PIO DRIVER
-M:	Mun Yew Tham <mun.yew.tham@intel.com>
+M:	Adrian Ng <adrianhoyin.ng@altera.com>
 L:	linux-gpio@vger.kernel.org
 S:	Maintained
 F:	drivers/gpio/gpio-altera.c
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index f80a85b091d6..ba2f771cca23 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -2,8 +2,9 @@
 #ifndef __ASM_STACKTRACE_H
 #define __ASM_STACKTRACE_H
 
-#include <asm/ptrace.h>
 #include <linux/llist.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
 
 struct stackframe {
 	/*
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 6e8aa8e72601..49f1a810df16 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,6 +17,13 @@
 #include <linux/refcount.h>
 #include <asm/cpufeature.h>
 
+enum pgtable_type {
+	TABLE_PTE,
+	TABLE_PMD,
+	TABLE_PUD,
+	TABLE_P4D,
+};
+
 typedef struct {
 	atomic64_t	id;
 #ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0d45c5e9b4da..ef269a5a37e1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -84,6 +84,7 @@
 #include <asm/hwcap.h>
 #include <asm/insn.h>
 #include <asm/kvm_host.h>
+#include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/mte.h>
 #include <asm/hypervisor.h>
@@ -1945,11 +1946,11 @@ static bool has_pmuv3(const struct arm64_cpu_capabilities *entry, int scope)
 extern
 void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
 			     phys_addr_t size, pgprot_t prot,
-			     phys_addr_t (*pgtable_alloc)(int), int flags);
+			     phys_addr_t (*pgtable_alloc)(enum pgtable_type), int flags);
 
 static phys_addr_t __initdata kpti_ng_temp_alloc;
 
-static phys_addr_t __init kpti_ng_pgd_alloc(int shift)
+static phys_addr_t __init kpti_ng_pgd_alloc(enum pgtable_type type)
 {
 	kpti_ng_temp_alloc -= PAGE_SIZE;
 	return kpti_ng_temp_alloc;
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 34e5d78af076..183801520740 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -47,13 +47,6 @@
 #define NO_CONT_MAPPINGS	BIT(1)
 #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
 
-enum pgtable_type {
-	TABLE_PTE,
-	TABLE_PMD,
-	TABLE_PUD,
-	TABLE_P4D,
-};
-
 u64 kimage_voffset __ro_after_init;
 EXPORT_SYMBOL(kimage_voffset);
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 49ef1b832c1a..af838b8d845c 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -416,6 +416,10 @@ static bool __init should_mitigate_vuln(unsigned int bug)
 		       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
 		       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
 		       (smt_mitigations != SMT_MITIGATIONS_OFF);
+
+	case X86_BUG_SPEC_STORE_BYPASS:
+		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
+
 	default:
 		WARN(1, "Unknown bug %x\n", bug);
 		return false;
@@ -2710,6 +2714,11 @@ static void __init ssb_select_mitigation(void)
 		ssb_mode = SPEC_STORE_BYPASS_DISABLE;
 		break;
 	case SPEC_STORE_BYPASS_CMD_AUTO:
+		if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
+			ssb_mode = SPEC_STORE_BYPASS_PRCTL;
+		else
+			ssb_mode = SPEC_STORE_BYPASS_NONE;
+		break;
 	case SPEC_STORE_BYPASS_CMD_PRCTL:
 		ssb_mode = SPEC_STORE_BYPASS_PRCTL;
 		break;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 076eaa41b8c8..98ae4c37c93e 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -262,7 +262,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-	} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_WILLAMETTE) ||
+	} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
 		   (c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 097e39327942..514f63340880 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -171,8 +171,28 @@ static int cmp_id(const void *key, const void *elem)
 	return 1;
 }
 
+static u32 cpuid_to_ucode_rev(unsigned int val)
+{
+	union zen_patch_rev p = {};
+	union cpuid_1_eax c;
+
+	c.full = val;
+
+	p.stepping = c.stepping;
+	p.model = c.model;
+	p.ext_model = c.ext_model;
+	p.ext_fam = c.ext_fam;
+
+	return p.ucode_rev;
+}
+
 static bool need_sha_check(u32 cur_rev)
 {
+	if (!cur_rev) {
+		cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
+		pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
+	}
+
 	switch (cur_rev >> 8) {
 	case 0x80012: return cur_rev <= 0x800126f; break;
 	case 0x80082: return cur_rev <= 0x800820f; break;
@@ -749,8 +769,6 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi
 	n.equiv_cpu = equiv_cpu;
 	n.patch_id = uci->cpu_sig.rev;
 
-	WARN_ON_ONCE(!n.patch_id);
-
 	list_for_each_entry(p, &microcode_cache, plist)
 		if (patch_cpus_equivalent(p, &n, false))
 			return p;
diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
index 843b1655ab45..827dd0dbb6e9 100644
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -81,20 +81,25 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
 
 	cpuid_leaf(0x8000001e, &leaf);
 
-	tscan->c->topo.initial_apicid = leaf.ext_apic_id;
-
 	/*
-	 * If leaf 0xb is available, then the domain shifts are set
-	 * already and nothing to do here. Only valid for family >= 0x17.
+	 * If leaf 0xb/0x26 is available, then the APIC ID and the domain
+	 * shifts are set already.
 	 */
-	if (!has_topoext && tscan->c->x86 >= 0x17) {
+	if (!has_topoext) {
+		tscan->c->topo.initial_apicid = leaf.ext_apic_id;
+
 		/*
-		 * Leaf 0x80000008 set the CORE domain shift already.
-		 * Update the SMT domain, but do not propagate it.
+		 * Leaf 0x8000008 sets the CORE domain shift but not the
+		 * SMT domain shift. On CPUs with family >= 0x17, there
+		 * might be hyperthreads.
 		 */
-		unsigned int nthreads = leaf.core_nthreads + 1;
+		if (tscan->c->x86 >= 0x17) {
+			/* Update the SMT domain, but do not propagate it. */
+			unsigned int nthreads = leaf.core_nthreads + 1;
 
-		topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
+			topology_update_dom(tscan, TOPO_SMT_DOMAIN,
+					    get_count_order(nthreads), nthreads);
+		}
 	}
 
 	store_node(tscan, leaf.nnodes_per_socket + 1, leaf.node_id);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 679e27f00ff6..f488939dd00a 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -137,7 +137,7 @@ static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
 	u32 ver;
 	int ret = 0;
 
-	if (offset < 0 || offset > tgpio->gpio.ngpio)
+	if (offset < 0 || offset >= tgpio->gpio.ngpio)
 		return -EINVAL;
 
 	ver = ioread32(tgpio->membase + TGPIO_VER);
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 03aeed39a4d2..1dcc52760eca 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -188,7 +188,7 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 
 	gc = dgc->gc[idx];
 
-	guard(raw_spinlock_irq)(&gc->lock);
+	guard(raw_spinlock_irqsave)(&gc->lock);
 	smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
 	aic_common_set_priority(intspec[2], &smr);
 	irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 60b00d2c3d7a..1f14b401f71d 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -279,7 +279,7 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
 	if (ret)
 		return ret;
 
-	guard(raw_spinlock_irq)(&bgc->lock);
+	guard(raw_spinlock_irqsave)(&bgc->lock);
 	irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
 	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
 	aic_common_set_priority(intspec[2], &smr);
diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c
index f845415f9143..13c035727e32 100644
--- a/drivers/irqchip/irq-gic-v5-irs.c
+++ b/drivers/irqchip/irq-gic-v5-irs.c
@@ -5,6 +5,7 @@
 
 #define pr_fmt(fmt)	"GICv5 IRS: " fmt
 
+#include <linux/kmemleak.h>
 #include <linux/log2.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -117,6 +118,7 @@ static int __init gicv5_irs_init_ist_linear(struct gicv5_irs_chip_data *irs_data
 		kfree(ist);
 		return ret;
 	}
+	kmemleak_ignore(ist);
 
 	return 0;
 }
@@ -232,6 +234,7 @@ int gicv5_irs_iste_alloc(const u32 lpi)
 		kfree(l2ist);
 		return ret;
 	}
+	kmemleak_ignore(l2ist);
 
 	/*
 	 * Make sure we invalidate the cache line pulled before the IRS
@@ -623,12 +626,14 @@ static int __init gicv5_irs_of_init_affinity(struct device_node *node,
 		int cpu;
 
 		cpu_node = of_parse_phandle(node, "cpus", i);
-		if (WARN_ON(!cpu_node))
+		if (!cpu_node) {
+			pr_warn(FW_BUG "Erroneous CPU node phandle\n");
 			continue;
+		}
 
 		cpu = of_cpu_node_to_id(cpu_node);
 		of_node_put(cpu_node);
-		if (WARN_ON(cpu < 0))
+		if (cpu < 0)
 			continue;
 
 		if (iaffids[i] & ~iaffid_mask) {
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index 54833717f8a7..667bde3c651f 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -238,7 +238,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
 	}
 
 	base = ioremap(gicp->res->start, resource_size(gicp->res));
-	if (IS_ERR(base)) {
+	if (!base) {
 		dev_err(&pdev->dev, "ioremap() failed. Unable to clear pending interrupts.\n");
 	} else {
 		for (i = 0; i < 64; i++)
diff --git a/init/Kconfig b/init/Kconfig
index 836320251219..d811cad02a75 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -117,10 +117,11 @@ config CC_HAS_NO_PROFILE_FN_ATTR
 
 config CC_HAS_COUNTED_BY
 	bool
-	# clang needs to be at least 19.1.3 to avoid __bdos miscalculations
-	# https://github.com/llvm/llvm-project/pull/110497
-	# https://github.com/llvm/llvm-project/pull/112636
-	default y if CC_IS_CLANG && CLANG_VERSION >= 190103
+	# clang needs to be at least 20.1.0 to avoid potential crashes
+	# when building structures that contain __counted_by
+	# https://github.com/ClangBuiltLinux/linux/issues/2114
+	# https://github.com/llvm/llvm-project/commit/160fb1121cdf703c3ef5e61fb26c5659eb581489
+	default y if CC_IS_CLANG && CLANG_VERSION >= 200100
 	# supported since gcc 15.1.0
 	# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896
 	default y if CC_IS_GCC && GCC_VERSION >= 150100
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e2d51f4306b3..f25301267e47 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1496,10 +1496,12 @@ throttle:
 	}
 
 	if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
-		if (dl_server(dl_se))
-			enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
-		else
+		if (dl_server(dl_se)) {
+			replenish_dl_new_period(dl_se, rq);
+			start_dl_timer(dl_se);
+		} else {
 			enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
+		}
 	}
 
 	if (!is_leftmost(dl_se, &rq->dl))
@@ -1611,7 +1613,7 @@ void dl_server_stop(struct sched_dl_entity *dl_se)
 static bool dl_server_stopped(struct sched_dl_entity *dl_se)
 {
 	if (!dl_se->dl_server_active)
-		return false;
+		return true;
 
 	if (dl_se->dl_server_idle) {
 		dl_server_stop(dl_se);
@@ -1849,7 +1851,9 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 	u64 deadline = dl_se->deadline;
 
 	dl_rq->dl_nr_running++;
-	add_nr_running(rq_of_dl_rq(dl_rq), 1);
+
+	if (!dl_server(dl_se))
+		add_nr_running(rq_of_dl_rq(dl_rq), 1);
 
 	inc_dl_deadline(dl_rq, deadline);
 }
@@ -1859,7 +1863,9 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
-	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+
+	if (!dl_server(dl_se))
+		sub_nr_running(rq_of_dl_rq(dl_rq), 1);
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 }
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 3f06ab84d53f..02e16b70a790 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -376,10 +376,8 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu
 		return -EINVAL;
 	}
 
-	if (rq->cfs.h_nr_queued) {
-		update_rq_clock(rq);
-		dl_server_stop(&rq->fair_server);
-	}
+	update_rq_clock(rq);
+	dl_server_stop(&rq->fair_server);
 
 	retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0);
 	if (retval)
diff --git a/lib/ubsan.c b/lib/ubsan.c
index a6ca235dd714..456e3dd8f4ea 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -333,18 +333,18 @@ EXPORT_SYMBOL(__ubsan_handle_implicit_conversion);
 void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
 {
 	struct overflow_data *data = _data;
-	char rhs_val_str[VALUE_LENGTH];
+	char lhs_val_str[VALUE_LENGTH];
 
 	if (suppress_report(&data->location))
 		return;
 
 	ubsan_prologue(&data->location, "division-overflow");
 
-	val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
+	val_to_string(lhs_val_str, sizeof(lhs_val_str), data->type, lhs);
 
 	if (type_is_signed(data->type) && get_signed_val(data->type, rhs) == -1)
 		pr_err("division of %s by -1 cannot be represented in type %s\n",
-			rhs_val_str, data->type->type_name);
+			lhs_val_str, data->type->type_name);
 	else
 		pr_err("division by zero\n");
 
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.c b/tools/testing/selftests/arm64/fp/fp-ptrace.c
index 124bc883365e..cdd7a45c045d 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.c
@@ -1187,7 +1187,7 @@ static void sve_write_sve(pid_t child, struct test_config *config)
 	if (!vl)
 		return;
 
-	iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE);
+	iov.iov_len = SVE_PT_SIZE(vq, SVE_PT_REGS_SVE);
 	iov.iov_base = malloc(iov.iov_len);
 	if (!iov.iov_base) {
 		ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",
@@ -1234,8 +1234,7 @@ static void sve_write_fpsimd(pid_t child, struct test_config *config)
 	if (!vl)
 		return;
 
-	iov.iov_len = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq,
-						SVE_PT_REGS_FPSIMD);
+	iov.iov_len = SVE_PT_SIZE(vq, SVE_PT_REGS_FPSIMD);
 	iov.iov_base = malloc(iov.iov_len);
 	if (!iov.iov_base) {
 		ksft_print_msg("Failed allocating %lu byte SVE write buffer\n",