Diffstat (limited to 'kernel/cpu.c')
 kernel/cpu.c | 231
 1 file changed, 188 insertions(+), 43 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3d2bf1d50a0c..b674fdf96208 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -37,6 +37,7 @@
 #include <linux/cpuset.h>
 #include <linux/random.h>
 #include <linux/cc_platform.h>
+#include <linux/parser.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -330,7 +331,7 @@ static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state st
 			/* Poll for one millisecond */
 			arch_cpuhp_sync_state_poll();
 		} else {
-			usleep_range_state(USEC_PER_MSEC, 2 * USEC_PER_MSEC, TASK_UNINTERRUPTIBLE);
+			usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
 		}
 		sync = atomic_read(st);
 	}
@@ -483,6 +484,8 @@ static int cpu_hotplug_disabled;
 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 
+static bool cpu_hotplug_offline_disabled __ro_after_init;
+
 void cpus_read_lock(void)
 {
 	percpu_down_read(&cpu_hotplug_lock);
 }
@@ -524,6 +527,7 @@ void lockdep_assert_cpus_held(void)
 
 	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 }
+EXPORT_SYMBOL_GPL(lockdep_assert_cpus_held);
 
 #ifdef CONFIG_LOCKDEP
 int lockdep_is_cpus_held(void)
@@ -542,6 +546,14 @@ static void lockdep_release_cpus_lock(void)
 {
 	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
 }
 
+/* Declare CPU offlining not supported */
+void cpu_hotplug_disable_offlining(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_offline_disabled = true;
+	cpu_maps_update_done();
+}
+
 /*
  * Wait for currently running CPU hotplug operations to complete (if any) and
  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
@@ -895,12 +907,13 @@ static int finish_cpu(unsigned int cpu)
 	struct mm_struct *mm = idle->active_mm;
 
 	/*
-	 * idle_task_exit() will have switched to &init_mm, now
-	 * clean up any remaining active_mm state.
+	 * sched_force_init_mm() ensured the use of &init_mm,
+	 * drop that refcount now that the CPU has stopped.
 	 */
-	if (mm != &init_mm)
-		idle->active_mm = &init_mm;
+	WARN_ON(mm != &init_mm);
+	idle->active_mm = NULL;
 	mmdrop_lazy_tlb(mm);
+
 	return 0;
 }
@@ -1296,9 +1309,6 @@ static int takedown_cpu(unsigned int cpu)
 	 */
 	irq_lock_sparse();
 
-	/*
-	 * So now all preempt/rcu users must observe !cpu_active().
-	 */
 	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
 	if (err) {
 		/* CPU refused to die */
@@ -1328,7 +1338,7 @@
 
 	cpuhp_bp_sync_dead(cpu);
 
-	tick_cleanup_dead_cpu(cpu);
+	lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));
 
 	/*
 	 * Callbacks must be re-integrated right away to the RCU state machine.
@@ -1442,11 +1452,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 out:
 	cpus_write_unlock();
 
-	/*
-	 * Do post unplug cleanup. This is still protected against
-	 * concurrent CPU hotplug via cpu_add_remove_lock.
-	 */
-	lockup_detector_cleanup();
 	arch_smt_update();
 	return ret;
 }
@@ -1471,7 +1476,7 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
 	 * If the platform does not support hotplug, report it explicitly to
 	 * differentiate it from a transient offlining failure.
 	 */
-	if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
+	if (cpu_hotplug_offline_disabled)
 		return -EOPNOTSUPP;
 	if (cpu_hotplug_disabled)
 		return -EBUSY;
@@ -1798,6 +1803,7 @@ static int __init parallel_bringup_parse_param(char *arg)
 }
 early_param("cpuhp.parallel", parallel_bringup_parse_param);
 
+#ifdef CONFIG_HOTPLUG_SMT
 static inline bool cpuhp_smt_aware(void)
 {
 	return cpu_smt_max_threads > 1;
 }
@@ -1807,6 +1813,21 @@
 static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
 {
 	return cpu_primary_thread_mask;
 }
+#else
+static inline bool cpuhp_smt_aware(void)
+{
+	return false;
+}
+static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
+{
+	return cpu_none_mask;
+}
+#endif
+
+bool __weak arch_cpuhp_init_parallel_bringup(void)
+{
+	return true;
+}
 
 /*
  * On architectures which have enabled parallel bringup this invokes all BP
@@ -1894,8 +1915,8 @@ int freeze_secondary_cpus(int primary)
 	cpumask_clear(frozen_cpus);
 
 	pr_info("Disabling non-boot CPUs ...\n");
-	for_each_online_cpu(cpu) {
-		if (cpu == primary)
+	for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
+		if (!cpu_online(cpu) || cpu == primary)
 			continue;
 
 		if (pm_wakeup_pending()) {
@@ -2046,11 +2067,6 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 		.teardown.single	= NULL,
 		.cant_stop		= true,
 	},
-	[CPUHP_PERF_PREPARE] = {
-		.name			= "perf:prepare",
-		.startup.single		= perf_event_init_cpu,
-		.teardown.single	= perf_event_exit_cpu,
-	},
 	[CPUHP_RANDOM_PREPARE] = {
 		.name			= "random:prepare",
 		.startup.single		= random_prepare_cpu,
@@ -2153,7 +2169,7 @@
 	},
 	[CPUHP_AP_HRTIMERS_DYING] = {
 		.name			= "hrtimers:dying",
-		.startup.single		= NULL,
+		.startup.single		= hrtimers_cpu_starting,
 		.teardown.single	= hrtimers_cpu_dying,
 	},
 	[CPUHP_AP_TICK_DYING] = {
@@ -2679,6 +2695,14 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 	return ret;
 }
 
+/* Check if the core a CPU belongs to is online */
+#if !defined(topology_is_core_online)
+static inline bool topology_is_core_online(unsigned int cpu)
+{
+	return true;
+}
+#endif
+
 int cpuhp_smt_enable(void)
 {
 	int cpu, ret = 0;
@@ -2689,7 +2713,7 @@ int cpuhp_smt_enable(void)
 		/* Skip online CPUs and CPUs on offline nodes */
 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
 			continue;
-		if (!cpu_smt_thread_allowed(cpu))
+		if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
 			continue;
 		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
 		if (ret)
@@ -2832,7 +2856,6 @@ static struct attribute *cpuhp_cpu_attrs[] = {
 static const struct attribute_group cpuhp_cpu_attr_group = {
 	.attrs = cpuhp_cpu_attrs,
 	.name = "hotplug",
-	NULL
 };
 
 static ssize_t states_show(struct device *dev,
@@ -2864,7 +2887,6 @@ static struct attribute *cpuhp_cpu_root_attrs[] = {
 static const struct attribute_group cpuhp_cpu_root_attr_group = {
 	.attrs = cpuhp_cpu_root_attrs,
 	.name = "hotplug",
-	NULL
 };
 
 #ifdef CONFIG_HOTPLUG_SMT
@@ -2986,7 +3008,6 @@ static struct attribute *cpuhp_smt_attrs[] = {
 static const struct attribute_group cpuhp_smt_attr_group = {
 	.attrs = cpuhp_smt_attrs,
 	.name = "smt",
-	NULL
 };
 
 static int __init cpu_smt_sysfs_init(void)
@@ -3064,14 +3085,20 @@
 EXPORT_SYMBOL(cpu_all_bits);
 
 #ifdef CONFIG_INIT_ALL_POSSIBLE
 struct cpumask __cpu_possible_mask __ro_after_init = {CPU_BITS_ALL};
+unsigned int __num_possible_cpus __ro_after_init = NR_CPUS;
 #else
 struct cpumask __cpu_possible_mask __ro_after_init;
+unsigned int __num_possible_cpus __ro_after_init;
 #endif
 EXPORT_SYMBOL(__cpu_possible_mask);
+EXPORT_SYMBOL(__num_possible_cpus);
 
 struct cpumask __cpu_online_mask __read_mostly;
 EXPORT_SYMBOL(__cpu_online_mask);
 
+struct cpumask __cpu_enabled_mask __read_mostly;
+EXPORT_SYMBOL(__cpu_enabled_mask);
+
 struct cpumask __cpu_present_mask __read_mostly;
 EXPORT_SYMBOL(__cpu_present_mask);
@@ -3092,11 +3119,7 @@ void init_cpu_present(const struct cpumask *src)
 void init_cpu_possible(const struct cpumask *src)
 {
 	cpumask_copy(&__cpu_possible_mask, src);
-}
-
-void init_cpu_online(const struct cpumask *src)
-{
-	cpumask_copy(&__cpu_online_mask, src);
+	__num_possible_cpus = cpumask_weight(&__cpu_possible_mask);
 }
 
 void set_cpu_online(unsigned int cpu, bool online)
@@ -3121,6 +3144,21 @@
 }
 
 /*
+ * This should be marked __init, but there is a boatload of call sites
+ * which need to be fixed up to do so. Sigh...
+ */
+void set_cpu_possible(unsigned int cpu, bool possible)
+{
+	if (possible) {
+		if (!cpumask_test_and_set_cpu(cpu, &__cpu_possible_mask))
+			__num_possible_cpus++;
+	} else {
+		if (cpumask_test_and_clear_cpu(cpu, &__cpu_possible_mask))
+			__num_possible_cpus--;
+	}
+}
+
+/*
  * Activate the first processor.
  */
 void __init boot_cpu_init(void)
@@ -3153,8 +3191,38 @@ void __init boot_cpu_hotplug_init(void)
 
 #ifdef CONFIG_CPU_MITIGATIONS
 /*
- * These are used for a global "mitigations=" cmdline option for toggling
- * optional CPU mitigations.
+ * All except the cross-thread attack vector are mitigated by default.
+ * Cross-thread mitigation often requires disabling SMT which is expensive
+ * so cross-thread mitigations are only partially enabled by default.
+ *
+ * Guest-to-Host and Guest-to-Guest vectors are only needed if KVM support is
+ * present.
+ */
+static bool attack_vectors[NR_CPU_ATTACK_VECTORS] __ro_after_init = {
+	[CPU_MITIGATE_USER_KERNEL] = true,
+	[CPU_MITIGATE_USER_USER] = true,
+	[CPU_MITIGATE_GUEST_HOST] = IS_ENABLED(CONFIG_KVM),
+	[CPU_MITIGATE_GUEST_GUEST] = IS_ENABLED(CONFIG_KVM),
+};
+
+bool cpu_attack_vector_mitigated(enum cpu_attack_vectors v)
+{
+	if (v < NR_CPU_ATTACK_VECTORS)
+		return attack_vectors[v];
+
+	WARN_ONCE(1, "Invalid attack vector %d\n", v);
+	return false;
+}
+
+/*
+ * There are 3 global options, 'off', 'auto', 'auto,nosmt'. These may optionally
+ * be combined with attack-vector disables which follow them.
+ *
+ * Examples:
+ *   mitigations=auto,no_user_kernel,no_user_user,no_cross_thread
+ *   mitigations=auto,nosmt,no_guest_host,no_guest_guest
+ *
+ * mitigations=off is equivalent to disabling all attack vectors.
  */
 enum cpu_mitigations {
 	CPU_MITIGATIONS_OFF,
@@ -3162,19 +3230,96 @@ enum cpu_mitigations {
 	CPU_MITIGATIONS_AUTO_NOSMT,
 };
 
+enum {
+	NO_USER_KERNEL,
+	NO_USER_USER,
+	NO_GUEST_HOST,
+	NO_GUEST_GUEST,
+	NO_CROSS_THREAD,
+	NR_VECTOR_PARAMS,
+};
+
+enum smt_mitigations smt_mitigations __ro_after_init = SMT_MITIGATIONS_AUTO;
 static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
 
+static const match_table_t global_mitigations = {
+	{ CPU_MITIGATIONS_AUTO_NOSMT,	"auto,nosmt"},
+	{ CPU_MITIGATIONS_AUTO,		"auto"},
+	{ CPU_MITIGATIONS_OFF,		"off"},
+};
+
+static const match_table_t vector_mitigations = {
+	{ NO_USER_KERNEL,	"no_user_kernel"},
+	{ NO_USER_USER,		"no_user_user"},
+	{ NO_GUEST_HOST,	"no_guest_host"},
+	{ NO_GUEST_GUEST,	"no_guest_guest"},
+	{ NO_CROSS_THREAD,	"no_cross_thread"},
+	{ NR_VECTOR_PARAMS,	NULL},
+};
+
+static int __init mitigations_parse_global_opt(char *arg)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(global_mitigations); i++) {
+		const char *pattern = global_mitigations[i].pattern;
+
+		if (!strncmp(arg, pattern, strlen(pattern))) {
+			cpu_mitigations = global_mitigations[i].token;
+			return strlen(pattern);
+		}
+	}
+
+	return 0;
+}
+
 static int __init mitigations_parse_cmdline(char *arg)
 {
-	if (!strcmp(arg, "off"))
-		cpu_mitigations = CPU_MITIGATIONS_OFF;
-	else if (!strcmp(arg, "auto"))
-		cpu_mitigations = CPU_MITIGATIONS_AUTO;
-	else if (!strcmp(arg, "auto,nosmt"))
-		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
-	else
-		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
-			arg);
+	char *s, *p;
+	int len;
+
+	len = mitigations_parse_global_opt(arg);
+
+	if (cpu_mitigations_off()) {
+		memset(attack_vectors, 0, sizeof(attack_vectors));
+		smt_mitigations = SMT_MITIGATIONS_OFF;
+	} else if (cpu_mitigations_auto_nosmt()) {
+		smt_mitigations = SMT_MITIGATIONS_ON;
+	}
+
+	p = arg + len;
+
+	if (!*p)
+		return 0;
+
+	/* Attack vector controls may come after the ',' */
+	if (*p++ != ',' || !IS_ENABLED(CONFIG_ARCH_HAS_CPU_ATTACK_VECTORS)) {
+		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n", arg);
+		return 0;
+	}
+
+	while ((s = strsep(&p, ",")) != NULL) {
+		switch (match_token(s, vector_mitigations, NULL)) {
+		case NO_USER_KERNEL:
+			attack_vectors[CPU_MITIGATE_USER_KERNEL] = false;
+			break;
+		case NO_USER_USER:
+			attack_vectors[CPU_MITIGATE_USER_USER] = false;
+			break;
+		case NO_GUEST_HOST:
+			attack_vectors[CPU_MITIGATE_GUEST_HOST] = false;
+			break;
+		case NO_GUEST_GUEST:
+			attack_vectors[CPU_MITIGATE_GUEST_GUEST] = false;
+			break;
+		case NO_CROSS_THREAD:
+			smt_mitigations = SMT_MITIGATIONS_OFF;
+			break;
+		default:
+			pr_crit("Unsupported mitigations options %s\n", s);
+			return 0;
+		}
+	}
 
 	return 0;
 }
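
Note on cpu_hotplug_disable_offlining(): cpu_down_maps_locked() now tests the
cpu_hotplug_offline_disabled flag instead of calling
cc_platform_has(CC_ATTR_HOTPLUG_DISABLED) directly, so a platform that cannot
offline CPUs is expected to declare that once during boot. A minimal sketch of
such a caller follows; the init function name is hypothetical, only
cpu_hotplug_disable_offlining() itself comes from this diff.

#include <linux/cpu.h>
#include <linux/init.h>

/* Hypothetical platform init: declare that CPU offlining is unsupported */
static int __init example_no_offline_init(void)
{
	/*
	 * After this, cpu_down_maps_locked() returns -EOPNOTSUPP for any
	 * offline request, matching the old CC_ATTR_HOTPLUG_DISABLED check.
	 */
	cpu_hotplug_disable_offlining();
	return 0;
}
early_initcall(example_no_offline_init);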
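The topology_is_core_online() fallback is guarded by #if !defined(...), i.e.
it is a default that an architecture can replace by defining a macro of the
same name in its topology header. A sketch of such an override; the
arch_core_is_up() helper is illustrative and not part of this diff.

/* In a hypothetical <asm/topology.h> */
static inline bool topology_is_core_online(unsigned int cpu)
{
	return arch_core_is_up(cpu);	/* hypothetical arch-specific query */
}
#define topology_is_core_online topology_is_core_online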
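__num_possible_cpus caches the weight of __cpu_possible_mask and is kept in
sync by init_cpu_possible() and set_cpu_possible(). The companion header
change is not part of this diff, but presumably num_possible_cpus() becomes a
plain read of the cached count instead of a cpumask_weight() walk, roughly:

/* Assumed shape of the <linux/cpumask.h> side; not shown in this diff */
static __always_inline unsigned int num_possible_cpus(void)
{
	return __num_possible_cpus;
}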
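cpu_attack_vector_mitigated() is the query side of the attack_vectors[] table:
per-bug mitigation selection can ask whether a vector is still in scope
instead of parsing command-line options itself. A hedged sketch of a caller;
the bug-policy function is illustrative and not part of this diff.

/* Hypothetical per-bug policy built on the new helper */
static bool __init example_bug_needs_mitigation(void)
{
	/* Mitigate when either affected vector is still enabled */
	return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
	       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST);
}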
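The rewritten mitigations= parsing works in two stages:
mitigations_parse_global_opt() prefix-matches one of the three global options
(table order matters: "auto,nosmt" must precede "auto" because strncmp() does
prefix matching) and returns the number of characters consumed; the remainder
is then split at commas and matched against the attack-vector table. A
standalone userspace sketch of that flow, with plain string comparison
standing in for the kernel's match_token():

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

/* Longest-prefix-first, same ordering trick as global_mitigations[] */
static const char *globals[] = { "auto,nosmt", "auto", "off" };

int main(void)
{
	char arg[] = "auto,nosmt,no_guest_host,no_guest_guest";
	size_t len = 0;
	char *p, *s;

	for (size_t i = 0; i < sizeof(globals) / sizeof(globals[0]); i++) {
		if (!strncmp(arg, globals[i], strlen(globals[i]))) {
			len = strlen(globals[i]);
			printf("global option: %s\n", globals[i]);
			break;
		}
	}

	p = arg + len;
	if (!*p)		/* no attack-vector controls */
		return 0;
	if (*p++ != ',')	/* garbage after the global option */
		return 1;
	while ((s = strsep(&p, ",")) != NULL)
		printf("vector disable: %s\n", s);
	return 0;
}

Run as-is this prints the global option "auto,nosmt" followed by the two
vector disables, matching the second example in the comment block of the diff.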
