Diffstat (limited to 'drivers/base/cpu.c')
-rw-r--r-- drivers/base/cpu.c | 245
1 file changed, 156 insertions(+), 89 deletions(-)
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4c98849577d4..c6c57b6f61c6 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -19,13 +19,14 @@
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
+#include <linux/delay.h>
#include <linux/sched/isolation.h>
#include "base.h"
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
-static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
+static int cpu_subsys_match(struct device *dev, const struct device_driver *drv)
{
/* ACPI style match is the only one that may succeed. */
if (acpi_driver_match_device(dev, drv))
@@ -50,12 +51,30 @@ static int cpu_subsys_online(struct device *dev)
int cpuid = dev->id;
int from_nid, to_nid;
int ret;
+ int retries = 0;
from_nid = cpu_to_node(cpuid);
if (from_nid == NUMA_NO_NODE)
return -ENODEV;
+retry:
ret = cpu_device_up(dev);
+
+ /*
+ * If -EBUSY is returned, it is likely that hotplug is temporarily
+ * disabled when cpu_hotplug_disable() was called. This condition is
+ * transient. So we retry after waiting for an exponentially
+ * increasing delay up to a total of at least 620ms as some PCI
+ * device initialization can take quite a while.
+ */
+ if (ret == -EBUSY) {
+ retries++;
+ if (retries > 5)
+ return ret;
+ msleep(10 * (1 << retries));
+ goto retry;
+ }
+
/*
* When hot adding memory to memoryless node and enabling a cpu
* on the node, node number of the cpu may internally change.
@@ -76,6 +95,7 @@ void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->dev.id;
+ set_cpu_enabled(logical_cpu, false);
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
device_unregister(&cpu->dev);
@@ -125,18 +145,7 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */
-struct bus_type cpu_subsys = {
- .name = "cpu",
- .dev_name = "cpu",
- .match = cpu_subsys_match,
-#ifdef CONFIG_HOTPLUG_CPU
- .online = cpu_subsys_online,
- .offline = cpu_subsys_offline,
-#endif
-};
-EXPORT_SYMBOL_GPL(cpu_subsys);
-
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
static ssize_t crash_notes_show(struct device *dev,
@@ -181,14 +190,14 @@ static const struct attribute_group crash_note_cpu_attr_group = {
#endif
static const struct attribute_group *common_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_CRASH_DUMP
&crash_note_cpu_attr_group,
#endif
NULL
};
static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_CRASH_DUMP
&crash_note_cpu_attr_group,
#endif
NULL
@@ -265,6 +274,13 @@ static ssize_t print_cpus_offline(struct device *dev,
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+static ssize_t print_cpus_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(cpu_enabled_mask));
+}
+static DEVICE_ATTR(enabled, 0444, print_cpus_enabled, NULL);
+
static ssize_t print_cpus_isolated(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -284,13 +300,40 @@ static ssize_t print_cpus_isolated(struct device *dev,
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+static ssize_t housekeeping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *hk_mask;
+
+ hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
+
+ if (housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(hk_mask));
+ return sysfs_emit(buf, "\n");
+}
+static DEVICE_ATTR_RO(housekeeping);
+
#ifdef CONFIG_NO_HZ_FULL
-static ssize_t print_cpus_nohz_full(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nohz_full_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+ if (cpumask_available(tick_nohz_full_mask))
+ return sysfs_emit(buf, "%*pbl\n",
+ cpumask_pr_args(tick_nohz_full_mask));
+ return sysfs_emit(buf, "\n");
}
-static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+static DEVICE_ATTR_RO(nohz_full);
+#endif
+
+#ifdef CONFIG_CRASH_HOTPLUG
+static ssize_t crash_hotplug_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
+}
+static DEVICE_ATTR_RO(crash_hotplug);
#endif
static void cpu_device_release(struct device *dev)
@@ -299,7 +342,7 @@ static void cpu_device_release(struct device *dev)
* This is an empty function to prevent the driver core from spitting a
* warning at us. Yes, I know this is directly opposite of what the
* documentation for the driver core and kobjects say, and the author
- * of this code has already been publically ridiculed for doing
+ * of this code has already been publicly ridiculed for doing
* something as foolish as this. However, at this point in time, it is
* the only way to handle the issue of statically allocated cpu
* devices. The different architectures will have their cpu device
@@ -336,7 +379,7 @@ static ssize_t print_cpu_modalias(struct device *dev,
return len;
}
-static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (buf) {
@@ -348,6 +391,20 @@ static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#endif
+const struct bus_type cpu_subsys = {
+ .name = "cpu",
+ .dev_name = "cpu",
+ .match = cpu_subsys_match,
+#ifdef CONFIG_HOTPLUG_CPU
+ .online = cpu_subsys_online,
+ .offline = cpu_subsys_offline,
+#endif
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ .uevent = cpu_uevent,
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_subsys);
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -368,9 +425,6 @@ int register_cpu(struct cpu *cpu, int num)
cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu->dev.offline = !cpu_online(num);
cpu->dev.of_node = of_get_cpu_node(num, NULL);
-#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
- cpu->dev.bus->uevent = cpu_uevent;
-#endif
cpu->dev.groups = common_cpu_attr_groups;
if (cpu->hotpluggable)
cpu->dev.groups = hotplugable_cpu_attr_groups;
@@ -384,6 +438,7 @@ int register_cpu(struct cpu *cpu, int num)
register_cpu_under_node(num, cpu_to_node(num));
dev_pm_qos_expose_latency_limit(&cpu->dev,
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
+ set_cpu_enabled(num, true);
return 0;
}
@@ -465,10 +520,15 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
+ &dev_attr_enabled.attr,
&dev_attr_isolated.attr,
+ &dev_attr_housekeeping.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
#endif
+#ifdef CONFIG_CRASH_HOTPLUG
+ &dev_attr_crash_hotplug.attr,
+#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
&dev_attr_modalias.attr,
#endif
@@ -487,94 +547,81 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
bool cpu_is_hotpluggable(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
- return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable
+ && tick_nohz_cpu_hotpluggable(cpu);
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
#ifdef CONFIG_GENERIC_CPU_DEVICES
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-#endif
-
-static void __init cpu_dev_register_generic(void)
-{
-#ifdef CONFIG_GENERIC_CPU_DEVICES
- int i;
-
- for_each_possible_cpu(i) {
- if (register_cpu(&per_cpu(cpu_devices, i), i))
- panic("Failed to register CPU device");
- }
-#endif
-}
+DEFINE_PER_CPU(struct cpu, cpu_devices);
-#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
-
-ssize_t __weak cpu_show_meltdown(struct device *dev,
- struct device_attribute *attr, char *buf)
+bool __weak arch_cpu_is_hotpluggable(int cpu)
{
- return sysfs_emit(buf, "Not affected\n");
+ return false;
}
-ssize_t __weak cpu_show_spectre_v1(struct device *dev,
- struct device_attribute *attr, char *buf)
+int __weak arch_register_cpu(int cpu)
{
- return sysfs_emit(buf, "Not affected\n");
-}
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
-ssize_t __weak cpu_show_spectre_v2(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "Not affected\n");
-}
+ c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
-ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "Not affected\n");
+ return register_cpu(c, cpu);
}
-ssize_t __weak cpu_show_l1tf(struct device *dev,
- struct device_attribute *attr, char *buf)
+#ifdef CONFIG_HOTPLUG_CPU
+void __weak arch_unregister_cpu(int num)
{
- return sysfs_emit(buf, "Not affected\n");
+ unregister_cpu(&per_cpu(cpu_devices, num));
}
+#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_GENERIC_CPU_DEVICES */
-ssize_t __weak cpu_show_mds(struct device *dev,
- struct device_attribute *attr, char *buf)
+static void __init cpu_dev_register_generic(void)
{
- return sysfs_emit(buf, "Not affected\n");
-}
+ int i, ret;
-ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sysfs_emit(buf, "Not affected\n");
-}
+ if (!IS_ENABLED(CONFIG_GENERIC_CPU_DEVICES))
+ return;
-ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "Not affected\n");
+ for_each_present_cpu(i) {
+ ret = arch_register_cpu(i);
+ if (ret && ret != -EPROBE_DEFER)
+ pr_warn("register_cpu %d failed (%d)\n", i, ret);
+ }
}
-ssize_t __weak cpu_show_srbds(struct device *dev,
+#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
+static ssize_t cpu_show_not_affected(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Not affected\n");
}
-ssize_t __weak cpu_show_mmio_stale_data(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "Not affected\n");
-}
-
-ssize_t __weak cpu_show_retbleed(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "Not affected\n");
-}
+#define CPU_SHOW_VULN_FALLBACK(func) \
+ ssize_t cpu_show_##func(struct device *, \
+ struct device_attribute *, char *) \
+ __attribute__((weak, alias("cpu_show_not_affected")))
+
+CPU_SHOW_VULN_FALLBACK(meltdown);
+CPU_SHOW_VULN_FALLBACK(spectre_v1);
+CPU_SHOW_VULN_FALLBACK(spectre_v2);
+CPU_SHOW_VULN_FALLBACK(spec_store_bypass);
+CPU_SHOW_VULN_FALLBACK(l1tf);
+CPU_SHOW_VULN_FALLBACK(mds);
+CPU_SHOW_VULN_FALLBACK(tsx_async_abort);
+CPU_SHOW_VULN_FALLBACK(itlb_multihit);
+CPU_SHOW_VULN_FALLBACK(srbds);
+CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
+CPU_SHOW_VULN_FALLBACK(retbleed);
+CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
+CPU_SHOW_VULN_FALLBACK(gds);
+CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+CPU_SHOW_VULN_FALLBACK(ghostwrite);
+CPU_SHOW_VULN_FALLBACK(old_microcode);
+CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+CPU_SHOW_VULN_FALLBACK(tsa);
+CPU_SHOW_VULN_FALLBACK(vmscape);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -587,6 +634,14 @@ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
+static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
+static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -600,6 +655,14 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_srbds.attr,
&dev_attr_mmio_stale_data.attr,
&dev_attr_retbleed.attr,
+ &dev_attr_spec_rstack_overflow.attr,
+ &dev_attr_gather_data_sampling.attr,
+ &dev_attr_reg_file_data_sampling.attr,
+ &dev_attr_ghostwrite.attr,
+ &dev_attr_old_microcode.attr,
+ &dev_attr_indirect_target_selection.attr,
+ &dev_attr_tsa.attr,
+ &dev_attr_vmscape.attr,
NULL
};
@@ -610,9 +673,13 @@ static const struct attribute_group cpu_root_vulnerabilities_group = {
static void __init cpu_register_vulnerabilities(void)
{
- if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
- &cpu_root_vulnerabilities_group))
- pr_err("Unable to register CPU vulnerabilities\n");
+ struct device *dev = bus_get_dev_root(&cpu_subsys);
+
+ if (dev) {
+ if (sysfs_create_group(&dev->kobj, &cpu_root_vulnerabilities_group))
+ pr_err("Unable to register CPU vulnerabilities\n");
+ put_device(dev);
+ }
}
#else