From 9a6c2c3c7a73ce315c57c1b002caad6fcc858d0f Mon Sep 17 00:00:00 2001
From: Giovanni Gherdovich
Date: Thu, 16 Apr 2020 07:47:42 +0200
Subject: x86, sched: Bail out of frequency invariance if base frequency is unknown

Some hypervisors such as VMWare ESXi 5.5 advertise support for
X86_FEATURE_APERFMPERF but then fill all MSR's with zeroes. In particular,
MSR_PLATFORM_INFO set to zero tricks the code that wants to know the base
clock frequency of the CPU (highest non-turbo frequency), producing a
division by zero when computing the ratio turbo_freq/base_freq necessary
for frequency invariant accounting.

It is to be noted that even if MSR_PLATFORM_INFO contained the appropriate
data, APERF and MPERF are constantly zero on ESXi 5.5, thus freq-invariance
couldn't be done in principle (not that it would make a lot of sense in a
VM anyway). The real problem is advertising X86_FEATURE_APERFMPERF. This
appears to be fixed in more recent versions: ESXi 6.7 doesn't advertise
that feature.

Fixes: 1567c3e3467c ("x86, sched: Add support for frequency invariance")
Signed-off-by: Giovanni Gherdovich
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Rafael J. Wysocki
Link: https://lkml.kernel.org/r/20200416054745.740-2-ggherdovich@suse.cz
---
 arch/x86/kernel/smpboot.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fe3ab9632f3b..3a318ec9bc17 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1985,6 +1985,15 @@ static bool intel_set_max_freq_ratio(void)
         return false;
 
 out:
+        /*
+         * Some hypervisors advertise X86_FEATURE_APERFMPERF
+         * but then fill all MSR's with zeroes.
+         */
+        if (!base_freq) {
+                pr_debug("Couldn't determine cpu base frequency, necessary for scale-invariant accounting.\n");
+                return false;
+        }
+
         arch_turbo_freq_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE,
                                         base_freq);
         arch_set_max_freq_ratio(turbo_disabled());
--
cgit
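For intuition, here is a worked example of the ratio the patch guards. This
is a standalone userspace sketch, not part of the patch: freq_ratio() is a
hypothetical helper, the frequency ratios are made up, and only the constant
SCHED_CAPACITY_SCALE = 1024 mirrors the scheduler's actual value.

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024  /* same constant the scheduler uses */

    /*
     * Hypothetical helper, for illustration only: turbo_freq and base_freq
     * stand for the ratios decoded from MSR_TURBO_RATIO_LIMIT and
     * MSR_PLATFORM_INFO. On ESXi 5.5 both MSRs read back as zero.
     */
    static uint64_t freq_ratio(uint64_t turbo_freq, uint64_t base_freq)
    {
            if (!base_freq)         /* the guard this patch adds */
                    return 0;       /* bail out instead of dividing by zero */
            return turbo_freq * SCHED_CAPACITY_SCALE / base_freq;
    }

    int main(void)
    {
            /* e.g. base ratio 20 (2.0 GHz), 4C turbo ratio 30 (3.0 GHz) */
            printf("%llu\n", (unsigned long long)freq_ratio(30, 20)); /* 1536 */
            printf("%llu\n", (unsigned long long)freq_ratio(30, 0));  /* 0, no crash */
            return 0;
    }

With sane MSR values the ratio comes out above 1024 (turbo capacity relative
to base); with a zeroed MSR_PLATFORM_INFO the unguarded division would fault.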
From 23ccee22e834eca236b9a20989caf6905bd6954a Mon Sep 17 00:00:00 2001
From: Giovanni Gherdovich
Date: Thu, 16 Apr 2020 07:47:43 +0200
Subject: x86, sched: Account for CPUs with less than 4 cores in freq. invariance

If a CPU has less than 4 physical cores, MSR_TURBO_RATIO_LIMIT will
rightfully report that the 4C turbo ratio is zero. In such cases, use
the 1C turbo ratio instead for frequency invariance calculations.

Fixes: 1567c3e3467c ("x86, sched: Add support for frequency invariance")
Reported-by: Like Xu
Reported-by: Neil Rickert
Signed-off-by: Giovanni Gherdovich
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Rafael J. Wysocki
Tested-by: Dave Kleikamp
Link: https://lkml.kernel.org/r/20200416054745.740-3-ggherdovich@suse.cz
---
 arch/x86/kernel/smpboot.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 3a318ec9bc17..5d346b70844b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1945,18 +1945,23 @@ static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
 
 static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 {
+        u64 msr;
         int err;
 
         err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
         if (err)
                 return false;
 
-        err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, turbo_freq);
+        err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
         if (err)
                 return false;
 
-        *base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */
-        *turbo_freq = (*turbo_freq >> 24) & 0xFF;   /* 4C turbo    */
+        *base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */
+        *turbo_freq = (msr >> 24) & 0xFF;           /* 4C turbo    */
+
+        /* The CPU may have less than 4 cores */
+        if (!*turbo_freq)
+                *turbo_freq = msr & 0xFF;           /* 1C turbo    */
 
         return true;
 }
--
cgit
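The bit-field decoding the patch touches can be tried in isolation. A small
standalone sketch follows (illustrative only; msr is a sample value chosen to
model a 2-core part, not a real MSR read, and the bit layout shown is the one
the patch itself relies on):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /*
             * Sample MSR_TURBO_RATIO_LIMIT value: 1C turbo ratio 35 in
             * bits 7:0, 4C turbo ratio (bits 31:24) left at zero, as on
             * a CPU with fewer than 4 physical cores.
             */
            uint64_t msr = 35;
            uint64_t turbo_freq;

            turbo_freq = (msr >> 24) & 0xFF;        /* 4C turbo */
            if (!turbo_freq)                        /* fewer than 4 cores */
                    turbo_freq = msr & 0xFF;        /* fall back to 1C turbo */

            printf("turbo ratio: %llu\n", (unsigned long long)turbo_freq); /* 35 */
            return 0;
    }

Without the fallback, turbo_freq would stay zero on such parts and feed a
zero numerator into the turbo/base ratio computed later.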
From b56e7d45e80796ca963ac10902245b244d823caf Mon Sep 17 00:00:00 2001
From: "Peter Zijlstra (Intel)"
Date: Thu, 16 Apr 2020 07:47:44 +0200
Subject: x86, sched: Don't enable static key when starting secondary CPUs

The static key arch_scale_freq_key only needs to be enabled once (at
boot). This change fixes a bug by which the key was enabled every time
cpu0 is started, even as a secondary CPU during cpu hotplug. Secondary
CPUs are started from the idle thread: setting a static key from there
means acquiring a lock and may result in sleeping in the idle task,
causing CPU lockup.

Another consequence of this change is that init_counter_refs() is now
called on each CPU correctly; previously the function on_each_cpu() was
used, but it was called at boot when the only online cpu is cpu0.

[ggherdovich@suse.cz: Tested and wrote changelog]
Fixes: 1567c3e3467c ("x86, sched: Add support for frequency invariance")
Reported-by: Chris Wilson
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Giovanni Gherdovich
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Rafael J. Wysocki
Link: https://lkml.kernel.org/r/20200416054745.740-4-ggherdovich@suse.cz
---
 arch/x86/kernel/smpboot.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5d346b70844b..dd8e15f648bc 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -147,7 +147,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
         *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 }
 
-static void init_freq_invariance(void);
+static void init_freq_invariance(bool secondary);
 
 /*
  * Report back to the Boot Processor during boot time or to the caller processor
@@ -185,7 +185,7 @@ static void smp_callin(void)
          */
         set_cpu_sibling_map(raw_smp_processor_id());
 
-        init_freq_invariance();
+        init_freq_invariance(true);
 
         /*
          * Get our bogomips.
@@ -1341,7 +1341,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         set_sched_topology(x86_topology);
 
         set_cpu_sibling_map(0);
-        init_freq_invariance();
+        init_freq_invariance(false);
         smp_sanity_check();
 
         switch (apic_intr_mode) {
@@ -2005,7 +2005,7 @@ out:
         return true;
 }
 
-static void init_counter_refs(void *arg)
+static void init_counter_refs(void)
 {
         u64 aperf, mperf;
 
@@ -2016,18 +2016,25 @@ static void init_counter_refs(void *arg)
         this_cpu_write(arch_prev_mperf, mperf);
 }
 
-static void init_freq_invariance(void)
+static void init_freq_invariance(bool secondary)
 {
         bool ret = false;
 
-        if (smp_processor_id() != 0 || !boot_cpu_has(X86_FEATURE_APERFMPERF))
+        if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
                 return;
 
+        if (secondary) {
+                if (static_branch_likely(&arch_scale_freq_key)) {
+                        init_counter_refs();
+                }
+                return;
+        }
+
         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                 ret = intel_set_max_freq_ratio();
 
         if (ret) {
-                on_each_cpu(init_counter_refs, NULL, 1);
+                init_counter_refs();
                 static_branch_enable(&arch_scale_freq_key);
         } else {
                 pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
--
cgit

From db441bd9f630329c402d5cdd319f11bfcf509fb6 Mon Sep 17 00:00:00 2001
From: Giovanni Gherdovich
Date: Thu, 16 Apr 2020 07:47:45 +0200
Subject: x86, sched: Move check for CPU type to caller function

Improve readability of the function intel_set_max_freq_ratio() by moving
the check for KNL CPUs there, together with checks for GLM and SKX.

Signed-off-by: Giovanni Gherdovich
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Rafael J. Wysocki
Link: https://lkml.kernel.org/r/20200416054745.740-5-ggherdovich@suse.cz
---
 arch/x86/kernel/smpboot.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index dd8e15f648bc..8c89e4d9ad28 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1877,9 +1877,6 @@ static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
         int err, i;
         u64 msr;
 
-        if (!x86_match_cpu(has_knl_turbo_ratio_limits))
-                return false;
-
         err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
         if (err)
                 return false;
@@ -1977,7 +1974,8 @@ static bool intel_set_max_freq_ratio(void)
             skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
                 goto out;
 
-        if (knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
+        if (x86_match_cpu(has_knl_turbo_ratio_limits) &&
+            knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
                 goto out;
 
         if (x86_match_cpu(has_skx_turbo_ratio_limits) &&
--
cgit
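The boot/secondary split introduced by the static-key patch above follows a
common pattern: the boot CPU decides and enables, hotplugged CPUs only
observe. A simplified standalone sketch of that control flow follows (not
kernel code: the plain bool arch_scale_freq_enabled stands in for the
arch_scale_freq_key jump label, and the vendor/ratio checks on the boot path
are omitted):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the arch_scale_freq_key static branch. */
    static bool arch_scale_freq_enabled;

    static void init_counter_refs(void)
    {
            /* the per-CPU APERF/MPERF snapshot would be taken here */
            puts("snapshot APERF/MPERF on this CPU");
    }

    static void init_freq_invariance(bool secondary)
    {
            if (secondary) {
                    /*
                     * Secondary CPUs never enable the key themselves
                     * (doing so from the idle thread could sleep); they
                     * only follow the boot CPU's decision.
                     */
                    if (arch_scale_freq_enabled)
                            init_counter_refs();
                    return;
            }

            /* Boot CPU: decide once, snapshot, then enable. */
            init_counter_refs();
            arch_scale_freq_enabled = true;
    }

    int main(void)
    {
            init_freq_invariance(false);    /* boot CPU */
            init_freq_invariance(true);     /* each hotplugged CPU */
            return 0;
    }

This also shows why on_each_cpu() was the wrong tool: at the point the boot
CPU runs this, cpu0 is the only online CPU, so each secondary must take its
own snapshot when it comes up.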
From eaf5a92ebde5bca3bb2565616115bd6d579486cd Mon Sep 17 00:00:00 2001
From: Quentin Perret
Date: Thu, 16 Apr 2020 09:59:56 +0100
Subject: sched/core: Fix reset-on-fork from RT with uclamp

uclamp_fork() resets the uclamp values to their default when the
reset-on-fork flag is set. It also checks whether the task has a RT
policy, and sets its uclamp.min to 1024 accordingly. However, during
reset-on-fork, the task's policy is lowered to SCHED_NORMAL right after,
hence leading to an erroneous uclamp.min setting for the new task if it
was forked from RT.

Fix this by removing the unnecessary check on rt_task() in
uclamp_fork() as this doesn't make sense if the reset-on-fork flag is
set.

Fixes: 1a00d999971c ("sched/uclamp: Set default clamps for RT tasks")
Reported-by: Chitti Babu Theegala
Signed-off-by: Quentin Perret
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Patrick Bellasi
Reviewed-by: Dietmar Eggemann
Link: https://lkml.kernel.org/r/20200416085956.217587-1-qperret@google.com
---
 kernel/sched/core.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3a61a3b8eaa9..9a2fbf98fd6f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1232,13 +1232,8 @@ static void uclamp_fork(struct task_struct *p)
         return;
 
         for_each_clamp_id(clamp_id) {
-                unsigned int clamp_value = uclamp_none(clamp_id);
-
-                /* By default, RT tasks always get 100% boost */
-                if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
-                        clamp_value = uclamp_none(UCLAMP_MAX);
-
-                uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false);
+                uclamp_se_set(&p->uclamp_req[clamp_id],
+                              uclamp_none(clamp_id), false);
         }
 }
--
cgit
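The ordering bug fixed here can be modeled in a few lines. The following is a
userspace sketch, not kernel code: the two constants mirror what
uclamp_none() returns for UCLAMP_MIN and UCLAMP_MAX, and parent_is_rt models
the child still carrying the RT parent's policy when uclamp_fork() runs:

    #include <stdbool.h>
    #include <stdio.h>

    #define UCLAMP_MIN_DEFAULT      0       /* uclamp_none(UCLAMP_MIN) */
    #define UCLAMP_MAX_DEFAULT      1024    /* uclamp_none(UCLAMP_MAX) */

    int main(void)
    {
            bool parent_is_rt = true;

            /*
             * Before the fix: the policy is checked while the child still
             * looks like an RT task, even though reset-on-fork demotes it
             * to SCHED_NORMAL immediately afterwards.
             */
            int uclamp_min = parent_is_rt ? UCLAMP_MAX_DEFAULT
                                          : UCLAMP_MIN_DEFAULT;
            printf("buggy uclamp.min = %d\n", uclamp_min);  /* 1024, wrong */

            /* After the fix: reset-on-fork always restores the default. */
            uclamp_min = UCLAMP_MIN_DEFAULT;
            printf("fixed uclamp.min = %d\n", uclamp_min);  /* 0 */
            return 0;
    }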