author		Thomas Gleixner <tglx@linutronix.de>	2023-10-02 13:59:59 +0200
committer	Borislav Petkov (AMD) <bp@alien8.de>	2023-10-24 15:05:54 +0200
commit		0772b9aa1a8f7322dce8588c231cff8b57298a53 (patch)
tree		635b14036ad5f1d2d4590d19b083d860cabed888 /arch/x86/kernel/cpu
parent		6f059e634dcd0d725854514c94c114bbdd83950d (diff)
x86/microcode: Sanitize __wait_for_cpus()
The code is too complicated for no reason:

 - The return value is pointless as this is a strict boolean.

 - It's way simpler to count down from num_online_cpus() and check for
   zero.

 - The timeout argument is pointless as this is always one second.

 - Touching the NMI watchdog every 100ns does not make any sense, neither
   does checking every 100ns. This is really not a hotpath operation.

Preload the atomic counter with the number of online CPUs and simplify the
whole timeout logic. Delay for one microsecond and touch the NMI watchdog
once per millisecond.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20231002115903.204251527@linutronix.de
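[Editor's note: for readers outside the kernel tree, the following is a minimal
userspace sketch of the countdown rendezvous pattern the patch adopts, written
against C11 atomics and POSIX usleep(). The names (wait_for_all, poke_watchdog,
SKETCH_*) are illustrative stand-ins, not the kernel's; the real implementation
is in the diff below.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-ins for the kernel's USEC_PER_SEC / USEC_PER_MSEC constants. */
#define SKETCH_USEC_PER_SEC	1000000U
#define SKETCH_USEC_PER_MSEC	1000U

/* Userspace stand-in for touch_nmi_watchdog(); nothing to do here. */
static void poke_watchdog(void) { }

/*
 * Countdown rendezvous: the counter is preloaded with the number of
 * participants. Each participant decrements it, then spins until the
 * counter reaches zero or roughly one second has elapsed.
 */
static bool wait_for_all(atomic_int *cnt)
{
	unsigned int timeout;

	atomic_fetch_sub(cnt, 1);

	for (timeout = 0; timeout < SKETCH_USEC_PER_SEC; timeout++) {
		if (!atomic_load(cnt))
			return true;

		usleep(1);

		/* Poke the watchdog once per millisecond, not every step. */
		if (!(timeout % SKETCH_USEC_PER_MSEC))
			poke_watchdog();
	}

	/* Keep late comers from making progress; let them time out too. */
	atomic_fetch_add(cnt, 1);
	return false;
}

int main(void)
{
	atomic_int cnt = 1;	/* single participant: rendezvous succeeds at once */

	printf("rendezvous %s\n", wait_for_all(&cnt) ? "reached" : "timed out");
	return 0;
}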
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/microcode/core.c	39
1 file changed, 17 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 63c4e120d7fd..9fd5a9618c50 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -252,31 +252,26 @@ static struct platform_device *microcode_pdev;
* requirement can be relaxed in the future. Right now, this is conservative
* and good.
*/
-#define SPINUNIT 100 /* 100 nsec */
+static atomic_t late_cpus_in, late_cpus_out;
 
-
-static atomic_t late_cpus_in;
-static atomic_t late_cpus_out;
-
-static int __wait_for_cpus(atomic_t *t, long long timeout)
+static bool wait_for_cpus(atomic_t *cnt)
 {
-	int all_cpus = num_online_cpus();
+	unsigned int timeout;
 
-	atomic_inc(t);
+	WARN_ON_ONCE(atomic_dec_return(cnt) < 0);
 
-	while (atomic_read(t) < all_cpus) {
-		if (timeout < SPINUNIT) {
-			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
-				all_cpus - atomic_read(t));
-			return 1;
-		}
+	for (timeout = 0; timeout < USEC_PER_SEC; timeout++) {
+		if (!atomic_read(cnt))
+			return true;
 
-		ndelay(SPINUNIT);
-		timeout -= SPINUNIT;
+		udelay(1);
 
-		touch_nmi_watchdog();
+		if (!(timeout % USEC_PER_MSEC))
+			touch_nmi_watchdog();
 	}
-	return 0;
+	/* Prevent the late comers from making progress and let them time out */
+	atomic_inc(cnt);
+	return false;
 }
/*
@@ -294,7 +289,7 @@ static int __reload_late(void *info)
* Wait for all CPUs to arrive. A load will not be attempted unless all
* CPUs show up.
* */
-	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+	if (!wait_for_cpus(&late_cpus_in))
 		return -1;
/*
@@ -317,7 +312,7 @@ static int __reload_late(void *info)
}
wait_for_siblings:
-	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC))
+	if (!wait_for_cpus(&late_cpus_out))
 		panic("Timeout during microcode update!\n");
/*
@@ -344,8 +339,8 @@ static int microcode_reload_late(void)
pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
pr_err("You should switch to early loading, if possible.\n");
-	atomic_set(&late_cpus_in, 0);
-	atomic_set(&late_cpus_out, 0);
+	atomic_set(&late_cpus_in, num_online_cpus());
+	atomic_set(&late_cpus_out, num_online_cpus());
/*
* Take a snapshot before the microcode update in order to compare and