Diffstat (limited to 'arch/x86/kernel/cpu/microcode')
-rw-r--r--  arch/x86/kernel/cpu/microcode/Makefile            |    4
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c               |  898
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd_shas.c          |  556
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c              |  843
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel-ucode-defs.h  |  160
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c             | 1142
-rw-r--r--  arch/x86/kernel/cpu/microcode/internal.h          |  136
7 files changed, 2695 insertions, 1044 deletions
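
Central to the amd.c changes below: on Zen and newer (family 0x17+) the equivalence table is ignored, because the patch revision ID itself encodes the family/model/stepping the blob is built for, and the loader converts between CPUID(1).EAX and patch IDs via two bitfield unions. The following is a minimal, standalone C sketch of that conversion; the unions mirror the ones added in amd.c, the sample CPUID value 0x00a20f12 (family 0x19, model 0x21, stepping 2) is only an illustrative assumption, and the bitfield layout assumes a little-endian x86 compiler, as the kernel code itself does.

#include <stdio.h>
#include <stdint.h>

/* Mirrors union zen_patch_rev from the patch */
union zen_patch_rev {
	struct {
		uint32_t rev        : 8,
			 stepping   : 4,
			 model      : 4,
			 __reserved : 4,
			 ext_model  : 4,
			 ext_fam    : 8;
	};
	uint32_t ucode_rev;
};

/* Mirrors union cpuid_1_eax from the patch */
union cpuid_1_eax {
	struct {
		uint32_t stepping    : 4,
			 model       : 4,
			 family      : 4,
			 __reserved0 : 4,
			 ext_model   : 4,
			 ext_fam     : 8,
			 __reserved1 : 4;
	};
	uint32_t full;
};

/* Same packing as cpuid_to_ucode_rev() in amd.c */
static uint32_t cpuid_to_ucode_rev(uint32_t val)
{
	union zen_patch_rev p = { .ucode_rev = 0 };
	union cpuid_1_eax c = { .full = val };

	p.stepping  = c.stepping;
	p.model     = c.model;
	p.ext_model = c.ext_model;
	p.ext_fam   = c.ext_fam;

	return p.ucode_rev;
}

int main(void)
{
	/* Illustrative CPUID(1).EAX: family 0x19 (0xf + ext_fam 0xa), model 0x21, stepping 2 */
	uint32_t eax = 0x00a20f12;

	/* Prints 0x0a201200, the "lowest" patch revision for this f/m/s */
	printf("CPUID(1).EAX 0x%08x -> base patch ID 0x%08x\n",
	       (unsigned)eax, (unsigned)cpuid_to_ucode_rev(eax));
	return 0;
}

Note how the result lines up with the 0xa2012xx patch IDs in the hash table below and with the 0xa201212 cutoff in get_cutoff_revision(); ucode_rev_to_cpuid() in the patch is the inverse mapping, with the base family fixed at 0xf.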
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile index 34098d48c48f..193d98b33a0a 100644 --- a/arch/x86/kernel/cpu/microcode/Makefile +++ b/arch/x86/kernel/cpu/microcode/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only microcode-y := core.o obj-$(CONFIG_MICROCODE) += microcode.o -microcode-$(CONFIG_MICROCODE_INTEL) += intel.o -microcode-$(CONFIG_MICROCODE_AMD) += amd.o +microcode-$(CONFIG_CPU_SUP_INTEL) += intel.o +microcode-$(CONFIG_CPU_SUP_AMD) += amd.o diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 56471f750762..3821a985f4ff 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -23,24 +23,102 @@ #include <linux/earlycpio.h> #include <linux/firmware.h> +#include <linux/bsearch.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/initrd.h> #include <linux/kernel.h> #include <linux/pci.h> -#include <asm/microcode_amd.h> +#include <crypto/sha2.h> + #include <asm/microcode.h> #include <asm/processor.h> +#include <asm/cmdline.h> #include <asm/setup.h> #include <asm/cpu.h> #include <asm/msr.h> +#include <asm/tlb.h> + +#include "internal.h" + +struct ucode_patch { + struct list_head plist; + void *data; + unsigned int size; + u32 patch_id; + u16 equiv_cpu; +}; + +static LIST_HEAD(microcode_cache); + +#define UCODE_MAGIC 0x00414d44 +#define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 +#define UCODE_UCODE_TYPE 0x00000001 + +#define SECTION_HDR_SIZE 8 +#define CONTAINER_HDR_SZ 12 + +struct equiv_cpu_entry { + u32 installed_cpu; + u32 fixed_errata_mask; + u32 fixed_errata_compare; + u16 equiv_cpu; + u16 res; +} __packed; + +struct microcode_header_amd { + u32 data_code; + u32 patch_id; + u16 mc_patch_data_id; + u8 mc_patch_data_len; + u8 init_flag; + u32 mc_patch_data_checksum; + u32 nb_dev_id; + u32 sb_dev_id; + u16 processor_rev_id; + u8 nb_rev_id; + u8 sb_rev_id; + u8 bios_api_rev; + u8 reserved1[3]; + u32 match_reg[8]; +} __packed; + +struct microcode_amd { + struct microcode_header_amd hdr; + unsigned int mpb[]; +}; static struct equiv_cpu_table { unsigned int num_entries; struct equiv_cpu_entry *entry; } equiv_table; +union zen_patch_rev { + struct { + __u32 rev : 8, + stepping : 4, + model : 4, + __reserved : 4, + ext_model : 4, + ext_fam : 8; + }; + __u32 ucode_rev; +}; + +union cpuid_1_eax { + struct { + __u32 stepping : 4, + model : 4, + family : 4, + __reserved0 : 4, + ext_model : 4, + ext_fam : 8, + __reserved1 : 4; + }; + __u32 full; +}; + /* * This points to the current valid container of microcode patches which we will * save from the initrd/builtin before jettisoning its contents. @mc is the @@ -48,26 +126,230 @@ static struct equiv_cpu_table { */ struct cont_desc { struct microcode_amd *mc; - u32 cpuid_1_eax; u32 psize; u8 *data; size_t size; }; -static u32 ucode_new_rev; -static u8 amd_ucode_patch[PATCH_MAX_SIZE]; - /* * Microcode patch container file is prepended to the initrd in cpio - * format. See Documentation/x86/microcode.rst + * format. See Documentation/arch/x86/microcode.rst */ static const char ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin"; +/* + * This is CPUID(1).EAX on the BSP. It is used in two ways: + * + * 1. To ignore the equivalence table on Zen1 and newer. + * + * 2. To match which patches to load because the patch revision ID + * already contains the f/m/s for which the microcode is destined + * for. 
+ */ +static u32 bsp_cpuid_1_eax __ro_after_init; + +static bool sha_check = true; + +struct patch_digest { + u32 patch_id; + u8 sha256[SHA256_DIGEST_SIZE]; +}; + +#include "amd_shas.c" + +static int cmp_id(const void *key, const void *elem) +{ + struct patch_digest *pd = (struct patch_digest *)elem; + u32 patch_id = *(u32 *)key; + + if (patch_id == pd->patch_id) + return 0; + else if (patch_id < pd->patch_id) + return -1; + else + return 1; +} + +static u32 cpuid_to_ucode_rev(unsigned int val) +{ + union zen_patch_rev p = {}; + union cpuid_1_eax c; + + c.full = val; + + p.stepping = c.stepping; + p.model = c.model; + p.ext_model = c.ext_model; + p.ext_fam = c.ext_fam; + + return p.ucode_rev; +} + +static u32 get_cutoff_revision(u32 rev) +{ + switch (rev >> 8) { + case 0x80012: return 0x8001277; break; + case 0x80082: return 0x800820f; break; + case 0x83010: return 0x830107c; break; + case 0x86001: return 0x860010e; break; + case 0x86081: return 0x8608108; break; + case 0x87010: return 0x8701034; break; + case 0x8a000: return 0x8a0000a; break; + case 0xa0010: return 0xa00107a; break; + case 0xa0011: return 0xa0011da; break; + case 0xa0012: return 0xa001243; break; + case 0xa0082: return 0xa00820e; break; + case 0xa1011: return 0xa101153; break; + case 0xa1012: return 0xa10124e; break; + case 0xa1081: return 0xa108109; break; + case 0xa2010: return 0xa20102f; break; + case 0xa2012: return 0xa201212; break; + case 0xa4041: return 0xa404109; break; + case 0xa5000: return 0xa500013; break; + case 0xa6012: return 0xa60120a; break; + case 0xa7041: return 0xa704109; break; + case 0xa7052: return 0xa705208; break; + case 0xa7080: return 0xa708009; break; + case 0xa70c0: return 0xa70C009; break; + case 0xaa001: return 0xaa00116; break; + case 0xaa002: return 0xaa00218; break; + case 0xb0021: return 0xb002146; break; + case 0xb0081: return 0xb008111; break; + case 0xb1010: return 0xb101046; break; + case 0xb2040: return 0xb204031; break; + case 0xb4040: return 0xb404031; break; + case 0xb4041: return 0xb404101; break; + case 0xb6000: return 0xb600031; break; + case 0xb6080: return 0xb608031; break; + case 0xb7000: return 0xb700031; break; + default: break; + + } + return 0; +} + +static bool need_sha_check(u32 cur_rev) +{ + u32 cutoff; + + if (!cur_rev) { + cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax); + pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev); + } + + cutoff = get_cutoff_revision(cur_rev); + if (cutoff) + return cur_rev <= cutoff; + + pr_info("You should not be seeing this. 
Please send the following couple of lines to x86-<at>-kernel.org\n"); + pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev); + return true; +} + +static bool cpu_has_entrysign(void) +{ + unsigned int fam = x86_family(bsp_cpuid_1_eax); + unsigned int model = x86_model(bsp_cpuid_1_eax); + + if (fam == 0x17 || fam == 0x19) + return true; + + if (fam == 0x1a) { + if (model <= 0x2f || + (0x40 <= model && model <= 0x4f) || + (0x60 <= model && model <= 0x6f)) + return true; + } + + return false; +} + +static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len) +{ + struct patch_digest *pd = NULL; + u8 digest[SHA256_DIGEST_SIZE]; + int i; + + if (!cpu_has_entrysign()) + return true; + + if (!need_sha_check(cur_rev)) + return true; + + if (!sha_check) + return true; + + pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id); + if (!pd) { + pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id); + return false; + } + + sha256(data, len, digest); + + if (memcmp(digest, pd->sha256, sizeof(digest))) { + pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id); + + for (i = 0; i < SHA256_DIGEST_SIZE; i++) + pr_cont("0x%x ", digest[i]); + pr_info("\n"); + + return false; + } + + return true; +} + +static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val) +{ + union zen_patch_rev p; + union cpuid_1_eax c; + + p.ucode_rev = val; + c.full = 0; + + c.stepping = p.stepping; + c.model = p.model; + c.ext_model = p.ext_model; + c.family = 0xf; + c.ext_fam = p.ext_fam; + + return c; +} + +static u32 get_patch_level(void) +{ + u32 rev, dummy __always_unused; + + if (IS_ENABLED(CONFIG_MICROCODE_DBG)) { + int cpu = smp_processor_id(); + + if (!microcode_rev[cpu]) { + if (!base_rev) + base_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax); + + microcode_rev[cpu] = base_rev; + + ucode_dbg("CPU%d, base_rev: 0x%x\n", cpu, base_rev); + } + + return microcode_rev[cpu]; + } + + native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); + + return rev; +} + static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) { unsigned int i; + /* Zen and newer do not need an equivalence table. */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + return 0; + if (!et || !et->num_entries) return 0; @@ -76,32 +358,26 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) if (sig == e->installed_cpu) return e->equiv_cpu; - - e++; } return 0; } /* * Check whether there is a valid microcode container file at the beginning - * of @buf of size @buf_size. Set @early to use this function in the early path. + * of @buf of size @buf_size. */ -static bool verify_container(const u8 *buf, size_t buf_size, bool early) +static bool verify_container(const u8 *buf, size_t buf_size) { u32 cont_magic; if (buf_size <= CONTAINER_HDR_SZ) { - if (!early) - pr_debug("Truncated microcode container header.\n"); - + ucode_dbg("Truncated microcode container header.\n"); return false; } cont_magic = *(const u32 *)buf; if (cont_magic != UCODE_MAGIC) { - if (!early) - pr_debug("Invalid magic value (0x%08x).\n", cont_magic); - + ucode_dbg("Invalid magic value (0x%08x).\n", cont_magic); return false; } @@ -110,23 +386,24 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated CPU equivalence table at the - * beginning of @buf of size @buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. 
*/ -static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) +static bool verify_equivalence_table(const u8 *buf, size_t buf_size) { const u32 *hdr = (const u32 *)buf; u32 cont_type, equiv_tbl_len; - if (!verify_container(buf, buf_size, early)) + if (!verify_container(buf, buf_size)) return false; + /* Zen and newer do not need an equivalence table. */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + return true; + cont_type = hdr[1]; if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) { - if (!early) - pr_debug("Wrong microcode container equivalence table type: %u.\n", - cont_type); - + ucode_dbg("Wrong microcode container equivalence table type: %u.\n", + cont_type); return false; } @@ -135,9 +412,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) equiv_tbl_len = hdr[2]; if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) || buf_size < equiv_tbl_len) { - if (!early) - pr_debug("Truncated equivalence table.\n"); - + ucode_dbg("Truncated equivalence table.\n"); return false; } @@ -146,22 +421,18 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated microcode patch section at the - * beginning of @buf of size @buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. * * On success, @sh_psize returns the patch size according to the section header, * to the caller. */ -static bool -__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early) +static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) { u32 p_type, p_size; const u32 *hdr; if (buf_size < SECTION_HDR_SIZE) { - if (!early) - pr_debug("Truncated patch section.\n"); - + ucode_dbg("Truncated patch section.\n"); return false; } @@ -170,17 +441,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early p_size = hdr[1]; if (p_type != UCODE_UCODE_TYPE) { - if (!early) - pr_debug("Invalid type field (0x%x) in container file section header.\n", - p_type); - + ucode_dbg("Invalid type field (0x%x) in container file section header.\n", + p_type); return false; } if (p_size < sizeof(struct microcode_header_amd)) { - if (!early) - pr_debug("Patch of size %u too short.\n", p_size); - + ucode_dbg("Patch of size %u too short.\n", p_size); return false; } @@ -195,12 +462,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early * exceed the per-family maximum). @sh_psize is the size read from the section * header. */ -static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size) +static bool __verify_patch_size(u32 sh_psize, size_t buf_size) { + u8 family = x86_family(bsp_cpuid_1_eax); u32 max_size; if (family >= 0x15) - return min_t(u32, sh_psize, buf_size); + goto ret; #define F1XH_MPB_MAX_SIZE 2048 #define F14H_MPB_MAX_SIZE 1824 @@ -214,13 +482,15 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size break; default: WARN(1, "%s: WTF family: 0x%x\n", __func__, family); - return 0; + return false; } - if (sh_psize > min_t(u32, buf_size, max_size)) - return 0; + if (sh_psize > max_size) + return false; - return sh_psize; +ret: + /* Working with the whole buffer so < is ok. 
*/ + return sh_psize <= buf_size; } /* @@ -231,16 +501,16 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size * positive: patch is not for this family, skip it * 0: success */ -static int -verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early) +static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size) { + u8 family = x86_family(bsp_cpuid_1_eax); struct microcode_header_amd *mc_hdr; - unsigned int ret; + u32 cur_rev, cutoff, patch_rev; u32 sh_psize; u16 proc_id; u8 patch_fam; - if (!__verify_patch_section(buf, buf_size, &sh_psize, early)) + if (!__verify_patch_section(buf, buf_size, &sh_psize)) return -1; /* @@ -255,16 +525,12 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea * size sh_psize, as the section claims. */ if (buf_size < sh_psize) { - if (!early) - pr_debug("Patch of size %u truncated.\n", sh_psize); - + ucode_dbg("Patch of size %u truncated.\n", sh_psize); return -1; } - ret = __verify_patch_size(family, sh_psize, buf_size); - if (!ret) { - if (!early) - pr_debug("Per-family patch size mismatch.\n"); + if (!__verify_patch_size(sh_psize, buf_size)) { + ucode_dbg("Per-family patch size mismatch.\n"); return -1; } @@ -272,23 +538,55 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE); if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { - if (!early) - pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); + pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); return -1; } proc_id = mc_hdr->processor_rev_id; patch_fam = 0xf + (proc_id >> 12); + if (patch_fam != family) return 1; + cur_rev = get_patch_level(); + + /* No cutoff revision means old/unaffected by signing algorithm weakness => matches */ + cutoff = get_cutoff_revision(cur_rev); + if (!cutoff) + goto ok; + + patch_rev = mc_hdr->patch_id; + + ucode_dbg("cur_rev: 0x%x, cutoff: 0x%x, patch_rev: 0x%x\n", + cur_rev, cutoff, patch_rev); + + if (cur_rev <= cutoff && patch_rev <= cutoff) + goto ok; + + if (cur_rev > cutoff && patch_rev > cutoff) + goto ok; + + return 1; + +ok: + ucode_dbg("Patch-ID 0x%08x: family: 0x%x\n", mc_hdr->patch_id, patch_fam); + return 0; } +static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id) +{ + /* Zen and newer do not need an equivalence table. */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax; + else + return eq_id == mc->hdr.processor_rev_id; +} + /* * This scans the ucode blob for the proper container as we can have multiple - * containers glued together. Returns the equivalence ID from the equivalence - * table or 0 if none found. + * containers glued together. + * * Returns the amount of bytes consumed while scanning. @desc contains all the * data we're going to use in later stages of the application. */ @@ -300,7 +598,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u16 eq_id; u8 *buf; - if (!verify_equivalence_table(ucode, size, true)) + if (!verify_equivalence_table(ucode, size)) return 0; buf = ucode; @@ -313,7 +611,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) * doesn't contain a patch for the CPU, scan through the whole container * so that it can be skipped in case there are other containers appended. 
*/ - eq_id = find_equiv_id(&table, desc->cpuid_1_eax); + eq_id = find_equiv_id(&table, bsp_cpuid_1_eax); buf += hdr[2] + CONTAINER_HDR_SZ; size -= hdr[2] + CONTAINER_HDR_SZ; @@ -327,11 +625,12 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u32 patch_size; int ret; - ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true); + ret = verify_patch(buf, size, &patch_size); if (ret < 0) { /* - * Patch verification failed, skip to the next - * container, if there's one: + * Patch verification failed, skip to the next container, if + * there is one. Before exit, check whether that container has + * found a patch already. If so, use it. */ goto out; } else if (ret > 0) { @@ -339,9 +638,12 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) } mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE); - if (eq_id == mc->hdr.processor_rev_id) { + + if (mc_patch_matches(mc, eq_id)) { desc->psize = patch_size; desc->mc = mc; + + ucode_dbg(" match: size: %d\n", patch_size); } skip: @@ -350,6 +652,7 @@ skip: size -= patch_size + SECTION_HDR_SIZE; } +out: /* * If we have found a patch (desc->mc), it means we're looking at the * container which has a patch for this CPU so return 0 to mean, @ucode @@ -364,7 +667,6 @@ skip: return 0; } -out: return orig_size - size; } @@ -389,80 +691,47 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc) } } -static int __apply_microcode_amd(struct microcode_amd *mc) +static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev, + unsigned int psize) { - u32 rev, dummy; - - native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code); - - /* verify patch application was successful */ - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - if (rev != mc->hdr.patch_id) - return -1; - - return 0; -} + unsigned long p_addr = (unsigned long)&mc->hdr.data_code; -/* - * Early load occurs before we can vmalloc(). So we look for the microcode - * patch container file in initrd, traverse equivalent cpu table, look for a - * matching microcode patch, and update, all in initrd memory in place. - * When vmalloc() is available for use later -- on 64-bit during first AP load, - * and on 32-bit during save_microcode_in_initrd_amd() -- we can call - * load_microcode_amd() to save equivalent cpu table and microcode patches in - * kernel heap memory. - * - * Returns true if container found (sets @desc), false otherwise. - */ -static bool -apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch) -{ - struct cont_desc desc = { 0 }; - u8 (*patch)[PATCH_MAX_SIZE]; - struct microcode_amd *mc; - u32 rev, dummy, *new_rev; - bool ret = false; + if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize)) + return false; -#ifdef CONFIG_X86_32 - new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); - patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch); -#else - new_rev = &ucode_new_rev; - patch = &amd_ucode_patch; -#endif + native_wrmsrq(MSR_AMD64_PATCH_LOADER, p_addr); - desc.cpuid_1_eax = cpuid_1_eax; + if (x86_family(bsp_cpuid_1_eax) == 0x17) { + unsigned long p_addr_end = p_addr + psize - 1; - scan_containers(ucode, size, &desc); + invlpg(p_addr); - mc = desc.mc; - if (!mc) - return ret; + /* + * Flush next page too if patch image is crossing a page + * boundary. 
+ */ + if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT) + invlpg(p_addr_end); + } - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); + if (IS_ENABLED(CONFIG_MICROCODE_DBG)) + microcode_rev[smp_processor_id()] = mc->hdr.patch_id; - /* - * Allow application of the same revision to pick up SMT-specific - * changes even if the revision of the other SMT thread is already - * up-to-date. - */ - if (rev > mc->hdr.patch_id) - return ret; + /* verify patch application was successful */ + *cur_rev = get_patch_level(); - if (!__apply_microcode_amd(mc)) { - *new_rev = mc->hdr.patch_id; - ret = true; + ucode_dbg("updated rev: 0x%x\n", *cur_rev); - if (save_patch) - memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE)); - } + if (*cur_rev != mc->hdr.patch_id) + return false; - return ret; + return true; } -static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) +static bool get_builtin_microcode(struct cpio_data *cp) { char fw_name[36] = "amd-ucode/microcode_amd.bin"; + u8 family = x86_family(bsp_cpuid_1_eax); struct firmware fw; if (IS_ENABLED(CONFIG_X86_32)) @@ -470,7 +739,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) if (family >= 0x15) snprintf(fw_name, sizeof(fw_name), - "amd-ucode/microcode_amd_fam%.2xh.bin", family); + "amd-ucode/microcode_amd_fam%02hhxh.bin", family); if (firmware_request_builtin(&fw, fw_name)) { cp->size = fw.size; @@ -481,146 +750,142 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) return false; } -static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret) +static bool __init find_blobs_in_containers(struct cpio_data *ret) { - struct ucode_cpu_info *uci; struct cpio_data cp; - const char *path; - bool use_pa; + bool found; - if (IS_ENABLED(CONFIG_X86_32)) { - uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - uci = ucode_cpu_info; - path = ucode_path; - use_pa = false; - } + if (!get_builtin_microcode(&cp)) + cp = find_microcode_in_initrd(ucode_path); - if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) - cp = find_microcode_in_initrd(path, use_pa); + found = cp.data && cp.size; + if (found) + *ret = cp; - /* Needed in load_microcode_amd() */ - uci->cpu_sig.sig = cpuid_1_eax; - - *ret = cp; + return found; } -void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax) -{ - struct cpio_data cp = { }; - - __load_ucode_amd(cpuid_1_eax, &cp); - if (!(cp.data && cp.size)) - return; - - apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true); -} - -void load_ucode_amd_ap(unsigned int cpuid_1_eax) +/* + * Early load occurs before we can vmalloc(). So we look for the microcode + * patch container file in initrd, traverse equivalent cpu table, look for a + * matching microcode patch, and update, all in initrd memory in place. + * When vmalloc() is available for use later -- on 64-bit during first AP load, + * and on 32-bit during save_microcode_in_initrd() -- we can call + * load_microcode_amd() to save equivalent cpu table and microcode patches in + * kernel heap memory. 
+ */ +void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax) { + struct cont_desc desc = { }; struct microcode_amd *mc; - struct cpio_data cp; - u32 *new_rev, rev, dummy; - - if (IS_ENABLED(CONFIG_X86_32)) { - mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch); - new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); - } else { - mc = (struct microcode_amd *)amd_ucode_patch; - new_rev = &ucode_new_rev; - } - - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - - /* - * Check whether a new patch has been saved already. Also, allow application of - * the same revision in order to pick up SMT-thread-specific configuration even - * if the sibling SMT thread already has an up-to-date revision. - */ - if (*new_rev && rev <= mc->hdr.patch_id) { - if (!__apply_microcode_amd(mc)) { - *new_rev = mc->hdr.patch_id; - return; + struct cpio_data cp = { }; + char buf[4]; + u32 rev; + + if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) { + if (!strncmp(buf, "off", 3)) { + sha_check = false; + pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n"); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); } } - __load_ucode_amd(cpuid_1_eax, &cp); - if (!(cp.data && cp.size)) - return; + bsp_cpuid_1_eax = cpuid_1_eax; - apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false); -} + rev = get_patch_level(); + ed->old_rev = rev; -static enum ucode_state -load_microcode_amd(bool save, u8 family, const u8 *data, size_t size); - -int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) -{ - struct cont_desc desc = { 0 }; - enum ucode_state ret; - struct cpio_data cp; - - cp = find_microcode_in_initrd(ucode_path, false); - if (!(cp.data && cp.size)) - return -EINVAL; + /* Needed in load_microcode_amd() */ + ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; - desc.cpuid_1_eax = cpuid_1_eax; + if (!find_blobs_in_containers(&cp)) + return; scan_containers(cp.data, cp.size, &desc); - if (!desc.mc) - return -EINVAL; - ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); - if (ret > UCODE_UPDATED) - return -EINVAL; + mc = desc.mc; + if (!mc) + return; - return 0; + /* + * Allow application of the same revision to pick up SMT-specific + * changes even if the revision of the other SMT thread is already + * up-to-date. 
+ */ + if (ed->old_rev > mc->hdr.patch_id) + return; + + if (__apply_microcode_amd(mc, &rev, desc.psize)) + ed->new_rev = rev; } -void reload_ucode_amd(void) +static inline bool patch_cpus_equivalent(struct ucode_patch *p, + struct ucode_patch *n, + bool ignore_stepping) { - struct microcode_amd *mc; - u32 rev, dummy __always_unused; - - mc = (struct microcode_amd *)amd_ucode_patch; - - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - - if (rev < mc->hdr.patch_id) { - if (!__apply_microcode_amd(mc)) { - ucode_new_rev = mc->hdr.patch_id; - pr_info("reload patch_level=0x%08x\n", ucode_new_rev); + /* Zen and newer hardcode the f/m/s in the patch ID */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) { + union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id); + union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id); + + if (ignore_stepping) { + p_cid.stepping = 0; + n_cid.stepping = 0; } + + return p_cid.full == n_cid.full; + } else { + return p->equiv_cpu == n->equiv_cpu; } } -static u16 __find_equiv_id(unsigned int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - return find_equiv_id(&equiv_table, uci->cpu_sig.sig); -} /* * a small, trivial cache of per-family ucode patches */ -static struct ucode_patch *cache_find_patch(u16 equiv_cpu) +static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu) { struct ucode_patch *p; + struct ucode_patch n; + + n.equiv_cpu = equiv_cpu; + n.patch_id = uci->cpu_sig.rev; list_for_each_entry(p, &microcode_cache, plist) - if (p->equiv_cpu == equiv_cpu) + if (patch_cpus_equivalent(p, &n, false)) return p; + return NULL; } +static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n) +{ + /* Zen and newer hardcode the f/m/s in the patch ID */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) { + union zen_patch_rev zp, zn; + + zp.ucode_rev = p->patch_id; + zn.ucode_rev = n->patch_id; + + if (zn.stepping != zp.stepping) + return -1; + + return zn.rev > zp.rev; + } else { + return n->patch_id > p->patch_id; + } +} + static void update_cache(struct ucode_patch *new_patch) { struct ucode_patch *p; + int ret; list_for_each_entry(p, &microcode_cache, plist) { - if (p->equiv_cpu == new_patch->equiv_cpu) { - if (p->patch_id >= new_patch->patch_id) { + if (patch_cpus_equivalent(p, new_patch, true)) { + ret = patch_newer(p, new_patch); + if (ret < 0) + continue; + else if (!ret) { /* we already have the latest patch */ kfree(new_patch->data); kfree(new_patch); @@ -650,23 +915,46 @@ static void free_cache(void) static struct ucode_patch *find_patch(unsigned int cpu) { - u16 equiv_id; + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + u16 equiv_id = 0; - equiv_id = __find_equiv_id(cpu); - if (!equiv_id) - return NULL; + uci->cpu_sig.rev = get_patch_level(); - return cache_find_patch(equiv_id); + if (x86_family(bsp_cpuid_1_eax) < 0x17) { + equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); + if (!equiv_id) + return NULL; + } + + return cache_find_patch(uci, equiv_id); +} + +void reload_ucode_amd(unsigned int cpu) +{ + u32 rev, dummy __always_unused; + struct microcode_amd *mc; + struct ucode_patch *p; + + p = find_patch(cpu); + if (!p) + return; + + mc = p->data; + + rev = get_patch_level(); + if (rev < mc->hdr.patch_id) { + if (__apply_microcode_amd(mc, &rev, p->size)) + pr_info_once("reload revision: 0x%08x\n", rev); + } } static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) { - struct cpuinfo_x86 *c = &cpu_data(cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct ucode_patch *p; csig->sig = 
cpuid_eax(0x00000001); - csig->rev = c->microcode; + csig->rev = get_patch_level(); /* * a patch could have been loaded early, set uci->mc so that @@ -676,8 +964,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) if (p && (p->patch_id == csig->rev)) uci->mc = p->data; - pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); - return 0; } @@ -688,7 +974,7 @@ static enum ucode_state apply_microcode_amd(int cpu) struct ucode_cpu_info *uci; struct ucode_patch *p; enum ucode_state ret; - u32 rev, dummy __always_unused; + u32 rev; BUG_ON(raw_smp_processor_id() != cpu); @@ -698,18 +984,18 @@ static enum ucode_state apply_microcode_amd(int cpu) if (!p) return UCODE_NFOUND; + rev = uci->cpu_sig.rev; + mc_amd = p->data; uci->mc = p->data; - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - /* need to apply patch? */ - if (rev >= mc_amd->hdr.patch_id) { + if (rev > mc_amd->hdr.patch_id) { ret = UCODE_OK; goto out; } - if (__apply_microcode_amd(mc_amd)) { + if (!__apply_microcode_amd(mc_amd, &rev, p->size)) { pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); return UCODE_ERROR; @@ -718,8 +1004,6 @@ static enum ucode_state apply_microcode_amd(int cpu) rev = mc_amd->hdr.patch_id; ret = UCODE_UPDATED; - pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev); - out: uci->cpu_sig.rev = rev; c->microcode = rev; @@ -731,17 +1015,29 @@ out: return ret; } +void load_ucode_amd_ap(unsigned int cpuid_1_eax) +{ + unsigned int cpu = smp_processor_id(); + + ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax; + apply_microcode_amd(cpu); +} + static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) { u32 equiv_tbl_len; const u32 *hdr; - if (!verify_equivalence_table(buf, buf_size, false)) + if (!verify_equivalence_table(buf, buf_size)) return 0; hdr = (const u32 *)buf; equiv_tbl_len = hdr[2]; + /* Zen and newer do not need an equivalence table. */ + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + goto out; + equiv_table.entry = vmalloc(equiv_tbl_len); if (!equiv_table.entry) { pr_err("failed to allocate equivalent CPU table\n"); @@ -751,12 +1047,16 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len); equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry); +out: /* add header length */ return equiv_tbl_len + CONTAINER_HDR_SZ; } static void free_equiv_cpu_table(void) { + if (x86_family(bsp_cpuid_1_eax) >= 0x17) + return; + vfree(equiv_table.entry); memset(&equiv_table, 0, sizeof(equiv_table)); } @@ -782,7 +1082,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, u16 proc_id; int ret; - ret = verify_patch(family, fw, leftover, patch_size, false); + ret = verify_patch(fw, leftover, patch_size); if (ret) return ret; @@ -807,7 +1107,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, patch->patch_id = mc_hdr->patch_id; patch->equiv_cpu = proc_id; - pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n", + ucode_dbg("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n", __func__, patch->patch_id, proc_id); /* ... and add to cache. */ @@ -816,8 +1116,8 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, return 0; } -static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, - size_t size) +/* Scan the blob in @data and add microcode patches to the cache. 
*/ +static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size) { u8 *fw = (u8 *)data; size_t offset; @@ -850,40 +1150,75 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, return UCODE_OK; } -static enum ucode_state -load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) +static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size) { - struct ucode_patch *p; enum ucode_state ret; /* free old equiv table */ free_equiv_cpu_table(); ret = __load_microcode_amd(family, data, size); - if (ret != UCODE_OK) { + if (ret != UCODE_OK) cleanup(); - return ret; - } - p = find_patch(0); - if (!p) { + return ret; +} + +static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) +{ + struct cpuinfo_x86 *c; + unsigned int nid, cpu; + struct ucode_patch *p; + enum ucode_state ret; + + ret = _load_microcode_amd(family, data, size); + if (ret != UCODE_OK) return ret; - } else { - if (boot_cpu_data.microcode >= p->patch_id) - return ret; + + for_each_node_with_cpus(nid) { + cpu = cpumask_first(cpumask_of_node(nid)); + c = &cpu_data(cpu); + + p = find_patch(cpu); + if (!p) + continue; + + if (c->microcode >= p->patch_id) + continue; ret = UCODE_NEW; } - /* save BSP's matching patch for early load */ - if (!save) - return ret; + return ret; +} - memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); - memcpy(amd_ucode_patch, p->data, min_t(u32, p->size, PATCH_MAX_SIZE)); +static int __init save_microcode_in_initrd(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + struct cont_desc desc = { 0 }; + unsigned int cpuid_1_eax; + enum ucode_state ret; + struct cpio_data cp; - return ret; + if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) + return 0; + + cpuid_1_eax = native_cpuid_eax(1); + + if (!find_blobs_in_containers(&cp)) + return -EINVAL; + + scan_containers(cp.data, cp.size, &desc); + if (!desc.mc) + return -EINVAL; + + ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); + if (ret > UCODE_UPDATED) + return -EINVAL; + + return 0; } +early_initcall(save_microcode_in_initrd); /* * AMD microcode firmware naming convention, up to family 15h they are in @@ -905,27 +1240,25 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device) { char fw_name[36] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); - bool bsp = c->cpu_index == boot_cpu_data.cpu_index; enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; - /* reload ucode container only on the boot cpu */ - if (!bsp) - return UCODE_OK; + if (force_minrev) + return UCODE_NFOUND; if (c->x86 >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); if (request_firmware_direct(&fw, (const char *)fw_name, device)) { - pr_debug("failed to load file %s\n", fw_name); + ucode_dbg("failed to load file %s\n", fw_name); goto out; } ret = UCODE_ERROR; - if (!verify_container(fw->data, fw->size, false)) + if (!verify_container(fw->data, fw->size)) goto fw_release; - ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size); + ret = load_microcode_amd(c->x86, fw->data, fw->size); fw_release: release_firmware(fw); @@ -941,11 +1274,19 @@ static void microcode_fini_cpu_amd(int cpu) uci->mc = NULL; } +static void finalize_late_load_amd(int result) +{ + if (result) + cleanup(); +} + static struct microcode_ops microcode_amd_ops = { - .request_microcode_fw = request_microcode_amd, - .collect_cpu_info = collect_cpu_info_amd, - .apply_microcode 
= apply_microcode_amd, - .microcode_fini_cpu = microcode_fini_cpu_amd, + .request_microcode_fw = request_microcode_amd, + .collect_cpu_info = collect_cpu_info_amd, + .apply_microcode = apply_microcode_amd, + .microcode_fini_cpu = microcode_fini_cpu_amd, + .finalize_late_load = finalize_late_load_amd, + .nmi_safe = true, }; struct microcode_ops * __init init_amd_microcode(void) @@ -956,11 +1297,6 @@ struct microcode_ops * __init init_amd_microcode(void) pr_warn("AMD CPU family 0x%x not supported\n", c->x86); return NULL; } - - if (ucode_new_rev) - pr_info_once("microcode updated early to new patch_level=0x%08x\n", - ucode_new_rev); - return &microcode_amd_ops; } diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c new file mode 100644 index 000000000000..1fd349cfc802 --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/amd_shas.c @@ -0,0 +1,556 @@ +/* Keep 'em sorted. */ +static const struct patch_digest phashes[] = { + { 0x8001227, { + 0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b, + 0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46, + 0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8, + 0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18, + } + }, + { 0x8001250, { + 0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60, + 0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa, + 0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3, + 0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19, + } + }, + { 0x800126e, { + 0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c, + 0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3, + 0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6, + 0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43, + } + }, + { 0x800126f, { + 0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec, + 0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b, + 0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18, + 0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33, + } + }, + { 0x800820d, { + 0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59, + 0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67, + 0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c, + 0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e, + } + }, + { 0x8301025, { + 0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36, + 0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1, + 0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30, + 0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77, + } + }, + { 0x8301055, { + 0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a, + 0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea, + 0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b, + 0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b, + } + }, + { 0x8301072, { + 0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e, + 0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28, + 0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1, + 0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30, + } + }, + { 0x830107a, { + 0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72, + 0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34, + 0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20, + 0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a, + } + }, + { 0x830107b, { + 0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad, + 0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a, + 0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04, + 0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19, + } + }, + { 0x830107c, { + 0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47, + 0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac, + 0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3, + 0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38, + } + }, + { 0x860010d, { + 0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0, + 0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7, + 0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c, + 0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8, + } + }, + { 0x8608108, { + 0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2, + 0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38, + 0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc, + 0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd, + } + }, + { 
0x8701034, { + 0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83, + 0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d, + 0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8, + 0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b, + } + }, + { 0x8a00008, { + 0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e, + 0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e, + 0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72, + 0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c, + } + }, + { 0x8a0000a, { + 0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c, + 0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44, + 0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb, + 0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20, + } + }, + { 0xa00104c, { + 0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe, + 0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76, + 0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b, + 0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b, + } + }, + { 0xa00104e, { + 0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2, + 0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0, + 0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e, + 0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b, + } + }, + { 0xa001053, { + 0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d, + 0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25, + 0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb, + 0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3, + } + }, + { 0xa001058, { + 0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36, + 0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19, + 0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec, + 0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2, + } + }, + { 0xa001075, { + 0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9, + 0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a, + 0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f, + 0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0, + } + }, + { 0xa001078, { + 0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25, + 0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce, + 0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04, + 0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e, + } + }, + { 0xa001079, { + 0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb, + 0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93, + 0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb, + 0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a, + } + }, + { 0xa00107a, { + 0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f, + 0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd, + 0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f, + 0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18, + } + }, + { 0xa001143, { + 0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80, + 0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42, + 0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d, + 0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8, + } + }, + { 0xa001144, { + 0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41, + 0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b, + 0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25, + 0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc, + } + }, + { 0xa00115d, { + 0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd, + 0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75, + 0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e, + 0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05, + } + }, + { 0xa001173, { + 0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a, + 0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35, + 0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3, + 0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e, + } + }, + { 0xa0011a8, { + 0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b, + 0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6, + 0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4, + 0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e, + } + }, + { 0xa0011ce, { + 0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71, + 0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86, + 0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e, + 0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a, + } + }, + { 0xa0011d1, { + 0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e, + 0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09, + 0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5, + 0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8, + } 
+ }, + { 0xa0011d3, { + 0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b, + 0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6, + 0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00, + 0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26, + } + }, + { 0xa0011d5, { + 0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13, + 0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7, + 0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62, + 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, + } + }, + { 0xa0011d7, { + 0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b, + 0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12, + 0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2, + 0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36, + } + }, + { 0xa001223, { + 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, + 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4, + 0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15, + 0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45, + } + }, + { 0xa001224, { + 0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25, + 0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad, + 0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9, + 0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a, + } + }, + { 0xa001227, { + 0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad, + 0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d, + 0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9, + 0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0, + } + }, + { 0xa001229, { + 0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6, + 0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75, + 0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4, + 0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d, + } + }, + { 0xa00122e, { + 0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf, + 0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15, + 0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa, + 0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10, + } + }, + { 0xa001231, { + 0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e, + 0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64, + 0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b, + 0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd, + } + }, + { 0xa001234, { + 0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7, + 0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc, + 0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b, + 0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a, + } + }, + { 0xa001236, { + 0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78, + 0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d, + 0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b, + 0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25, + } + }, + { 0xa001238, { + 0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc, + 0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a, + 0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e, + 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, + } + }, + { 0xa00123b, { + 0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2, + 0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3, + 0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28, + 0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72, + } + }, + { 0xa00820c, { + 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, + 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63, + 0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1, + 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, + } + }, + { 0xa00820d, { + 0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4, + 0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca, + 0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1, + 0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7, + } + }, + { 0xa10113e, { + 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, + 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0, + 0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5, + 0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53, + } + }, + { 0xa101144, { + 0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26, + 0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09, + 0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc, + 0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47, + } + }, + { 0xa101148, { + 0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90, + 0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f, + 0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08, + 
0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, + } + }, + { 0xa10114c, { + 0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64, + 0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74, + 0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a, + 0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0, + } + }, + { 0xa10123e, { + 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, + 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d, + 0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc, + 0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b, + } + }, + { 0xa101244, { + 0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c, + 0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1, + 0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72, + 0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0, + } + }, + { 0xa101248, { + 0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e, + 0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22, + 0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e, + 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, + } + }, + { 0xa10124c, { + 0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90, + 0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46, + 0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc, + 0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2, + } + }, + { 0xa108108, { + 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, + 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6, + 0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c, + 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, + } + }, + { 0xa108109, { + 0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa, + 0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b, + 0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35, + 0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59, + } + }, + { 0xa20102d, { + 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, + 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89, + 0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23, + 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, + } + }, + { 0xa20102e, { + 0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd, + 0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6, + 0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5, + 0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe, + } + }, + { 0xa201210, { + 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, + 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9, + 0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68, + 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, + } + }, + { 0xa201211, { + 0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95, + 0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27, + 0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff, + 0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27, + } + }, + { 0xa404107, { + 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, + 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0, + 0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81, + 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, + } + }, + { 0xa404108, { + 0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc, + 0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb, + 0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19, + 0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00, + } + }, + { 0xa500011, { + 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, + 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1, + 0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2, + 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, + } + }, + { 0xa500012, { + 0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4, + 0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16, + 0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f, + 0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63, + } + }, + { 0xa601209, { + 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, + 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30, + 0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46, + 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, + } + }, + { 0xa60120a, { + 0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d, + 0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b, + 0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d, + 0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c, + } + }, + { 0xa704107, { + 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, + 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93, + 
0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2, + 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, + } + }, + { 0xa704108, { + 0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93, + 0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98, + 0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49, + 0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a, + } + }, + { 0xa705206, { + 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, + 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7, + 0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b, + 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, + } + }, + { 0xa705208, { + 0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19, + 0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2, + 0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11, + 0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff, + } + }, + { 0xa708007, { + 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, + 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2, + 0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80, + 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, + } + }, + { 0xa708008, { + 0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46, + 0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab, + 0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16, + 0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b, + } + }, + { 0xa70c005, { + 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, + 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f, + 0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55, + 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, + } + }, + { 0xa70c008, { + 0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21, + 0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e, + 0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66, + 0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0, + } + }, + { 0xaa00116, { + 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, + 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5, + 0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d, + 0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9, + } + }, + { 0xaa00212, { + 0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75, + 0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71, + 0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2, + 0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1, + } + }, + { 0xaa00213, { + 0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a, + 0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f, + 0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed, + 0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b, + } + }, + { 0xaa00215, { + 0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9, + 0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4, + 0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b, + 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, + } + }, + { 0xaa00216, { + 0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5, + 0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08, + 0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2, + 0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1, + } + }, +}; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 712aafff96e0..68049f171860 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -17,12 +17,13 @@ #define pr_fmt(fmt) "microcode: " fmt -#include <linux/platform_device.h> #include <linux/stop_machine.h> +#include <linux/device/faux.h> #include <linux/syscore_ops.h> #include <linux/miscdevice.h> #include <linux/capability.h> #include <linux/firmware.h> +#include <linux/cpumask.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/mutex.h> @@ -31,45 +32,44 @@ #include <linux/fs.h> #include <linux/mm.h> -#include <asm/microcode_intel.h> +#include <asm/apic.h> #include <asm/cpu_device_id.h> -#include <asm/microcode_amd.h> #include <asm/perf_event.h> -#include <asm/microcode.h> #include <asm/processor.h> #include <asm/cmdline.h> +#include <asm/msr.h> #include <asm/setup.h> -#define DRIVER_VERSION "2.2" +#include "internal.h" -static struct microcode_ops *microcode_ops; -static bool dis_ucode_ldr = true; +static struct 
microcode_ops *microcode_ops; +static bool dis_ucode_ldr; -bool initrd_gone; +bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV); -LIST_HEAD(microcode_cache); +/* + * Those below should be behind CONFIG_MICROCODE_DBG ifdeffery but in + * order to not uglify the code with ifdeffery and use IS_ENABLED() + * instead, leave them in. When microcode debugging is not enabled, + * those are meaningless anyway. + */ +/* base microcode revision for debugging */ +u32 base_rev; +u32 microcode_rev[NR_CPUS] = {}; /* * Synchronization. * * All non cpu-hotplug-callback call sites use: * - * - microcode_mutex to synchronize with each other; * - cpus_read_lock/unlock() to synchronize with * the cpu-hotplug-callback call sites. * * We guarantee that only a single cpu is being * updated at any particular moment of time. */ -static DEFINE_MUTEX(microcode_mutex); - struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; -struct cpu_info_ctx { - struct cpu_signature *cpu_sig; - int err; -}; - /* * Those patch levels cannot be updated to newer ones and thus should be final. */ @@ -80,6 +80,8 @@ static u32 final_levels[] = { 0, /* T-101 terminator */ }; +struct early_load_data early_data; + /* * Check the current patch level on this CPU. * @@ -92,12 +94,12 @@ static bool amd_check_current_patch_level(void) u32 lvl, dummy, i; u32 *levels; + if (x86_cpuid_vendor() != X86_VENDOR_AMD) + return false; + native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); - if (IS_ENABLED(CONFIG_X86_32)) - levels = (u32 *)__pa_nodebug(&final_levels); - else - levels = final_levels; + levels = final_levels; for (i = 0; levels[i]; i++) { if (lvl == levels[i]) @@ -106,38 +108,60 @@ static bool amd_check_current_patch_level(void) return false; } -static bool __init check_loader_disabled_bsp(void) +bool __init microcode_loader_disabled(void) { - static const char *__dis_opt_str = "dis_ucode_ldr"; - -#ifdef CONFIG_X86_32 - const char *cmdline = (const char *)__pa_nodebug(boot_command_line); - const char *option = (const char *)__pa_nodebug(__dis_opt_str); - bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr); - -#else /* CONFIG_X86_64 */ - const char *cmdline = boot_command_line; - const char *option = __dis_opt_str; - bool *res = &dis_ucode_ldr; -#endif + if (dis_ucode_ldr) + return true; /* - * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not - * completely accurate as xen pv guests don't see that CPUID bit set but - * that's good enough as they don't land on the BSP path anyway. + * Disable when: + * + * 1) The CPU does not support CPUID. + * + * 2) Bit 31 in CPUID[1]:ECX is clear + * The bit is reserved for hypervisor use. This is still not + * completely accurate as XEN PV guests don't see that CPUID bit + * set, but that's good enough as they don't land on the BSP + * path anyway. + * + * 3) Certain AMD patch levels are not allowed to be + * overwritten. 
*/ - if (native_cpuid_ecx(1) & BIT(31)) - return *res; + if (!cpuid_feature() || + ((native_cpuid_ecx(1) & BIT(31)) && + !IS_ENABLED(CONFIG_MICROCODE_DBG)) || + amd_check_current_patch_level()) + dis_ucode_ldr = true; - if (x86_cpuid_vendor() == X86_VENDOR_AMD) { - if (amd_check_current_patch_level()) - return *res; - } + return dis_ucode_ldr; +} - if (cmdline_find_option_bool(cmdline, option) <= 0) - *res = false; +static void __init early_parse_cmdline(void) +{ + char cmd_buf[64] = {}; + char *s, *p = cmd_buf; + + if (cmdline_find_option(boot_command_line, "microcode", cmd_buf, sizeof(cmd_buf)) > 0) { + while ((s = strsep(&p, ","))) { + if (IS_ENABLED(CONFIG_MICROCODE_DBG)) { + if (strstr(s, "base_rev=")) { + /* advance to the option arg */ + strsep(&s, "="); + if (kstrtouint(s, 16, &base_rev)) { ; } + } + } + + if (!strcmp("force_minrev", s)) + force_minrev = true; + + if (!strcmp(s, "dis_ucode_ldr")) + dis_ucode_ldr = true; + } + } - return *res; + /* old, compat option */ + if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0) + dis_ucode_ldr = true; } void __init load_ucode_bsp(void) @@ -145,7 +169,9 @@ void __init load_ucode_bsp(void) unsigned int cpuid_1_eax; bool intel = true; - if (!have_cpuid_p()) + early_parse_cmdline(); + + if (microcode_loader_disabled()) return; cpuid_1_eax = native_cpuid_eax(1); @@ -166,29 +192,22 @@ void __init load_ucode_bsp(void) return; } - if (check_loader_disabled_bsp()) - return; - if (intel) - load_ucode_intel_bsp(); + load_ucode_intel_bsp(&early_data); else - load_ucode_amd_bsp(cpuid_1_eax); -} - -static bool check_loader_disabled_ap(void) -{ -#ifdef CONFIG_X86_32 - return *((bool *)__pa_nodebug(&dis_ucode_ldr)); -#else - return dis_ucode_ldr; -#endif + load_ucode_amd_bsp(&early_data, cpuid_1_eax); } void load_ucode_ap(void) { unsigned int cpuid_1_eax; - if (check_loader_disabled_ap()) + /* + * Can't use microcode_loader_disabled() here - .init section + * hell. It doesn't have to either - the BSP variant must've + * parsed cmdline already anyway. + */ + if (dis_ucode_ldr) return; cpuid_1_eax = native_cpuid_eax(1); @@ -207,90 +226,37 @@ void load_ucode_ap(void) } } -static int __init save_microcode_in_initrd(void) -{ - struct cpuinfo_x86 *c = &boot_cpu_data; - int ret = -EINVAL; - - switch (c->x86_vendor) { - case X86_VENDOR_INTEL: - if (c->x86 >= 6) - ret = save_microcode_in_initrd_intel(); - break; - case X86_VENDOR_AMD: - if (c->x86 >= 0x10) - ret = save_microcode_in_initrd_amd(cpuid_eax(1)); - break; - default: - break; - } - - initrd_gone = true; - - return ret; -} - -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) +struct cpio_data __init find_microcode_in_initrd(const char *path) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start = 0; size_t size; #ifdef CONFIG_X86_32 - struct boot_params *params; - - if (use_pa) - params = (struct boot_params *)__pa_nodebug(&boot_params); - else - params = &boot_params; - - size = params->hdr.ramdisk_size; - - /* - * Set start only if we have an initrd image. We cannot use initrd_start - * because it is not set that early yet. - */ + size = boot_params.hdr.ramdisk_size; + /* Early load on BSP has a temporary mapping. 
*/ if (size) - start = params->hdr.ramdisk_image; + start = initrd_start_early; -# else /* CONFIG_X86_64 */ +#else /* CONFIG_X86_64 */ size = (unsigned long)boot_params.ext_ramdisk_size << 32; size |= boot_params.hdr.ramdisk_size; if (size) { start = (unsigned long)boot_params.ext_ramdisk_image << 32; start |= boot_params.hdr.ramdisk_image; - start += PAGE_OFFSET; } -# endif +#endif /* * Fixup the start address: after reserve_initrd() runs, initrd_start * has the virtual address of the beginning of the initrd. It also * possibly relocates the ramdisk. In either case, initrd_start contains * the updated address so use that instead. - * - * initrd_gone is for the hotplug case where we've thrown out initrd - * already. */ - if (!use_pa) { - if (initrd_gone) - return (struct cpio_data){ NULL, 0, "" }; - if (initrd_start) - start = initrd_start; - } else { - /* - * The picture with physical addresses is a bit different: we - * need to get the *physical* address to which the ramdisk was - * relocated, i.e., relocated_ramdisk (not initrd_start) and - * since we're running from physical addresses, we need to access - * relocated_ramdisk through its *physical* address too. - */ - u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk); - if (*rr) - start = *rr; - } + if (initrd_start) + start = initrd_start; return find_cpio_data(path, (void *)start, size, NULL); #else /* !CONFIG_BLK_DEV_INITRD */ @@ -298,7 +264,7 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) #endif } -void reload_early_microcode(void) +static void reload_early_microcode(unsigned int cpu) { int vendor, family; @@ -312,7 +278,7 @@ void reload_early_microcode(void) break; case X86_VENDOR_AMD: if (family >= 0x10) - reload_ucode_amd(); + reload_ucode_amd(cpu); break; default: break; @@ -320,7 +286,7 @@ void reload_early_microcode(void) } /* fake device for request_firmware */ -static struct platform_device *microcode_pdev; +static struct faux_device *microcode_fdev; #ifdef CONFIG_MICROCODE_LATE_LOADING /* @@ -334,166 +300,474 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. Right now, this is conservative * and good. */ -#define SPINUNIT 100 /* 100 nsec */ +enum sibling_ctrl { + /* Spinwait with timeout */ + SCTRL_WAIT, + /* Invoke the microcode_apply() callback */ + SCTRL_APPLY, + /* Proceed without invoking the microcode_apply() callback */ + SCTRL_DONE, +}; + +struct microcode_ctrl { + enum sibling_ctrl ctrl; + enum ucode_state result; + unsigned int ctrl_cpu; + bool nmi_enabled; +}; + +DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); +static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); +static atomic_t late_cpus_in, offline_in_nmi; +static unsigned int loops_per_usec; +static cpumask_t cpu_offline_mask; -static int check_online_cpus(void) +static noinstr bool wait_for_cpus(atomic_t *cnt) { - unsigned int cpu; + unsigned int timeout, loops; - /* - * Make sure all CPUs are online. It's fine for SMT to be disabled if - * all the primary threads are still online. 
- */ - for_each_present_cpu(cpu) { - if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { - pr_err("Not all CPUs online, aborting microcode update.\n"); - return -EINVAL; + WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0); + + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (!raw_atomic_read(cnt)) + return true; + + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); + + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); } } - - return 0; + /* Prevent the late comers from making progress and let them time out */ + raw_atomic_inc(cnt); + return false; } -static atomic_t late_cpus_in; -static atomic_t late_cpus_out; - -static int __wait_for_cpus(atomic_t *t, long long timeout) +static noinstr bool wait_for_ctrl(void) { - int all_cpus = num_online_cpus(); + unsigned int timeout, loops; - atomic_inc(t); + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) + return true; - while (atomic_read(t) < all_cpus) { - if (timeout < SPINUNIT) { - pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", - all_cpus - atomic_read(t)); - return 1; - } + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); - ndelay(SPINUNIT); - timeout -= SPINUNIT; + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); + } + } + return false; +} - touch_nmi_watchdog(); +/* + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. + */ +static noinstr bool load_secondary_wait(unsigned int ctrl_cpu) +{ + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return false; } - return 0; + + /* + * Wait for primary threads to complete. If one of them hangs due + * to the update, there is no way out. This is non-recoverable + * because the CPU might hold locks or resources and confuse the + * scheduler, watchdogs etc. There is no way to safely evacuate the + * machine. + */ + if (wait_for_ctrl()) + return true; + + instrumentation_begin(); + panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + instrumentation_end(); } /* - * Returns: - * < 0 - on error - * 0 - success (no update done or microcode was updated) + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. */ -static int __reload_late(void *info) +static noinstr void load_secondary(unsigned int cpu) { - int cpu = smp_processor_id(); - enum ucode_state err; - int ret = 0; + unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu); + enum ucode_state ret; + + if (!load_secondary_wait(ctrl_cpu)) { + instrumentation_begin(); + pr_err_once("load: %d CPUs timed out\n", + atomic_read(&late_cpus_in) - 1); + instrumentation_end(); + return; + } + /* Primary thread completed. Allow to invoke instrumentable code */ + instrumentation_begin(); /* - * Wait for all CPUs to arrive. A load will not be attempted unless all - * CPUs show up. - * */ - if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) - return -1; + * If the primary succeeded then invoke the apply() callback, + * otherwise copy the state from the primary thread. 
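+	 *
+	 * Rough sketch of the resulting per-thread flow:
+	 *
+	 *	primary:    result = apply_microcode();
+	 *	            siblings' ctrl = SCTRL_APPLY (success) or SCTRL_DONE
+	 *	secondary:  ctrl == SCTRL_APPLY ? apply_microcode()
+	 *	                                : copy primary's result
+	 *	            ctrl = SCTRL_DONE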
+ */ + if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY) + ret = microcode_ops->apply_microcode(cpu); + else + ret = per_cpu(ucode_ctrl.result, ctrl_cpu); + + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); + instrumentation_end(); +} + +static void __load_primary(unsigned int cpu) +{ + struct cpumask *secondaries = topology_sibling_cpumask(cpu); + enum sibling_ctrl ctrl; + enum ucode_state ret; + unsigned int sibling; + + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); + return; + } + + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); /* - * On an SMT system, it suffices to load the microcode on one sibling of - * the core because the microcode engine is shared between the threads. - * Synchronization still needs to take place so that no concurrent - * loading attempts happen on multiple threads of an SMT core. See - * below. + * If the update was successful, let the siblings run the apply() + * callback. If not, tell them it's done. This also covers the + * case where the CPU has uniform loading at package or system + * scope implemented but does not advertise it. */ - if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) - err = microcode_ops->apply_microcode(cpu); + if (ret == UCODE_UPDATED || ret == UCODE_OK) + ctrl = SCTRL_APPLY; else - goto wait_for_siblings; + ctrl = SCTRL_DONE; + + for_each_cpu(sibling, secondaries) { + if (sibling != cpu) + per_cpu(ucode_ctrl.ctrl, sibling) = ctrl; + } +} + +static bool kick_offline_cpus(unsigned int nr_offl) +{ + unsigned int cpu, timeout; + + for_each_cpu(cpu, &cpu_offline_mask) { + /* Enable the rendezvous handler and send NMI */ + per_cpu(ucode_ctrl.nmi_enabled, cpu) = true; + apic_send_nmi_to_offline_cpu(cpu); + } + + /* Wait for them to arrive */ + for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) { + if (atomic_read(&offline_in_nmi) == nr_offl) + return true; + udelay(1); + } + /* Let the others time out */ + return false; +} - if (err >= UCODE_NFOUND) { - if (err == UCODE_ERROR) - pr_warn("Error reloading microcode on CPU %d\n", cpu); +static void release_offline_cpus(void) +{ + unsigned int cpu; + + for_each_cpu(cpu, &cpu_offline_mask) + per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE; +} + +static void load_primary(unsigned int cpu) +{ + unsigned int nr_offl = cpumask_weight(&cpu_offline_mask); + bool proceed = true; + + /* Kick soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + proceed = kick_offline_cpus(nr_offl); + + /* If the soft-offlined CPUs did not respond, abort */ + if (proceed) + __load_primary(cpu); + + /* Unconditionally release soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + release_offline_cpus(); +} + +/* + * Minimal stub rendezvous handler for soft-offlined CPUs which participate + * in the NMI rendezvous to protect against a concurrent NMI on affected + * CPUs. 
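+ *
+ * The handshake with the control CPU, roughly:
+ *
+ *	control CPU				soft-offlined CPU
+ *	nmi_enabled = true
+ *	apic_send_nmi_to_offline_cpu()	-->	NMI arrives
+ *						offline_in_nmi++, result = UCODE_OFFLINE
+ *						wait_for_ctrl() spins
+ *	... microcode update runs ...
+ *	ctrl = SCTRL_DONE		-->	stub returns, CPU parks again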
+ */ +void noinstr microcode_offline_nmi_handler(void) +{ + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) + return; + raw_cpu_write(ucode_ctrl.nmi_enabled, false); + raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE); + raw_atomic_inc(&offline_in_nmi); + wait_for_ctrl(); +} + +static noinstr bool microcode_update_handler(void) +{ + unsigned int cpu = raw_smp_processor_id(); - ret = -1; + if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) { + instrumentation_begin(); + load_primary(cpu); + instrumentation_end(); + } else { + load_secondary(cpu); } -wait_for_siblings: - if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC)) - panic("Timeout during microcode update!\n"); + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); + + return true; +} + +/* + * Protection against instrumentation is required for CPUs which are not + * safe against an NMI which is delivered to the secondary SMT sibling + * while the primary thread updates the microcode. Instrumentation can end + * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI + * which is the opposite of what the NMI rendezvous is trying to achieve. + * + * The primary thread is safe versus instrumentation as the actual + * microcode update handles this correctly. It's only the sibling code + * path which must be NMI safe until the primary thread completed the + * update. + */ +bool noinstr microcode_nmi_handler(void) +{ + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) + return false; + + raw_cpu_write(ucode_ctrl.nmi_enabled, false); + return microcode_update_handler(); +} + +static int load_cpus_stopped(void *unused) +{ + if (microcode_ops->use_nmi) { + /* Enable the NMI handler and raise NMI */ + this_cpu_write(ucode_ctrl.nmi_enabled, true); + apic->send_IPI(smp_processor_id(), NMI_VECTOR); + } else { + /* Just invoke the handler directly */ + microcode_update_handler(); + } + return 0; +} + +static int load_late_stop_cpus(bool is_safe) +{ + unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; + unsigned int nr_offl, offline = 0; + int old_rev = boot_cpu_data.microcode; + struct cpuinfo_x86 prev_info; + + if (!is_safe) { + pr_err("Late microcode loading without minimal revision check.\n"); + pr_err("You should switch to early loading, if possible.\n"); + } /* - * At least one thread has completed update on each core. - * For others, simply call the update to make sure the - * per-cpu cpuinfo can be updated with right microcode - * revision. + * Pre-load the microcode image into a staging device. This + * process is preemptible and does not require stopping CPUs. + * Successful staging simplifies the subsequent late-loading + * process, reducing rendezvous time. + * + * Even if the transfer fails, the update will proceed as usual. */ - if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) - err = microcode_ops->apply_microcode(cpu); + if (microcode_ops->use_staging) + microcode_ops->stage_microcode(); + + atomic_set(&late_cpus_in, num_online_cpus()); + atomic_set(&offline_in_nmi, 0); + loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000); + + /* + * Take a snapshot before the microcode update in order to compare and + * check whether any bits changed after an update. 
+ */ + store_cpu_caps(&prev_info); + + if (microcode_ops->use_nmi) + static_branch_enable_cpuslocked(µcode_nmi_handler_enable); + + stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask); + + if (microcode_ops->use_nmi) + static_branch_disable_cpuslocked(µcode_nmi_handler_enable); - return ret; + /* Analyze the results */ + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + switch (per_cpu(ucode_ctrl.result, cpu)) { + case UCODE_UPDATED: updated++; break; + case UCODE_TIMEOUT: timedout++; break; + case UCODE_OK: siblings++; break; + case UCODE_OFFLINE: offline++; break; + default: failed++; break; + } + } + + if (microcode_ops->finalize_late_load) + microcode_ops->finalize_late_load(!updated); + + if (!updated) { + /* Nothing changed. */ + if (!failed && !timedout) + return 0; + + nr_offl = cpumask_weight(&cpu_offline_mask); + if (offline < nr_offl) { + pr_warn("%u offline siblings did not respond.\n", + nr_offl - atomic_read(&offline_in_nmi)); + return -EIO; + } + pr_err("update failed: %u CPUs failed %u CPUs timed out\n", + failed, timedout); + return -EIO; + } + + if (!is_safe || failed || timedout) + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + + pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings); + if (failed || timedout) { + pr_err("load incomplete. %u CPUs timed out or failed\n", + num_online_cpus() - (updated + siblings)); + } + pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode); + microcode_check(&prev_info); + + return updated + siblings == num_online_cpus() ? 0 : -EIO; } /* - * Reload microcode late on all CPUs. Wait for a sec until they - * all gather together. + * This function does two things: + * + * 1) Ensure that all required CPUs which are present and have been booted + * once are online. + * + * To pass this check, all primary threads must be online. + * + * If the microcode load is not safe against NMI then all SMT threads + * must be online as well because they still react to NMIs when they are + * soft-offlined and parked in one of the play_dead() variants. So if a + * NMI hits while the primary thread updates the microcode the resulting + * behaviour is undefined. The default play_dead() implementation on + * modern CPUs uses MWAIT, which is also not guaranteed to be safe + * against a microcode update which affects MWAIT. + * + * As soft-offlined CPUs still react on NMIs, the SMT sibling + * restriction can be lifted when the vendor driver signals to use NMI + * for rendezvous and the APIC provides a mechanism to send an NMI to a + * soft-offlined CPU. The soft-offlined CPUs are then able to + * participate in the rendezvous in a trivial stub handler. + * + * 2) Initialize the per CPU control structure and create a cpumask + * which contains "offline"; secondary threads, so they can be handled + * correctly by a control CPU. 
 */
-static int microcode_reload_late(void)
+static bool setup_cpus(void)
 {
-	int old = boot_cpu_data.microcode, ret;
+	struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, };
+	bool allow_smt_offline;
+	unsigned int cpu;
 
-	pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n");
-	pr_err("You should switch to early loading, if possible.\n");
+	allow_smt_offline = microcode_ops->nmi_safe ||
+		(microcode_ops->use_nmi && apic->nmi_to_offline_cpu);
 
-	atomic_set(&late_cpus_in, 0);
-	atomic_set(&late_cpus_out, 0);
+	cpumask_clear(&cpu_offline_mask);
 
-	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
-	if (ret == 0)
-		microcode_check();
+	for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) {
+		/*
+		 * Offline CPUs sit in one of the play_dead() functions
+		 * with interrupts disabled, but they still react to NMIs
+		 * and execute arbitrary code. Also MWAIT being updated
+		 * while the offline CPU sits there is not necessarily safe
+		 * on all CPU variants.
+		 *
+		 * Mark them in the offline_cpus mask which will be handled
+		 * by CPU0 later in the update process.
+		 *
+		 * Ensure that the primary thread is online so that it is
+		 * guaranteed that all cores are updated.
+		 */
+		if (!cpu_online(cpu)) {
+			if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
+				pr_err("CPU %u not online, loading aborted\n", cpu);
+				return false;
+			}
+			cpumask_set_cpu(cpu, &cpu_offline_mask);
+			per_cpu(ucode_ctrl, cpu) = ctrl;
+			continue;
+		}
 
-	pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
-		old, boot_cpu_data.microcode);
+		/*
+		 * Initialize the per CPU state. This is core scope for now,
+		 * but prepared to take package or system scope into account.
+		 */
+		ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
+		per_cpu(ucode_ctrl, cpu) = ctrl;
+	}
+	return true;
+}
 
-	return ret;
+static int load_late_locked(void)
+{
+	if (!setup_cpus())
+		return -EBUSY;
+
+	switch (microcode_ops->request_microcode_fw(0, &microcode_fdev->dev)) {
+	case UCODE_NEW:
+		return load_late_stop_cpus(false);
+	case UCODE_NEW_SAFE:
+		return load_late_stop_cpus(true);
+	case UCODE_NFOUND:
+		return -ENOENT;
+	case UCODE_OK:
+		return 0;
+	default:
+		return -EBADFD;
+	}
 }
 
 static ssize_t reload_store(struct device *dev,
 			    struct device_attribute *attr,
 			    const char *buf, size_t size)
 {
-	enum ucode_state tmp_ret = UCODE_OK;
-	int bsp = boot_cpu_data.cpu_index;
 	unsigned long val;
-	ssize_t ret = 0;
+	ssize_t ret;
 
 	ret = kstrtoul(buf, 0, &val);
-	if (ret)
-		return ret;
-
-	if (val != 1)
-		return size;
+	if (ret || val != 1)
+		return -EINVAL;
 
 	cpus_read_lock();
-
-	ret = check_online_cpus();
-	if (ret)
-		goto put;
-
-	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev);
-	if (tmp_ret != UCODE_NEW)
-		goto put;
-
-	mutex_lock(&microcode_mutex);
-	ret = microcode_reload_late();
-	mutex_unlock(&microcode_mutex);
-
-put:
+	ret = load_late_locked();
 	cpus_read_unlock();
 
-	if (ret == 0)
-		ret = size;
-
-	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
-
-	return ret;
+	return ret ? 
: size; } static DEVICE_ATTR_WO(reload); @@ -507,7 +781,7 @@ static ssize_t version_show(struct device *dev, return sprintf(buf, "0x%x\n", uci->cpu_sig.rev); } -static ssize_t pf_show(struct device *dev, +static ssize_t processor_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ucode_cpu_info *uci = ucode_cpu_info + dev->id; @@ -515,8 +789,8 @@ static ssize_t pf_show(struct device *dev, return sprintf(buf, "0x%x\n", uci->cpu_sig.pf); } -static DEVICE_ATTR(version, 0444, version_show, NULL); -static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL); +static DEVICE_ATTR_RO(version); +static DEVICE_ATTR_RO(processor_flags); static struct attribute *mc_default_attrs[] = { &dev_attr_version.attr, @@ -535,17 +809,6 @@ static void microcode_fini_cpu(int cpu) microcode_ops->microcode_fini_cpu(cpu); } -static enum ucode_state microcode_init_cpu(int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - memset(uci, 0, sizeof(*uci)); - - microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); - - return microcode_ops->apply_microcode(cpu); -} - /** * microcode_bsp_resume - Update boot CPU microcode during resume. */ @@ -557,26 +820,34 @@ void microcode_bsp_resume(void) if (uci->mc) microcode_ops->apply_microcode(cpu); else - reload_early_microcode(); + reload_early_microcode(cpu); } -static struct syscore_ops mc_syscore_ops = { - .resume = microcode_bsp_resume, -}; - -static int mc_cpu_starting(unsigned int cpu) +static void microcode_bsp_syscore_resume(void *data) { - enum ucode_state err = microcode_ops->apply_microcode(cpu); + microcode_bsp_resume(); +} - pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err); +static const struct syscore_ops mc_syscore_ops = { + .resume = microcode_bsp_syscore_resume, +}; - return err == UCODE_ERROR; -} +static struct syscore mc_syscore = { + .ops = &mc_syscore_ops, +}; static int mc_cpu_online(unsigned int cpu) { + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct device *dev = get_cpu_device(cpu); + memset(uci, 0, sizeof(*uci)); + + microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; + if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); return 0; @@ -584,33 +855,13 @@ static int mc_cpu_online(unsigned int cpu) static int mc_cpu_down_prep(unsigned int cpu) { - struct device *dev; - - dev = get_cpu_device(cpu); + struct device *dev = get_cpu_device(cpu); microcode_fini_cpu(cpu); - - /* Suspend is in progress, only remove the interface */ sysfs_remove_group(&dev->kobj, &mc_attr_group); - pr_debug("%s: CPU%d\n", __func__, cpu); - return 0; } -static void setup_online_cpu(struct work_struct *work) -{ - int cpu = smp_processor_id(); - enum ucode_state err; - - err = microcode_init_cpu(cpu); - if (err == UCODE_ERROR) { - pr_err("Error applying microcode on CPU%d\n", cpu); - return; - } - - mc_cpu_online(cpu); -} - static struct attribute *cpu_root_microcode_attrs[] = { #ifdef CONFIG_MICROCODE_LATE_LOADING &dev_attr_reload.attr, @@ -625,10 +876,11 @@ static const struct attribute_group cpu_root_microcode_group = { static int __init microcode_init(void) { + struct device *dev_root; struct cpuinfo_x86 *c = &boot_cpu_data; int error; - if (dis_ucode_ldr) + if (microcode_loader_disabled()) return -EINVAL; if (c->x86_vendor == X86_VENDOR_INTEL) @@ -641,33 +893,34 @@ static int __init microcode_init(void) if (!microcode_ops) return -ENODEV; - microcode_pdev = 
platform_device_register_simple("microcode", -1, NULL, 0); - if (IS_ERR(microcode_pdev)) - return PTR_ERR(microcode_pdev); + pr_info_once("Current revision: 0x%08x\n", (early_data.new_rev ?: early_data.old_rev)); - error = sysfs_create_group(&cpu_subsys.dev_root->kobj, &cpu_root_microcode_group); - if (error) { - pr_err("Error creating microcode group!\n"); - goto out_pdev; - } + if (early_data.new_rev) + pr_info_once("Updated early from: 0x%08x\n", early_data.old_rev); - /* Do per-CPU setup */ - schedule_on_each_cpu(setup_online_cpu); + microcode_fdev = faux_device_create("microcode", NULL, NULL); + if (!microcode_fdev) + return -ENODEV; - register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", - mc_cpu_starting, NULL); - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", - mc_cpu_online, mc_cpu_down_prep); + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + error = sysfs_create_group(&dev_root->kobj, &cpu_root_microcode_group); + put_device(dev_root); + if (error) { + pr_err("Error creating microcode group!\n"); + goto out_pdev; + } + } - pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); + register_syscore(&mc_syscore); + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", + mc_cpu_online, mc_cpu_down_prep); return 0; out_pdev: - platform_device_unregister(microcode_pdev); + faux_device_destroy(microcode_fdev); return error; } -fs_initcall(save_microcode_in_initrd); late_initcall(microcode_init); diff --git a/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h b/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h new file mode 100644 index 000000000000..2d48e6593540 --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/intel-ucode-defs.h @@ -0,0 +1,160 @@ +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x03, .steppings = 0x0004, .driver_data = 0x2 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0001, .driver_data = 0x45 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0002, .driver_data = 0x40 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0004, .driver_data = 0x2c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x05, .steppings = 0x0008, .driver_data = 0x10 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0001, .driver_data = 0xa }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0020, .driver_data = 0x3 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x0400, .driver_data = 0xd }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x06, .steppings = 0x2000, .driver_data = 0x7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0002, .driver_data = 0x14 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0004, .driver_data = 0x38 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x07, .steppings = 0x0008, .driver_data = 0x2e }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = 
X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0002, .driver_data = 0x11 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0008, .driver_data = 0x8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0040, .driver_data = 0xc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x08, .steppings = 0x0400, .driver_data = 0x5 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x09, .steppings = 0x0020, .driver_data = 0x47 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0a, .steppings = 0x0001, .driver_data = 0x3 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0a, .steppings = 0x0002, .driver_data = 0x1 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0b, .steppings = 0x0002, .driver_data = 0x1d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0b, .steppings = 0x0010, .driver_data = 0x2 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0d, .steppings = 0x0040, .driver_data = 0x18 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0e, .steppings = 0x0100, .driver_data = 0x39 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0e, .steppings = 0x1000, .driver_data = 0x59 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0004, .driver_data = 0x5d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0040, .driver_data = 0xd2 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0080, .driver_data = 0x6b }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0400, .driver_data = 0x95 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x0800, .driver_data = 0xbc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x0f, .steppings = 0x2000, .driver_data = 0xa4 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x16, .steppings = 0x0002, .driver_data = 0x44 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0040, .driver_data = 0x60f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0080, .driver_data = 0x70a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x17, .steppings = 0x0400, .driver_data = 0xa0b }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1a, .steppings = 0x0010, .driver_data = 0x12 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1a, .steppings = 0x0020, .driver_data = 0x1d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1c, .steppings = 0x0004, .driver_data = 0x219 }, +{ .flags = 
X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1c, .steppings = 0x0400, .driver_data = 0x107 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1d, .steppings = 0x0002, .driver_data = 0x29 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x1e, .steppings = 0x0020, .driver_data = 0xa }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x25, .steppings = 0x0004, .driver_data = 0x11 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x25, .steppings = 0x0020, .driver_data = 0x7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x26, .steppings = 0x0002, .driver_data = 0x105 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2a, .steppings = 0x0080, .driver_data = 0x2f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2c, .steppings = 0x0004, .driver_data = 0x1f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2d, .steppings = 0x0040, .driver_data = 0x621 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2d, .steppings = 0x0080, .driver_data = 0x71a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2e, .steppings = 0x0040, .driver_data = 0xd }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x2f, .steppings = 0x0004, .driver_data = 0x3b }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x37, .steppings = 0x0100, .driver_data = 0x838 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x37, .steppings = 0x0200, .driver_data = 0x90d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3a, .steppings = 0x0200, .driver_data = 0x21 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3c, .steppings = 0x0008, .driver_data = 0x28 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3d, .steppings = 0x0010, .driver_data = 0x2f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0010, .driver_data = 0x42e }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0040, .driver_data = 0x600 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3e, .steppings = 0x0080, .driver_data = 0x715 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3f, .steppings = 0x0004, .driver_data = 0x49 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x3f, .steppings = 0x0010, .driver_data = 0x1a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x45, .steppings = 0x0002, .driver_data = 0x26 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x46, .steppings = 0x0002, .driver_data = 0x1c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x47, .steppings = 0x0002, 
.driver_data = 0x22 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4c, .steppings = 0x0008, .driver_data = 0x368 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4c, .steppings = 0x0010, .driver_data = 0x411 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4d, .steppings = 0x0100, .driver_data = 0x12d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x4e, .steppings = 0x0008, .driver_data = 0xf0 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0008, .driver_data = 0x1000191 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0010, .driver_data = 0x2007006 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0020, .driver_data = 0x3000010 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0080, .driver_data = 0x5003901 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x55, .steppings = 0x0800, .driver_data = 0x7002b01 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0004, .driver_data = 0x1c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0008, .driver_data = 0x700001c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0010, .driver_data = 0xf00001a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x56, .steppings = 0x0020, .driver_data = 0xe000015 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0004, .driver_data = 0x14 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0200, .driver_data = 0x48 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5c, .steppings = 0x0400, .driver_data = 0x28 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5e, .steppings = 0x0008, .driver_data = 0xf0 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x5f, .steppings = 0x0002, .driver_data = 0x3e }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x66, .steppings = 0x0008, .driver_data = 0x2a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6a, .steppings = 0x0020, .driver_data = 0xc0002f0 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6a, .steppings = 0x0040, .driver_data = 0xd000404 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x6c, .steppings = 0x0002, .driver_data = 0x10002d0 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7a, .steppings = 0x0002, .driver_data = 0x42 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7a, .steppings = 0x0100, .driver_data = 0x26 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, 
.vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x7e, .steppings = 0x0020, .driver_data = 0xca }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8a, .steppings = 0x0002, .driver_data = 0x33 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8c, .steppings = 0x0002, .driver_data = 0xbc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8c, .steppings = 0x0004, .driver_data = 0x3c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8d, .steppings = 0x0002, .driver_data = 0x56 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0200, .driver_data = 0xf6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0400, .driver_data = 0xf6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x0800, .driver_data = 0xf6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8e, .steppings = 0x1000, .driver_data = 0x100 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0010, .driver_data = 0x2c0003f7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0020, .driver_data = 0x2c0003f7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0040, .driver_data = 0x2c0003f7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0080, .driver_data = 0x2b000639 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x8f, .steppings = 0x0100, .driver_data = 0x2c0003f7 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x96, .steppings = 0x0002, .driver_data = 0x1a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x97, .steppings = 0x0004, .driver_data = 0x3a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x97, .steppings = 0x0020, .driver_data = 0x3a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9a, .steppings = 0x0008, .driver_data = 0x437 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9a, .steppings = 0x0010, .driver_data = 0x437 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9c, .steppings = 0x0001, .driver_data = 0x24000026 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0200, .driver_data = 0xf8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0400, .driver_data = 0xfa }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x0800, .driver_data = 0xf6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x1000, .driver_data = 0xf8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0x9e, .steppings = 0x2000, 
.driver_data = 0x104 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0004, .driver_data = 0x100 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0008, .driver_data = 0x100 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa5, .steppings = 0x0020, .driver_data = 0x100 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa6, .steppings = 0x0001, .driver_data = 0x102 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa6, .steppings = 0x0002, .driver_data = 0x100 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xa7, .steppings = 0x0002, .driver_data = 0x64 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xaa, .steppings = 0x0010, .driver_data = 0x24 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xad, .steppings = 0x0002, .driver_data = 0xa0000d1 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xaf, .steppings = 0x0008, .driver_data = 0x3000341 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xb5, .steppings = 0x0001, .driver_data = 0xa }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xb7, .steppings = 0x0002, .driver_data = 0x12f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xb7, .steppings = 0x0010, .driver_data = 0x12f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0004, .driver_data = 0x4128 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0008, .driver_data = 0x4128 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xba, .steppings = 0x0100, .driver_data = 0x4128 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbd, .steppings = 0x0002, .driver_data = 0x11f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbe, .steppings = 0x0001, .driver_data = 0x1d }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbf, .steppings = 0x0004, .driver_data = 0x3a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbf, .steppings = 0x0020, .driver_data = 0x3a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbf, .steppings = 0x0040, .driver_data = 0x3a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xbf, .steppings = 0x0080, .driver_data = 0x3a }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xc5, .steppings = 0x0004, .driver_data = 0x118 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xc6, .steppings = 0x0004, .driver_data = 0x118 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xc6, .steppings = 0x0010, .driver_data = 0x118 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, 
.family = 0x6, .model = 0xca, .steppings = 0x0004, .driver_data = 0x118 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xcf, .steppings = 0x0002, .driver_data = 0x210002a9 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0x6, .model = 0xcf, .steppings = 0x0004, .driver_data = 0x210002a9 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x00, .steppings = 0x0080, .driver_data = 0x12 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x00, .steppings = 0x0400, .driver_data = 0x15 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x01, .steppings = 0x0004, .driver_data = 0x2e }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0010, .driver_data = 0x21 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0020, .driver_data = 0x2c }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0040, .driver_data = 0x10 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0080, .driver_data = 0x39 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x02, .steppings = 0x0200, .driver_data = 0x2f }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0004, .driver_data = 0xa }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0008, .driver_data = 0xc }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x03, .steppings = 0x0010, .driver_data = 0x17 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0002, .driver_data = 0x17 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0008, .driver_data = 0x5 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0010, .driver_data = 0x6 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0080, .driver_data = 0x3 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0100, .driver_data = 0xe }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0200, .driver_data = 0x3 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x04, .steppings = 0x0400, .driver_data = 0x4 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0004, .driver_data = 0xf }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0010, .driver_data = 0x4 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0020, .driver_data = 0x8 }, +{ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, .vendor = X86_VENDOR_INTEL, .family = 0xf, .model = 0x06, .steppings = 0x0100, .driver_data = 0x9 }, diff --git 
a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index cac2bdb57f0b..8744f3adc2a0 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -10,642 +10,831 @@ * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> * H Peter Anvin" <hpa@zytor.com> */ - -/* - * This needs to be before all headers so that pr_debug in printk.h doesn't turn - * printk calls into no_printk(). - * - *#define DEBUG - */ #define pr_fmt(fmt) "microcode: " fmt - #include <linux/earlycpio.h> #include <linux/firmware.h> +#include <linux/pci_ids.h> #include <linux/uaccess.h> -#include <linux/vmalloc.h> #include <linux/initrd.h> #include <linux/kernel.h> +#include <linux/delay.h> #include <linux/slab.h> #include <linux/cpu.h> #include <linux/uio.h> +#include <linux/io.h> #include <linux/mm.h> -#include <asm/microcode_intel.h> -#include <asm/intel-family.h> +#include <asm/cpu_device_id.h> #include <asm/processor.h> #include <asm/tlbflush.h> #include <asm/setup.h> #include <asm/msr.h> +#include "internal.h" + static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; +#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL) + +/* Defines for the microcode staging mailbox interface */ +#define MBOX_REG_NUM 4 +#define MBOX_REG_SIZE sizeof(u32) + +#define MBOX_CONTROL_OFFSET 0x0 +#define MBOX_STATUS_OFFSET 0x4 +#define MBOX_WRDATA_OFFSET 0x8 +#define MBOX_RDDATA_OFFSET 0xc + +#define MASK_MBOX_CTRL_ABORT BIT(0) +#define MASK_MBOX_CTRL_GO BIT(31) + +#define MASK_MBOX_STATUS_ERROR BIT(2) +#define MASK_MBOX_STATUS_READY BIT(31) + +#define MASK_MBOX_RESP_SUCCESS BIT(0) +#define MASK_MBOX_RESP_PROGRESS BIT(1) +#define MASK_MBOX_RESP_ERROR BIT(2) + +#define MBOX_CMD_LOAD 0x3 +#define MBOX_OBJ_STAGING 0xb +#define MBOX_HEADER(size) ((PCI_VENDOR_ID_INTEL) | \ + (MBOX_OBJ_STAGING << 16) | \ + ((u64)((size) / sizeof(u32)) << 32)) + +/* The size of each mailbox header */ +#define MBOX_HEADER_SIZE sizeof(u64) +/* The size of staging hardware response */ +#define MBOX_RESPONSE_SIZE sizeof(u64) + +#define MBOX_XACTION_TIMEOUT_MS (10 * MSEC_PER_SEC) + /* Current microcode patch used in early patching on the APs. */ -static struct microcode_intel *intel_ucode_patch; +static struct microcode_intel *ucode_patch_va __read_mostly; +static struct microcode_intel *ucode_patch_late __read_mostly; /* last level cache size per core */ -static int llc_size_per_core; +static unsigned int llc_size_per_core __ro_after_init; -/* - * Returns 1 if update has been found, 0 otherwise. 
+/* microcode format is extended from Prescott processors */
+struct extended_signature {
+	unsigned int sig;
+	unsigned int pf;
+	unsigned int cksum;
+};
+
+struct extended_sigtable {
+	unsigned int count;
+	unsigned int cksum;
+	unsigned int reserved[3];
+	struct extended_signature sigs[];
+};
+
+/**
+ * struct staging_state - Track the current staging process state
+ *
+ * @mmio_base:	MMIO base address for staging
+ * @ucode_len:	Total size of the microcode image
+ * @chunk_size:	Size of each data piece
+ * @bytes_sent:	Total bytes transmitted so far
+ * @offset:	Current offset in the microcode image
  */
-static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
-{
-	struct microcode_header_intel *mc_hdr = mc;
+struct staging_state {
+	void __iomem *mmio_base;
+	unsigned int ucode_len;
+	unsigned int chunk_size;
+	unsigned int bytes_sent;
+	unsigned int offset;
+};
 
-	if (mc_hdr->rev <= new_rev)
-		return 0;
+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
+#define EXT_HEADER_SIZE		(sizeof(struct extended_sigtable))
+#define EXT_SIGNATURE_SIZE	(sizeof(struct extended_signature))
+
+static inline unsigned int get_totalsize(struct microcode_header_intel *hdr)
+{
+	return hdr->datasize ? hdr->totalsize : DEFAULT_UCODE_TOTALSIZE;
+}
 
-	return intel_find_matching_signature(mc, csig, cpf);
+static inline unsigned int exttable_size(struct extended_sigtable *et)
+{
+	return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE;
 }
 
-static struct ucode_patch *memdup_patch(void *data, unsigned int size)
+void intel_collect_cpu_info(struct cpu_signature *sig)
 {
-	struct ucode_patch *p;
+	sig->sig = cpuid_eax(1);
+	sig->pf = 0;
+	sig->rev = intel_get_microcode_revision();
 
-	p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
-	if (!p)
-		return NULL;
+	if (IFM(x86_family(sig->sig), x86_model(sig->sig)) >= INTEL_PENTIUM_III_DESCHUTES) {
+		unsigned int val[2];
 
-	p->data = kmemdup(data, size, GFP_KERNEL);
-	if (!p->data) {
-		kfree(p);
-		return NULL;
+		/* get processor flags from MSR 0x17 */
+		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+		sig->pf = 1 << ((val[1] >> 18) & 7);
 	}
+}
+EXPORT_SYMBOL_GPL(intel_collect_cpu_info);
+
+static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2,
+					unsigned int pf2)
+{
+	if (s1->sig != sig2)
+		return false;
 
-	return p;
+	/* Processor flags are either both 0 or they intersect. */
+	return ((!s1->pf && !pf2) || (s1->pf & pf2));
 }
 
-static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size)
+bool intel_find_matching_signature(void *mc, struct cpu_signature *sig)
 {
-	struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
-	struct ucode_patch *iter, *tmp, *p = NULL;
-	bool prev_found = false;
-	unsigned int sig, pf;
+	struct microcode_header_intel *mc_hdr = mc;
+	struct extended_signature *ext_sig;
+	struct extended_sigtable *ext_hdr;
+	int i;
 
-	mc_hdr = (struct microcode_header_intel *)data;
+	if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf))
+		return true;
 
-	list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
-		mc_saved_hdr = (struct microcode_header_intel *)iter->data;
-		sig = mc_saved_hdr->sig;
-		pf = mc_saved_hdr->pf;
+	/* Look for ext. headers: */
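+	/*
+	 * Layout sketch of an image carrying an extended table, with sizes
+	 * taken from the structs above (the base header, MC_HEADER_SIZE, is
+	 * 48 bytes):
+	 *
+	 *	[ header | data (datasize) | ext table hdr (20 bytes)
+	 *	  | count * extended_signature (12 bytes each) ]
+	 *
+	 * The walk below checks each of the count entries in turn.
+	 */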
+	if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE)
+		return false;
 
-		if (intel_find_matching_signature(data, sig, pf)) {
-			prev_found = true;
+	ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE;
+	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
 
-			if (mc_hdr->rev <= mc_saved_hdr->rev)
-				continue;
+	for (i = 0; i < ext_hdr->count; i++) {
+		if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf))
+			return true;
+		ext_sig++;
+	}
+	return false;
+}
+EXPORT_SYMBOL_GPL(intel_find_matching_signature);
+
+/**
+ * intel_microcode_sanity_check() - Sanity check microcode file.
+ * @mc: Pointer to the microcode file contents.
+ * @print_err: Display failure reason if true, silent if false.
+ * @hdr_type: Type of file, i.e. normal microcode file or In Field Scan file.
+ *            Validate that the microcode header type matches the type
+ *            specified here.
+ *
+ * Validate certain header fields and verify that the computed checksum
+ * matches the one specified in the header.
+ *
+ * Return: 0 if the file passes all the checks, -EINVAL if any of the checks
+ * fail.
+ */
+int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type)
+{
+	unsigned long total_size, data_size, ext_table_size;
+	struct microcode_header_intel *mc_header = mc;
+	struct extended_sigtable *ext_header = NULL;
+	u32 sum, orig_sum, ext_sigcount = 0, i;
+	struct extended_signature *ext_sig;
+
+	total_size = get_totalsize(mc_header);
+	data_size = intel_microcode_get_datasize(mc_header);
+
+	if (data_size + MC_HEADER_SIZE > total_size) {
+		if (print_err)
+			pr_err("Error: bad microcode data file size.\n");
+		return -EINVAL;
+	}
+
+	if (mc_header->ldrver != 1 || mc_header->hdrver != hdr_type) {
+		if (print_err)
+			pr_err("Error: invalid/unknown microcode update format. Header type %d\n",
+			       mc_header->hdrver);
+		return -EINVAL;
+	}
+
+	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
+	if (ext_table_size) {
+		u32 ext_table_sum = 0;
+		u32 *ext_tablep;
 
-			p = memdup_patch(data, size);
-			if (!p)
-				pr_err("Error allocating buffer %p\n", data);
-			else {
-				list_replace(&iter->plist, &p->plist);
-				kfree(iter->data);
-				kfree(iter);
-			}
+		if (ext_table_size < EXT_HEADER_SIZE ||
+		    ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
+			if (print_err)
+				pr_err("Error: truncated extended signature table.\n");
+			return -EINVAL;
+		}
+
+		ext_header = mc + MC_HEADER_SIZE + data_size;
+		if (ext_table_size != exttable_size(ext_header)) {
+			if (print_err)
+				pr_err("Error: extended signature table size mismatch.\n");
+			return -EFAULT;
+		}
+
+		ext_sigcount = ext_header->count;
+
+		/*
+		 * Check extended table checksum: the sum of all dwords that
+		 * comprise a valid table must be 0.
+		 */
+		ext_tablep = (u32 *)ext_header;
+
+		i = ext_table_size / sizeof(u32);
+		while (i--)
+			ext_table_sum += ext_tablep[i];
+
+		if (ext_table_sum) {
+			if (print_err)
+				pr_warn("Bad extended signature table checksum, aborting.\n");
+			return -EINVAL;
 		}
 	}
 
 	/*
-	 * There weren't any previous patches found in the list cache; save the
-	 * newly found.
+	 * Calculate the checksum of update data and header. The checksum of
+	 * valid update data and header including the extended signature table
+	 * must be 0.
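+	 *
+	 * Put differently (illustrative): the cksum field in the header is
+	 * chosen by the vendor such that the 32-bit dword sum wraps to zero:
+	 *
+	 *	sum(all u32 dwords of header + data + ext table) mod 2^32 == 0
+	 *
+	 * so any corruption of a single dword makes the sum below non-zero.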
 	 */
-	if (!prev_found) {
-		p = memdup_patch(data, size);
-		if (!p)
-			pr_err("Error allocating buffer for %p\n", data);
-		else
-			list_add_tail(&p->plist, &microcode_cache);
+	orig_sum = 0;
+	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
+	while (i--)
+		orig_sum += ((u32 *)mc)[i];
+
+	if (orig_sum) {
+		if (print_err)
+			pr_err("Bad microcode data checksum, aborting.\n");
+		return -EINVAL;
 	}
 
-	if (!p)
-		return;
+	if (!ext_table_size)
+		return 0;
 
-	if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf))
-		return;
+	/*
+	 * Check extended signature checksum: 0 => valid.
+	 */
+	for (i = 0; i < ext_sigcount; i++) {
+		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
+			  EXT_SIGNATURE_SIZE * i;
+
+		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
+		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
+		if (sum) {
+			if (print_err)
+				pr_err("Bad extended signature checksum, aborting.\n");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(intel_microcode_sanity_check);
+
+static void update_ucode_pointer(struct microcode_intel *mc)
+{
+	kvfree(ucode_patch_va);
 
 	/*
-	 * Save for early loading. On 32-bit, that needs to be a physical
-	 * address as the APs are running from physical addresses, before
-	 * paging has been enabled.
+	 * Save the virtual address for early loading and for eventual free
+	 * on late loading.
 	 */
-	if (IS_ENABLED(CONFIG_X86_32))
-		intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
+	ucode_patch_va = mc;
+}
+
+static void save_microcode_patch(struct microcode_intel *patch)
+{
+	unsigned int size = get_totalsize(&patch->hdr);
+	struct microcode_intel *mc;
+
+	mc = kvmemdup(patch, size, GFP_KERNEL);
+	if (mc)
+		update_ucode_pointer(mc);
 	else
-		intel_ucode_patch = p->data;
+		pr_err("Unable to allocate microcode memory size: %u\n", size);
 }
 
-/*
- * Get microcode matching with BSP's model. Only CPUs with the same model as
- * BSP can stay in the platform.
- */
-static struct microcode_intel *
-scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
+/* Scan blob for microcode matching the boot CPU's family, model, stepping */
+static __init struct microcode_intel *scan_microcode(void *data, size_t size,
+						     struct ucode_cpu_info *uci,
+						     bool save)
 {
 	struct microcode_header_intel *mc_header;
 	struct microcode_intel *patch = NULL;
+	u32 cur_rev = uci->cpu_sig.rev;
 	unsigned int mc_size;
 
-	while (size) {
-		if (size < sizeof(struct microcode_header_intel))
-			break;
-
+	for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) {
 		mc_header = (struct microcode_header_intel *)data;
 		mc_size = get_totalsize(mc_header);
 
-		if (!mc_size ||
-		    mc_size > size ||
+		if (!mc_size || mc_size > size ||
 		    intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0)
 			break;
 
-		size -= mc_size;
-
-		if (!intel_find_matching_signature(data, uci->cpu_sig.sig,
-						   uci->cpu_sig.pf)) {
-			data += mc_size;
+		if (!intel_find_matching_signature(data, &uci->cpu_sig))
 			continue;
-		}
 
+		/*
+		 * For saving the early microcode, find the matching revision which
+		 * was loaded on the BSP.
+		 *
+		 * On the BSP during early boot, find a newer revision than
+		 * actually loaded in the CPU.
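+		 *
+		 * Sketch of the two selection policies:
+		 *
+		 *	save:	keep the blob with rev == cur_rev (what the BSP runs)
+		 *	 BSP:	keep the blob with rev >  cur_rev (best upgrade found)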
+ */
 if (save) {
- save_microcode_patch(uci, data, mc_size);
- goto next;
- }
-
-
- if (!patch) {
- if (!has_newer_microcode(data,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf,
- uci->cpu_sig.rev))
- goto next;
-
- } else {
- struct microcode_header_intel *phdr = &patch->hdr;
-
- if (!has_newer_microcode(data,
- phdr->sig,
- phdr->pf,
- phdr->rev))
- goto next;
+ if (cur_rev != mc_header->rev)
+ continue;
+ } else if (cur_rev >= mc_header->rev) {
+ continue;
 }
- /* We have a newer patch, save it. */
 patch = data;
-
-next:
- data += mc_size;
+ cur_rev = mc_header->rev;
 }
- if (size)
- return NULL;
-
- return patch;
+ return size ? NULL : patch;
 }
-static void show_saved_mc(void)
+static inline u32 read_mbox_dword(void __iomem *mmio_base)
 {
-#ifdef DEBUG
- int i = 0, j;
- unsigned int sig, pf, rev, total_size, data_size, date;
- struct ucode_cpu_info uci;
- struct ucode_patch *p;
-
- if (list_empty(&microcode_cache)) {
- pr_debug("no microcode data saved.\n");
- return;
- }
+ u32 dword = readl(mmio_base + MBOX_RDDATA_OFFSET);
- intel_cpu_collect_info(&uci);
+ /* Acknowledge read completion to the staging hardware */
+ writel(0, mmio_base + MBOX_RDDATA_OFFSET);
+ return dword;
+}
- sig = uci.cpu_sig.sig;
- pf = uci.cpu_sig.pf;
- rev = uci.cpu_sig.rev;
- pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
+static inline void write_mbox_dword(void __iomem *mmio_base, u32 dword)
+{
+ writel(dword, mmio_base + MBOX_WRDATA_OFFSET);
+}
- list_for_each_entry(p, &microcode_cache, plist) {
- struct microcode_header_intel *mc_saved_header;
- struct extended_sigtable *ext_header;
- struct extended_signature *ext_sig;
- int ext_sigcount;
+static inline u64 read_mbox_header(void __iomem *mmio_base)
+{
+ u32 high, low;
- mc_saved_header = (struct microcode_header_intel *)p->data;
+ low = read_mbox_dword(mmio_base);
+ high = read_mbox_dword(mmio_base);
- sig = mc_saved_header->sig;
- pf = mc_saved_header->pf;
- rev = mc_saved_header->rev;
- date = mc_saved_header->date;
+ return ((u64)high << 32) | low;
+}
- total_size = get_totalsize(mc_saved_header);
- data_size = get_datasize(mc_saved_header);
+static inline void write_mbox_header(void __iomem *mmio_base, u64 value)
+{
+ write_mbox_dword(mmio_base, value);
+ write_mbox_dword(mmio_base, value >> 32);
+}
- pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
- i++, sig, pf, rev, total_size,
- date & 0xffff,
- date >> 24,
- (date >> 16) & 0xff);
+static void write_mbox_data(void __iomem *mmio_base, u32 *chunk, unsigned int chunk_bytes)
+{
+ int i;
- /* Look for ext. headers: */
- if (total_size <= data_size + MC_HEADER_SIZE)
- continue;
+ /*
+ * The MMIO space is mapped as Uncached (UC). Each write arrives
+ * at the device as an individual transaction in program order.
+ * The device can then reassemble the sequence accordingly.
+ */
+ for (i = 0; i < chunk_bytes / sizeof(u32); i++)
+ write_mbox_dword(mmio_base, chunk[i]);
+}
- ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
- ext_sigcount = ext_header->count;
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
+/*
+ * Prepare for a new microcode transfer: reset hardware and record the
+ * image size.
+ */
+static void init_stage(struct staging_state *ss)
+{
+ ss->ucode_len = get_totalsize(&ucode_patch_late->hdr);
- for (j = 0; j < ext_sigcount; j++) {
- sig = ext_sig->sig;
- pf = ext_sig->pf;
+ /*
+ * Abort any ongoing process, effectively resetting the device.
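
read_mbox_header() and write_mbox_header() above move a 64-bit value through a 32-bit mailbox data register, low dword first. The split and reassembly can be checked in plain C; the two-element array below merely stands in for two successive register accesses:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t hdr = 0x1122334455667788ULL;
        uint32_t wire[2];

        /* Write side: low dword first, then high (as write_mbox_header() does). */
        wire[0] = (uint32_t)hdr;
        wire[1] = (uint32_t)(hdr >> 32);

        /* Read side reassembles in the same order (as read_mbox_header() does). */
        uint64_t back = ((uint64_t)wire[1] << 32) | wire[0];
        assert(back == hdr);
        return 0;
    }
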
+ * Unlike regular mailbox data processing requests, this + * operation does not require a status check. + */ + writel(MASK_MBOX_CTRL_ABORT, ss->mmio_base + MBOX_CONTROL_OFFSET); +} - pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n", - j, sig, pf); +/* + * Update the chunk size and decide whether another chunk can be sent. + * This accounts for remaining data and retry limits. + */ +static bool can_send_next_chunk(struct staging_state *ss, int *err) +{ + /* A page size or remaining bytes if this is the final chunk */ + ss->chunk_size = min(PAGE_SIZE, ss->ucode_len - ss->offset); - ext_sig++; - } + /* + * Each microcode image is divided into chunks, each at most + * one page size. A 10-chunk image would typically require 10 + * transactions. + * + * However, the hardware managing the mailbox has limited + * resources and may not cache the entire image, potentially + * requesting the same chunk multiple times. + * + * To tolerate this behavior, allow up to twice the expected + * number of transactions (i.e., a 10-chunk image can take up to + * 20 attempts). + * + * If the number of attempts exceeds this limit, treat it as + * exceeding the maximum allowed transfer size. + */ + if (ss->bytes_sent + ss->chunk_size > ss->ucode_len * 2) { + *err = -EMSGSIZE; + return false; } -#endif + + *err = 0; + return true; } /* - * Save this microcode patch. It will be loaded early when a CPU is - * hot-added or resumes. + * The hardware indicates completion by returning a sentinel end offset. */ -static void save_mc_for_early(struct ucode_cpu_info *uci, u8 *mc, unsigned int size) +static inline bool is_end_offset(u32 offset) { - /* Synchronization during CPU hotplug. */ - static DEFINE_MUTEX(x86_cpu_microcode_mutex); - - mutex_lock(&x86_cpu_microcode_mutex); - - save_microcode_patch(uci, mc, size); - show_saved_mc(); + return offset == UINT_MAX; +} - mutex_unlock(&x86_cpu_microcode_mutex); +/* + * Determine whether staging is complete: either the hardware signaled + * the end offset, or no more transactions are permitted (retry limit + * reached). + */ +static inline bool staging_is_complete(struct staging_state *ss, int *err) +{ + return is_end_offset(ss->offset) || !can_send_next_chunk(ss, err); } -static bool load_builtin_intel_microcode(struct cpio_data *cp) +/* + * Wait for the hardware to complete a transaction. + * Return 0 on success, or an error code on failure. + */ +static int wait_for_transaction(struct staging_state *ss) { - unsigned int eax = 1, ebx, ecx = 0, edx; - struct firmware fw; - char name[30]; + u32 timeout, status; - if (IS_ENABLED(CONFIG_X86_32)) - return false; + /* Allow time for hardware to complete the operation: */ + for (timeout = 0; timeout < MBOX_XACTION_TIMEOUT_MS; timeout++) { + msleep(1); - native_cpuid(&eax, &ebx, &ecx, &edx); + status = readl(ss->mmio_base + MBOX_STATUS_OFFSET); + /* Break out early if the hardware is ready: */ + if (status & MASK_MBOX_STATUS_READY) + break; + } - sprintf(name, "intel-ucode/%02x-%02x-%02x", - x86_family(eax), x86_model(eax), x86_stepping(eax)); + /* Check for explicit error response */ + if (status & MASK_MBOX_STATUS_ERROR) + return -EIO; - if (firmware_request_builtin(&fw, name)) { - cp->size = fw.size; - cp->data = (void *)fw.data; - return true; - } + /* + * Hardware has neither responded to the action nor signaled any + * error. Treat this as a timeout. + */ + if (!(status & MASK_MBOX_STATUS_READY)) + return -ETIMEDOUT; - return false; + return 0; } /* - * Print ucode update info. 
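
The retry budget described in can_send_next_chunk() is pure arithmetic and can be seen in isolation; a stand-alone sketch (the names and the page-sized chunk constant are illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CHUNK 4096u    /* one page per chunk, as in the patch */

    /* Total traffic may not exceed twice the image size. */
    static bool may_send(size_t bytes_sent, size_t chunk, size_t ucode_len)
    {
        return bytes_sent + chunk <= ucode_len * 2;
    }

    int main(void)
    {
        size_t len = 10 * CHUNK, sent = 0;
        unsigned int xactions = 0;

        /* A 10-chunk image therefore allows at most 20 chunk transfers. */
        while (may_send(sent, CHUNK, len)) {
            sent += CHUNK;
            xactions++;
        }
        printf("%u transactions allowed\n", xactions);    /* prints 20 */
        return 0;
    }
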
+ * Transmit a chunk of the microcode image to the hardware. + * Return 0 on success, or an error code on failure. */ -static void -print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) +static int send_data_chunk(struct staging_state *ss, void *ucode_ptr) { - pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n", - uci->cpu_sig.rev, - date & 0xffff, - date >> 24, - (date >> 16) & 0xff); -} + u32 *src_chunk = ucode_ptr + ss->offset; + u16 mbox_size; + + /* + * Write a 'request' mailbox object in this order: + * 1. Mailbox header includes total size + * 2. Command header specifies the load operation + * 3. Data section contains a microcode chunk + * + * Thus, the mailbox size is two headers plus the chunk size. + */ + mbox_size = MBOX_HEADER_SIZE * 2 + ss->chunk_size; + write_mbox_header(ss->mmio_base, MBOX_HEADER(mbox_size)); + write_mbox_header(ss->mmio_base, MBOX_CMD_LOAD); + write_mbox_data(ss->mmio_base, src_chunk, ss->chunk_size); + ss->bytes_sent += ss->chunk_size; -#ifdef CONFIG_X86_32 + /* Notify the hardware that the mailbox is ready for processing. */ + writel(MASK_MBOX_CTRL_GO, ss->mmio_base + MBOX_CONTROL_OFFSET); -static int delay_ucode_info; -static int current_mc_date; + return wait_for_transaction(ss); +} /* - * Print early updated ucode info after printk works. This is delayed info dump. + * Retrieve the next offset from the hardware response. + * Return 0 on success, or an error code on failure. */ -void show_ucode_info_early(void) +static int fetch_next_offset(struct staging_state *ss) { - struct ucode_cpu_info uci; + const u64 expected_header = MBOX_HEADER(MBOX_HEADER_SIZE + MBOX_RESPONSE_SIZE); + u32 offset, status; + u64 header; + + /* + * The 'response' mailbox returns three fields, in order: + * 1. Header + * 2. Next offset in the microcode image + * 3. Status flags + */ + header = read_mbox_header(ss->mmio_base); + offset = read_mbox_dword(ss->mmio_base); + status = read_mbox_dword(ss->mmio_base); + + /* All valid responses must start with the expected header. */ + if (header != expected_header) { + pr_err_once("staging: invalid response header (0x%llx)\n", header); + return -EBADR; + } - if (delay_ucode_info) { - intel_cpu_collect_info(&uci); - print_ucode_info(&uci, current_mc_date); - delay_ucode_info = 0; + /* + * Verify the offset: If not at the end marker, it must not + * exceed the microcode image length. + */ + if (!is_end_offset(offset) && offset > ss->ucode_len) { + pr_err_once("staging: invalid offset (%u) past the image end (%u)\n", + offset, ss->ucode_len); + return -EINVAL; } + + /* Hardware may report errors explicitly in the status field */ + if (status & MASK_MBOX_RESP_ERROR) + return -EPROTO; + + ss->offset = offset; + return 0; } /* - * At this point, we can not call printk() yet. Delay printing microcode info in - * show_ucode_info_early() until printk() works. + * Handle the staging process using the mailbox MMIO interface. The + * microcode image is transferred in chunks until completion. + * Return 0 on success or an error code on failure. 
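
Condensed, the response validation in fetch_next_offset() is three checks in a fixed order; a sketch with stand-in values rather than the real register layout:

    #include <stdint.h>

    #define END_MARKER UINT32_MAX

    /* Returns 0 if the response fields are plausible, negative otherwise. */
    static int validate_response(uint64_t header, uint64_t expected_header,
                                 uint32_t offset, uint32_t image_len,
                                 uint32_t error_bits)
    {
        if (header != expected_header)
            return -1;    /* malformed response */
        if (offset != END_MARKER && offset > image_len)
            return -2;    /* offset past the image end */
        if (error_bits)
            return -3;    /* device-reported failure */
        return 0;
    }

    int main(void)
    {
        /* End-of-transfer response: matching header (0x18 is a made-up
         * value), end marker, no error bits. */
        return validate_response(0x18ULL, 0x18ULL, END_MARKER, 40960, 0) == 0 ? 0 : 1;
    }
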
*/
-static void print_ucode(struct ucode_cpu_info *uci)
+static int do_stage(u64 mmio_pa)
 {
- struct microcode_intel *mc;
- int *delay_ucode_info_p;
- int *current_mc_date_p;
+ struct staging_state ss = {};
+ int err;
- mc = uci->mc;
- if (!mc)
- return;
+ ss.mmio_base = ioremap(mmio_pa, MBOX_REG_NUM * MBOX_REG_SIZE);
+ if (WARN_ON_ONCE(!ss.mmio_base))
+ return -EADDRNOTAVAIL;
+
+ init_stage(&ss);
+
+ /* Perform the staging process while within the retry limit */
+ while (!staging_is_complete(&ss, &err)) {
+ /* Send a chunk of microcode each time: */
+ err = send_data_chunk(&ss, ucode_patch_late);
+ if (err)
+ break;
+ /*
+ * Then, ask the hardware which piece of the image it
+ * needs next. The same piece may be sent more than once.
+ */
+ err = fetch_next_offset(&ss);
+ if (err)
+ break;
+ }
- delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
- current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
+ iounmap(ss.mmio_base);
- *delay_ucode_info_p = 1;
- *current_mc_date_p = mc->hdr.date;
+ return err;
 }
-#else
-static inline void print_ucode(struct ucode_cpu_info *uci)
+static void stage_microcode(void)
 {
- struct microcode_intel *mc;
+ unsigned int pkg_id = UINT_MAX;
+ int cpu, err;
+ u64 mmio_pa;
- mc = uci->mc;
- if (!mc)
+ if (!IS_ALIGNED(get_totalsize(&ucode_patch_late->hdr), sizeof(u32))) {
+ pr_err("Microcode image 32-bit misaligned (0x%x), staging failed.\n",
+ get_totalsize(&ucode_patch_late->hdr));
 return;
+ }
+
+ lockdep_assert_cpus_held();
+
+ /*
+ * The MMIO address is unique per package, and all the SMT
+ * primary threads are online here. Find each MMIO space by
+ * their package IDs to avoid duplicate staging.
+ */
+ for_each_cpu(cpu, cpu_primary_thread_mask) {
+ if (topology_logical_package_id(cpu) == pkg_id)
+ continue;
+
+ pkg_id = topology_logical_package_id(cpu);
- print_ucode_info(uci, mc->hdr.date);
+ err = rdmsrq_on_cpu(cpu, MSR_IA32_MCU_STAGING_MBOX_ADDR, &mmio_pa);
+ if (WARN_ON_ONCE(err))
+ return;
+
+ err = do_stage(mmio_pa);
+ if (err) {
+ pr_err("Error: staging failed (%d) for CPU%d at package %u.\n",
+ err, cpu, pkg_id);
+ return;
+ }
+ }
+
+ pr_info("Staging of patch revision 0x%x succeeded.\n", ucode_patch_late->hdr.rev);
 }
-#endif
-static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
+static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci,
+ struct microcode_intel *mc,
+ u32 *cur_rev)
 {
- struct microcode_intel *mc;
 u32 rev;
- mc = uci->mc;
 if (!mc)
- return 0;
+ return UCODE_NFOUND;
 /*
 * Save us the MSR write below - which is a particularly expensive
 * operation - when the other hyperthread has updated the microcode
 * already.
 */
- rev = intel_get_microcode_revision();
- if (rev >= mc->hdr.rev) {
- uci->cpu_sig.rev = rev;
+ *cur_rev = intel_get_microcode_revision();
+ if (*cur_rev >= mc->hdr.rev) {
+ uci->cpu_sig.rev = *cur_rev;
 return UCODE_OK;
 }
- /*
- * Writeback and invalidate caches before updating microcode to avoid
- * internal issues depending on what the microcode is updating.
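
Taken together, do_stage() is a request/response loop: transmit the chunk at the current offset, ask the device where to continue, and stop at the end marker or when the traffic budget is exhausted. A host-side simulation of just that control flow, with a toy device that walks forward one page at a time (everything here is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK      4096u
    #define END_MARKER UINT32_MAX

    /* Toy device: acknowledges each chunk and asks for the next offset. */
    static uint32_t device_next_offset(uint32_t offset, uint32_t len)
    {
        return offset + CHUNK >= len ? END_MARKER : offset + CHUNK;
    }

    int main(void)
    {
        uint32_t len = 10 * CHUNK, offset = 0, sent = 0;

        while (offset != END_MARKER) {
            uint32_t chunk = len - offset < CHUNK ? len - offset : CHUNK;

            if (sent + chunk > len * 2)    /* retry budget exhausted */
                return 1;
            sent += chunk;                               /* send_data_chunk() */
            offset = device_next_offset(offset, len);    /* fetch_next_offset() */
        }
        printf("staged %u bytes (%u transmitted)\n", (unsigned)len, (unsigned)sent);
        return 0;
    }
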
- */
- native_wbinvd();
- /* write microcode via MSR 0x79 */
- native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+ native_wrmsrq(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 rev = intel_get_microcode_revision();
 if (rev != mc->hdr.rev)
- return -1;
+ return UCODE_ERROR;
 uci->cpu_sig.rev = rev;
-
- if (early)
- print_ucode(uci);
- else
- print_ucode_info(uci, mc->hdr.date);
-
- return 0;
+ return UCODE_UPDATED;
 }
-int __init save_microcode_in_initrd_intel(void)
+static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci)
 {
- struct ucode_cpu_info uci;
- struct cpio_data cp;
+ struct microcode_intel *mc = uci->mc;
+ u32 cur_rev;
- /*
- * initrd is going away, clear patch ptr. We will scan the microcode one
- * last time before jettisoning and save a patch, if found. Then we will
- * update that pointer too, with a stable patch address to use when
- * resuming the cores.
- */
- intel_ucode_patch = NULL;
-
- if (!load_builtin_intel_microcode(&cp))
- cp = find_microcode_in_initrd(ucode_path, false);
+ return __apply_microcode(uci, mc, &cur_rev);
+}
- if (!(cp.data && cp.size))
- return 0;
+static __init bool load_builtin_intel_microcode(struct cpio_data *cp)
+{
+ unsigned int eax = 1, ebx, ecx = 0, edx;
+ struct firmware fw;
+ char name[30];
- intel_cpu_collect_info(&uci);
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
- scan_microcode(cp.data, cp.size, &uci, true);
+ native_cpuid(&eax, &ebx, &ecx, &edx);
- show_saved_mc();
+ sprintf(name, "intel-ucode/%02x-%02x-%02x",
+ x86_family(eax), x86_model(eax), x86_stepping(eax));
- return 0;
+ if (firmware_request_builtin(&fw, name)) {
+ cp->size = fw.size;
+ cp->data = (void *)fw.data;
+ return true;
+ }
+ return false;
 }
-/*
- * @res_patch, output: a pointer to the patch we found.
- */
-static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
+static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save)
 {
- static const char *path;
 struct cpio_data cp;
- bool use_pa;
-
- if (IS_ENABLED(CONFIG_X86_32)) {
- path = (const char *)__pa_nodebug(ucode_path);
- use_pa = true;
- } else {
- path = ucode_path;
- use_pa = false;
- }
- /* try built-in microcode first */
+ intel_collect_cpu_info(&uci->cpu_sig);
+
 if (!load_builtin_intel_microcode(&cp))
- cp = find_microcode_in_initrd(path, use_pa);
+ cp = find_microcode_in_initrd(ucode_path);
 if (!(cp.data && cp.size))
 return NULL;
- intel_cpu_collect_info(uci);
-
- return scan_microcode(cp.data, cp.size, uci, false);
+ return scan_microcode(cp.data, cp.size, uci, save);
 }
-void __init load_ucode_intel_bsp(void)
+/*
+ * Invoked from an early init call to save the microcode blob which was
+ * selected during early boot when mm was not usable. The microcode must be
+ * saved because initrd is going away. It's an early init call so the APs
+ * can just use the pointer and do not have to scan initrd/builtin firmware
+ * again.
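
The firmware name in load_builtin_intel_microcode() is derived from CPUID(1).EAX using the standard family/model/stepping decoding, with the extended fields folded in for families 6 and 15. A user-space sketch of that decoding, using a hypothetical signature value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int eax = 0x000506e3;    /* example signature: family 6, model 0x5e */
        unsigned int fam  = (eax >> 8) & 0xf;
        unsigned int mod  = (eax >> 4) & 0xf;
        unsigned int step = eax & 0xf;

        if (fam == 0xf)
            fam += (eax >> 20) & 0xff;            /* extended family */
        if (fam == 0x6 || fam == 0xf)
            mod |= ((eax >> 16) & 0xf) << 4;      /* extended model */

        /* Same pattern as the kernel's "intel-ucode/%02x-%02x-%02x" */
        printf("intel-ucode/%02x-%02x-%02x\n", fam, mod, step);    /* 06-5e-03 */
        return 0;
    }
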
+ */
+static int __init save_builtin_microcode(void)
 {
- struct microcode_intel *patch;
 struct ucode_cpu_info uci;
- patch = __load_ucode_intel(&uci);
- if (!patch)
- return;
+ if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
+ return 0;
- uci.mc = patch;
+ if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return 0;
- apply_microcode_early(&uci, true);
+ uci.mc = get_microcode_blob(&uci, true);
+ if (uci.mc)
+ save_microcode_patch(uci.mc);
+ return 0;
 }
+early_initcall(save_builtin_microcode);
-void load_ucode_intel_ap(void)
+/* Load microcode on BSP from initrd or builtin blobs */
+void __init load_ucode_intel_bsp(struct early_load_data *ed)
 {
- struct microcode_intel *patch, **iup;
 struct ucode_cpu_info uci;
- if (IS_ENABLED(CONFIG_X86_32))
- iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
- else
- iup = &intel_ucode_patch;
-
- if (!*iup) {
- patch = __load_ucode_intel(&uci);
- if (!patch)
- return;
+ uci.mc = get_microcode_blob(&uci, false);
+ ed->old_rev = uci.cpu_sig.rev;
- *iup = patch;
+ if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) {
+ ucode_patch_va = UCODE_BSP_LOADED;
+ ed->new_rev = uci.cpu_sig.rev;
 }
-
- uci.mc = *iup;
-
- apply_microcode_early(&uci, true);
 }
-static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
+void load_ucode_intel_ap(void)
 {
- struct microcode_header_intel *phdr;
- struct ucode_patch *iter, *tmp;
-
- list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
-
- phdr = (struct microcode_header_intel *)iter->data;
-
- if (phdr->rev <= uci->cpu_sig.rev)
- continue;
-
- if (!intel_find_matching_signature(phdr,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf))
- continue;
+ struct ucode_cpu_info uci;
- return iter->data;
- }
- return NULL;
+ uci.mc = ucode_patch_va;
+ if (uci.mc)
+ apply_microcode_early(&uci);
 }
+/* Reload microcode on resume */
 void reload_ucode_intel(void)
 {
- struct microcode_intel *p;
- struct ucode_cpu_info uci;
+ struct ucode_cpu_info uci = { .mc = ucode_patch_va, };
- intel_cpu_collect_info(&uci);
-
- p = find_patch(&uci);
- if (!p)
- return;
-
- uci.mc = p;
-
- apply_microcode_early(&uci, false);
+ if (uci.mc)
+ apply_microcode_early(&uci);
 }
 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 {
- struct cpuinfo_x86 *c = &cpu_data(cpu_num);
- unsigned int val[2];
-
- memset(csig, 0, sizeof(*csig));
-
- csig->sig = cpuid_eax(0x00000001);
-
- if ((c->x86_model >= 5) || (c->x86 > 6)) {
- /* get processor flags from MSR 0x17 */
- rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
- csig->pf = 1 << ((val[1] >> 18) & 7);
- }
-
- csig->rev = c->microcode;
-
+ intel_collect_cpu_info(csig);
 return 0;
 }
-static enum ucode_state apply_microcode_intel(int cpu)
+static enum ucode_state apply_microcode_late(int cpu)
 {
 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
- struct microcode_intel *mc;
+ struct microcode_intel *mc = ucode_patch_late;
 enum ucode_state ret;
- static int prev_rev;
- u32 rev;
+ u32 cur_rev;
- /* We should bind the task to the CPU */
- if (WARN_ON(raw_smp_processor_id() != cpu))
+ if (WARN_ON_ONCE(smp_processor_id() != cpu))
 return UCODE_ERROR;
- /* Look for a newer patch in our cache: */
- mc = find_patch(uci);
- if (!mc) {
- mc = uci->mc;
- if (!mc)
- return UCODE_NFOUND;
- }
+ ret = __apply_microcode(uci, mc, &cur_rev);
+ if (ret != UCODE_UPDATED && ret != UCODE_OK)
+ return ret;
+
+ cpu_data(cpu).microcode = uci->cpu_sig.rev;
+ if (!cpu)
+
boot_cpu_data.microcode = uci->cpu_sig.rev;
+
+ return ret;
+}
+
+static bool ucode_validate_minrev(struct microcode_header_intel *mc_header)
+{
+ int cur_rev = boot_cpu_data.microcode;
 /*
- * Save us the MSR write below - which is a particular expensive
- * operation - when the other hyperthread has updated the microcode
- * already.
+ * When late-loading, ensure the header declares a minimum revision
+ * required to perform a late-load. The previously reserved field
+ * is 0 in older microcode blobs.
 */
- rev = intel_get_microcode_revision();
- if (rev >= mc->hdr.rev) {
- ret = UCODE_OK;
- goto out;
+ if (!mc_header->min_req_ver) {
+ pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n");
+ return false;
 }
 /*
- * Writeback and invalidate caches before updating microcode to avoid
- * internal issues depending on what the microcode is updating.
+ * Check whether the current revision is greater than or equal to
+ * the minimum revision specified in the header.
 */
- native_wbinvd();
-
- /* write microcode via MSR 0x79 */
- wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-
- rev = intel_get_microcode_revision();
-
- if (rev != mc->hdr.rev) {
- pr_err("CPU%d update to revision 0x%x failed\n",
- cpu, mc->hdr.rev);
- return UCODE_ERROR;
- }
-
- if (bsp && rev != prev_rev) {
- pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
- rev,
- mc->hdr.date & 0xffff,
- mc->hdr.date >> 24,
- (mc->hdr.date >> 16) & 0xff);
- prev_rev = rev;
+ if (cur_rev < mc_header->min_req_ver) {
+ pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev);
+ pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver);
+ return false;
 }
-
- ret = UCODE_UPDATED;
-
-out:
- uci->cpu_sig.rev = rev;
- c->microcode = rev;
-
- /* Update boot_cpu_data's revision too, if we're on the BSP: */
- if (bsp)
- boot_cpu_data.microcode = rev;
-
- return ret;
+ return true;
 }
-static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
+static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter)
 {
 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- unsigned int curr_mc_size = 0, new_mc_size = 0;
- enum ucode_state ret = UCODE_OK;
- int new_rev = uci->cpu_sig.rev;
+ bool is_safe, new_is_safe = false;
+ int cur_rev = uci->cpu_sig.rev;
+ unsigned int curr_mc_size = 0;
 u8 *new_mc = NULL, *mc = NULL;
- unsigned int csig, cpf;
 while (iov_iter_count(iter)) {
 struct microcode_header_intel mc_header;
@@ -654,72 +843,66 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
 if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
 pr_err("error!
Bad data in microcode data file (truncated file?)\n"); - break; + goto fail; } /* For performance reasons, reuse mc area when possible */ if (!mc || mc_size > curr_mc_size) { - vfree(mc); - mc = vmalloc(mc_size); + kvfree(mc); + mc = kvmalloc(mc_size, GFP_KERNEL); if (!mc) - break; + goto fail; curr_mc_size = mc_size; } memcpy(mc, &mc_header, sizeof(mc_header)); data = mc + sizeof(mc_header); if (!copy_from_iter_full(data, data_size, iter) || - intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) { - break; - } + intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) + goto fail; - csig = uci->cpu_sig.sig; - cpf = uci->cpu_sig.pf; - if (has_newer_microcode(mc, csig, cpf, new_rev)) { - vfree(new_mc); - new_rev = mc_header.rev; - new_mc = mc; - new_mc_size = mc_size; - mc = NULL; /* trigger new vmalloc */ - ret = UCODE_NEW; - } - } + if (cur_rev >= mc_header.rev) + continue; + + if (!intel_find_matching_signature(mc, &uci->cpu_sig)) + continue; - vfree(mc); + is_safe = ucode_validate_minrev(&mc_header); + if (force_minrev && !is_safe) + continue; - if (iov_iter_count(iter)) { - vfree(new_mc); - return UCODE_ERROR; + kvfree(new_mc); + cur_rev = mc_header.rev; + new_mc = mc; + new_is_safe = is_safe; + mc = NULL; } + if (iov_iter_count(iter)) + goto fail; + + kvfree(mc); if (!new_mc) return UCODE_NFOUND; - vfree(uci->mc); - uci->mc = (struct microcode_intel *)new_mc; - - /* - * If early loading microcode is supported, save this mc into - * permanent memory. So it will be loaded early when a CPU is hot added - * or resumes. - */ - save_mc_for_early(uci, new_mc, new_mc_size); - - pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", - cpu, new_rev, uci->cpu_sig.rev); + ucode_patch_late = (struct microcode_intel *)new_mc; + return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW; - return ret; +fail: + kvfree(mc); + kvfree(new_mc); + return UCODE_ERROR; } static bool is_blacklisted(unsigned int cpu) @@ -729,15 +912,14 @@ static bool is_blacklisted(unsigned int cpu) /* * Late loading on model 79 with microcode revision less than 0x0b000021 * and LLC size per core bigger than 2.5MB may result in a system hang. - * This behavior is documented in item BDF90, #334165 (Intel Xeon + * This behavior is documented in item BDX90, #334165 (Intel Xeon * Processor E7-8800/4800 v4 Product Family). 
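
The minimum-revision gate from ucode_validate_minrev(), combined with the force_minrev policy applied in parse_microcode_blobs(), reduces to two small predicates; a sketch with the header field passed as a plain parameter:

    #include <stdbool.h>
    #include <stdint.h>

    /* A blob is "safe" only if it declares a minimum and we already meet it. */
    static bool minrev_safe(uint32_t min_req_ver, uint32_t cur_rev)
    {
        return min_req_ver != 0 && cur_rev >= min_req_ver;
    }

    /* Unsafe blobs are only rejected when the force_minrev policy is set. */
    static bool accept_blob(uint32_t min_req_ver, uint32_t cur_rev, bool force_minrev)
    {
        return minrev_safe(min_req_ver, cur_rev) || !force_minrev;
    }

    int main(void)
    {
        /* Running rev 0x30, blob requires >= 0x20: safe to late-load. */
        return accept_blob(0x20, 0x30, true) ? 0 : 1;
    }
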
*/
- if (c->x86 == 6 &&
- c->x86_model == INTEL_FAM6_BROADWELL_X &&
+ if (c->x86_vfm == INTEL_BROADWELL_X &&
 c->x86_stepping == 0x01 &&
 llc_size_per_core > 2621440 &&
 c->microcode < 0x0b000021) {
- pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+ pr_err_once("Erratum BDX90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
 return true;
 }
@@ -768,26 +950,49 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
 kvec.iov_base = (void *)firmware->data;
 kvec.iov_len = firmware->size;
 iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size);
- ret = generic_load_microcode(cpu, &iter);
+ ret = parse_microcode_blobs(cpu, &iter);
 release_firmware(firmware);
 return ret;
 }
+static void finalize_late_load(int result)
+{
+ if (!result)
+ update_ucode_pointer(ucode_patch_late);
+ else
+ kvfree(ucode_patch_late);
+ ucode_patch_late = NULL;
+}
+
 static struct microcode_ops microcode_intel_ops = {
- .request_microcode_fw = request_microcode_fw,
- .collect_cpu_info = collect_cpu_info,
- .apply_microcode = apply_microcode_intel,
+ .request_microcode_fw = request_microcode_fw,
+ .collect_cpu_info = collect_cpu_info,
+ .apply_microcode = apply_microcode_late,
+ .finalize_late_load = finalize_late_load,
+ .stage_microcode = stage_microcode,
+ .use_nmi = IS_ENABLED(CONFIG_X86_64),
 };
-static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
+static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
 {
 u64 llc_size = c->x86_cache_size * 1024ULL;
- do_div(llc_size, c->x86_max_cores);
+ do_div(llc_size, topology_num_cores_per_package());
+ llc_size_per_core = (unsigned int)llc_size;
+}
+
+static __init bool staging_available(void)
+{
+ u64 val;
+
+ val = x86_read_arch_cap_msr();
+ if (!(val & ARCH_CAP_MCU_ENUM))
+ return false;
- return (int)llc_size;
+ rdmsrq(MSR_IA32_MCU_ENUMERATION, val);
+ return !!(val & MCU_STAGING);
 }
 struct microcode_ops * __init init_intel_microcode(void)
@@ -800,7 +1005,12 @@ struct microcode_ops * __init init_intel_microcode(void)
 return NULL;
 }
- llc_size_per_core = calc_llc_size_per_core(c);
+ if (staging_available()) {
+ microcode_intel_ops.use_staging = true;
+ pr_info("Enabled staging feature.\n");
+ }
+
+ calc_llc_size_per_core(c);
 return &microcode_intel_ops;
 }
diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h
new file mode 100644
index 000000000000..a10b547eda1e
--- /dev/null
+++ b/arch/x86/kernel/cpu/microcode/internal.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _X86_MICROCODE_INTERNAL_H
+#define _X86_MICROCODE_INTERNAL_H
+
+#include <linux/earlycpio.h>
+#include <linux/initrd.h>
+
+#include <asm/cpu.h>
+#include <asm/microcode.h>
+
+struct device;
+
+enum ucode_state {
+ UCODE_OK = 0,
+ UCODE_NEW,
+ UCODE_NEW_SAFE,
+ UCODE_UPDATED,
+ UCODE_NFOUND,
+ UCODE_ERROR,
+ UCODE_TIMEOUT,
+ UCODE_OFFLINE,
+};
+
+struct microcode_ops {
+ enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev);
+ void (*microcode_fini_cpu)(int cpu);
+
+ /*
+ * The generic 'microcode_core' part guarantees that the callbacks
+ * below run on a target CPU when they are being called.
+ * See also the "Synchronization" section in microcode_core.c.
+ */
+ enum ucode_state (*apply_microcode)(int cpu);
+ void (*stage_microcode)(void);
+ int (*collect_cpu_info)(int cpu, struct cpu_signature *csig);
+ void (*finalize_late_load)(int result);
+ unsigned int nmi_safe : 1,
+ use_nmi : 1,
+ use_staging : 1;
+};
+
+struct early_load_data {
+ u32 old_rev;
+ u32 new_rev;
+};
+
+extern struct early_load_data early_data;
+extern struct ucode_cpu_info ucode_cpu_info[];
+extern u32 microcode_rev[NR_CPUS];
+extern u32 base_rev;
+
+struct cpio_data find_microcode_in_initrd(const char *path);
+
+#define MAX_UCODE_COUNT 128
+
+#define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))
+#define CPUID_INTEL1 QCHAR('G', 'e', 'n', 'u')
+#define CPUID_INTEL2 QCHAR('i', 'n', 'e', 'I')
+#define CPUID_INTEL3 QCHAR('n', 't', 'e', 'l')
+#define CPUID_AMD1 QCHAR('A', 'u', 't', 'h')
+#define CPUID_AMD2 QCHAR('e', 'n', 't', 'i')
+#define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D')
+
+#define CPUID_IS(a, b, c, ebx, ecx, edx) \
+ (!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c))))
+
+/*
+ * In early loading microcode phase on BSP, boot_cpu_data is not set up yet.
+ * x86_cpuid_vendor() gets vendor id for BSP.
+ *
+ * In the 32-bit AP case, accessing boot_cpu_data needs a linear address. To simplify
+ * coding, we still use x86_cpuid_vendor() to get vendor id for AP.
+ *
+ * x86_cpuid_vendor() gets vendor information directly from CPUID.
+ */
+static inline int x86_cpuid_vendor(void)
+{
+ u32 eax = 0x00000000;
+ u32 ebx, ecx = 0, edx;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
+ return X86_VENDOR_INTEL;
+
+ if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx))
+ return X86_VENDOR_AMD;
+
+ return X86_VENDOR_UNKNOWN;
+}
+
+static inline unsigned int x86_cpuid_family(void)
+{
+ u32 eax = 0x00000001;
+ u32 ebx, ecx = 0, edx;
+
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+
+ return x86_family(eax);
+}
+
+extern bool force_minrev;
+
+#ifdef CONFIG_CPU_SUP_AMD
+void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
+void load_ucode_amd_ap(unsigned int family);
+void reload_ucode_amd(unsigned int cpu);
+struct microcode_ops *init_amd_microcode(void);
+void exit_amd_microcode(void);
+#else /* CONFIG_CPU_SUP_AMD */
+static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
+static inline void load_ucode_amd_ap(unsigned int family) { }
+static inline void reload_ucode_amd(unsigned int cpu) { }
+static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
+static inline void exit_amd_microcode(void) { }
+#endif /* !CONFIG_CPU_SUP_AMD */
+
+#ifdef CONFIG_CPU_SUP_INTEL
+void load_ucode_intel_bsp(struct early_load_data *ed);
+void load_ucode_intel_ap(void);
+void reload_ucode_intel(void);
+struct microcode_ops *init_intel_microcode(void);
+#else /* CONFIG_CPU_SUP_INTEL */
+static inline void load_ucode_intel_bsp(struct early_load_data *ed) { }
+static inline void load_ucode_intel_ap(void) { }
+static inline void reload_ucode_intel(void) { }
+static inline struct microcode_ops *init_intel_microcode(void) { return NULL; }
+#endif /* !CONFIG_CPU_SUP_INTEL */
+
+#define ucode_dbg(fmt, ...) \
+({ \
+ if (IS_ENABLED(CONFIG_MICROCODE_DBG)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+})
+
+#endif /* _X86_MICROCODE_INTERNAL_H */
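
The QCHAR() macro above packs four ASCII bytes little-endian, which is what lets the three CPUID vendor-string registers be compared as plain integers. A quick check of the packing on a little-endian host (x86 is little-endian):

    #include <stdint.h>
    #include <assert.h>
    #include <string.h>

    #define QCHAR(a, b, c, d) ((a) + ((b) << 8) + ((c) << 16) + ((d) << 24))

    int main(void)
    {
        uint32_t ebx = QCHAR('G', 'e', 'n', 'u');    /* CPUID(0).EBX for Intel */
        char s[5] = { 0 };

        memcpy(s, &ebx, 4);    /* little-endian: bytes come back out in order */
        assert(strcmp(s, "Genu") == 0);
        return 0;
    }
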
