author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-09-30 13:19:08 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-09-30 13:19:08 -0700
commit | a65879b4584f98e6c1b80380f55ca8cfca82cb47 (patch)
tree | 8bcc7ca0654d0b081bc6b38c9a996235e29b0378 /arch/x86/kernel/cpu/cacheinfo.c
parent | d7ec0cf1cd79a74399b53453f9c48acbca7d6fce (diff)
parent | 27b1fd62012dfe9d3eb8ecde344d7aa673695ecf (diff)
Merge tag 'x86_cpu_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cpu updates from Borislav Petkov:
- Make UMIP instruction detection more robust
- Correct and clean up AMD CPU topology detection; document the precedence
  of the CPUID leaves used for topology parsing on AMD
- Add support for running the kernel as a guest on FreeBSD's Bhyve
  hypervisor
- Cleanups and improvements
* tag 'x86_cpu_for_v6.18_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/umip: Fix decoding of register forms of 0F 01 (SGDT and SIDT aliases)
x86/umip: Check that the instruction opcode is at least two bytes
Documentation/x86/topology: Detail CPUID leaves used for topology enumeration
x86/cpu/topology: Define AMD64_CPUID_EXT_FEAT MSR
x86/cpu/topology: Check for X86_FEATURE_XTOPOLOGY instead of passing has_xtopology
x86/cpu/cacheinfo: Simplify cacheinfo_amd_init_llc_id() using _cpuid4_info
x86/cpu: Rename and move CPU model entry for Diamond Rapids
x86/cpu: Detect FreeBSD Bhyve hypervisor
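The two x86/umip entries above tighten how the emulation path recognizes SGDT and SIDT. Both instructions are encoded as 0F 01 with ModRM.reg of 0 or 1 and exist only in memory-operand form; the register forms of 0F 01 (ModRM.mod == 3) encode unrelated instructions such as VMCALL and SWAPGS, and a buffer shorter than the two opcode bytes cannot be decoded at all. Below is a minimal, self-contained sketch of that decoding rule only; it is not the kernel's decoder, the function name is made up, and legacy prefixes are deliberately ignored.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Sketch: is this a memory-form SGDT/SIDT, i.e. opcode 0F 01 with
 * ModRM.reg 0 or 1 and ModRM.mod != 3? Register forms of 0F 01 encode
 * unrelated instructions (VMCALL, SWAPGS, ...) and must not be treated
 * as SGDT/SIDT. Legacy prefixes are ignored here for brevity.
 */
static bool is_sgdt_or_sidt(const uint8_t *insn, size_t len)
{
	uint8_t modrm, mod, reg;

	/* Need both opcode bytes plus the ModRM byte. */
	if (len < 3 || insn[0] != 0x0f || insn[1] != 0x01)
		return false;

	modrm = insn[2];
	mod = modrm >> 6;
	reg = (modrm >> 3) & 0x7;

	/* mod == 3 selects a register form, never SGDT/SIDT. */
	if (mod == 3)
		return false;

	return reg == 0 || reg == 1;	/* /0 = SGDT, /1 = SIDT */
}

int main(void)
{
	const uint8_t sgdt_mem[] = { 0x0f, 0x01, 0x00 };	/* sgdt [rax] */
	const uint8_t vmcall[]   = { 0x0f, 0x01, 0xc1 };	/* register form */

	return !(is_sgdt_or_sidt(sgdt_mem, sizeof(sgdt_mem)) &&
		 !is_sgdt_or_sidt(vmcall, sizeof(vmcall)));
}
```

The kernel's implementation goes through the common x86 instruction decoder rather than raw bytes; the point here is only the opcode-length and ModRM mod/reg checks.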
Diffstat (limited to 'arch/x86/kernel/cpu/cacheinfo.c')
-rw-r--r-- | arch/x86/kernel/cpu/cacheinfo.c | 48
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index adfa7e8bb865..51a95b07831f 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -290,6 +290,22 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 }
 
 /*
+ * The max shared threads number comes from CPUID(0x4) EAX[25-14] with input
+ * ECX as cache index. Then right shift apicid by the number's order to get
+ * cache id for this cache node.
+ */
+static unsigned int get_cache_id(u32 apicid, const struct _cpuid4_info *id4)
+{
+	unsigned long num_threads_sharing;
+	int index_msb;
+
+	num_threads_sharing = 1 + id4->eax.split.num_threads_sharing;
+	index_msb = get_count_order(num_threads_sharing);
+
+	return apicid >> index_msb;
+}
+
+/*
  * AMD/Hygon CPUs may have multiple LLCs if L3 caches exist.
  */
@@ -312,18 +328,11 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, u16 die_id)
 		 * Newer families: LLC ID is calculated from the number
 		 * of threads sharing the L3 cache.
 		 */
-		u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
 		u32 llc_index = find_num_cache_leaves(c) - 1;
+		struct _cpuid4_info id4 = {};
 
-		cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);
-		if (eax)
-			num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
-
-		if (num_sharing_cache) {
-			int index_msb = get_count_order(num_sharing_cache);
-
-			c->topo.llc_id = c->topo.apicid >> index_msb;
-		}
+		if (!amd_fill_cpuid4_info(llc_index, &id4))
+			c->topo.llc_id = get_cache_id(c->topo.apicid, &id4);
 	}
 }
@@ -598,27 +607,12 @@ int init_cache_level(unsigned int cpu)
 	return 0;
 }
 
-/*
- * The max shared threads number comes from CPUID(0x4) EAX[25-14] with input
- * ECX as cache index. Then right shift apicid by the number's order to get
- * cache id for this cache node.
- */
-static void get_cache_id(int cpu, struct _cpuid4_info *id4)
-{
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-	unsigned long num_threads_sharing;
-	int index_msb;
-
-	num_threads_sharing = 1 + id4->eax.split.num_threads_sharing;
-	index_msb = get_count_order(num_threads_sharing);
-	id4->id = c->topo.apicid >> index_msb;
-}
-
 int populate_cache_leaves(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *ci = this_cpu_ci->info_list;
 	u8 cpu_vendor = boot_cpu_data.x86_vendor;
+	u32 apicid = cpu_data(cpu).topo.apicid;
 	struct amd_northbridge *nb = NULL;
 	struct _cpuid4_info id4 = {};
 	int idx, ret;
@@ -628,7 +622,7 @@ int populate_cache_leaves(unsigned int cpu)
 	if (ret)
 		return ret;
 
-	get_cache_id(cpu, &id4);
+	id4.id = get_cache_id(apicid, &id4);
 
 	if (cpu_vendor == X86_VENDOR_AMD || cpu_vendor == X86_VENDOR_HYGON)
 		nb = amd_init_l3_cache(idx);
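To see the arithmetic of the new helper in isolation: the sketch below mirrors what get_cache_id() computes, but as a standalone userspace program. The EAX value and APIC IDs are made up for illustration, and count_order() is a userspace stand-in for the kernel's get_count_order(); this is not kernel code.

```c
#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's get_count_order(): ceil(log2(n)). */
static int count_order(uint32_t n)
{
	return n <= 1 ? 0 : 32 - __builtin_clz(n - 1);
}

/*
 * Same math as get_cache_id(): the sharing count is CPUID(0x4) (or
 * 0x8000001d on AMD) EAX[25:14] plus one; the cache id is the APIC ID
 * shifted right by the order of that count.
 */
static uint32_t cache_id(uint32_t apicid, uint32_t eax)
{
	uint32_t num_threads_sharing = ((eax >> 14) & 0xfff) + 1;

	return apicid >> count_order(num_threads_sharing);
}

int main(void)
{
	/* Hypothetical leaf value: EAX[25:14] = 15, i.e. 16 threads share the cache. */
	uint32_t eax = 15u << 14;

	/* APIC IDs 0..15 map to cache id 0, 16..31 to cache id 1, and so on. */
	for (uint32_t apicid = 0; apicid < 48; apicid += 16)
		printf("apicid %2u -> cache id %u\n", apicid, cache_id(apicid, eax));

	return 0;
}
```

This also shows why the refactoring works for the LLC ID path: cacheinfo_amd_init_llc_id() now feeds the same shift-by-sharing-order computation with the leaf filled in by amd_fill_cpuid4_info(), instead of re-deriving the sharing count from raw cpuid_count() output.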