-rw-r--r--  arch/x86/include/asm/cpufeature.h            5
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c   8
-rw-r--r--  arch/x86/kernel/cpu/amd.c                    9
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 18
-rw-r--r--  arch/x86/kernel/process.c                    2
-rw-r--r--  drivers/acpi/processor_idle.c                6
6 files changed, 40 insertions, 8 deletions
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index cfdf8c2c5c31..ea408dcba513 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -80,7 +80,6 @@
 #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_SYSCALL32	(3*32+14) /* "" syscall in ia32 userspace */
@@ -92,6 +91,8 @@
 #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_AMDC1E	(3*32+21) /* AMD C1E detected */
 #define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
@@ -117,6 +118,7 @@
 #define X86_FEATURE_XSAVE	(4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE	(4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX		(4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_HYPERVISOR	(4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE	(5*32+ 2) /* "rng" RNG present (xstore) */
@@ -237,6 +239,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg		1
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index ef8f831af823..2cf23634b6d9 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -120,9 +120,17 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
 						 & core_select_mask;
 	c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+	/*
+	 * Reinit the apicid, now that we have extended initial_apicid.
+	 */
+	c->apicid = phys_pkg_id(c->initial_apicid, 0);
 #else
 	c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
 	c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
+	/*
+	 * Reinit the apicid, now that we have extended initial_apicid.
+	 */
+	c->apicid = phys_pkg_id(0);
 #endif
 
 	c->x86_max_cores = (core_level_siblings / smp_num_siblings);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8f1e31db2ad5..7c878f6aa919 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -283,9 +283,14 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);
 
-	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-	if (c->x86_power & (1<<8))
+	/*
+	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
+	 * with P/T states and does not stop in deep C-states
+	 */
+	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+	}
 
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index cd413d9a0218..8ea6929e974c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -40,6 +40,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
 		c->x86_cache_alignment = 128;
 #endif
+
+	/*
+	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
+	 * with P/T states and does not stop in deep C-states
+	 */
+	if (c->x86_power & (1 << 8)) {
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+	}
+
 }
 
 #ifdef CONFIG_X86_32
@@ -241,6 +251,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 
 	intel_workarounds(c);
 
+	/*
+	 * Detect the extended topology information if available. This
+	 * will reinitialise the initial_apicid which will be used
+	 * in init_intel_cacheinfo()
+	 */
+	detect_extended_topology(c);
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -308,7 +325,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-	detect_extended_topology(c);
 	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
 		/*
 		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c27af49a4ede..cff9a50e389d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -286,7 +286,7 @@ static void c1e_idle(void)
 		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
 		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
 			c1e_detected = 1;
-			if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 				mark_tsc_unstable("TSC halt in AMD C1E");
 			printk(KERN_INFO "System has AMD C1E enabled\n");
 			set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5f8d746a9b81..38aca048e951 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -374,15 +374,15 @@ static int tsc_halts_in_c(int state)
 {
 	switch (boot_cpu_data.x86_vendor) {
 	case X86_VENDOR_AMD:
+	case X86_VENDOR_INTEL:
 		/*
 		 * AMD Fam10h TSC will tick in all
 		 * C/P/S0/S1 states when this bit is set.
 		 */
-		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
 			return 0;
+
 		/*FALL THROUGH*/
-	case X86_VENDOR_INTEL:
-		/* Several cases known where TSC halts in C2 too */
 	default:
 		return state > ACPI_STATE_C1;
 	}
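For reference (not part of the commit): the hardware bit both the AMD and Intel hunks key off is CPUID leaf 0x80000007, EDX bit 8, the invariant/non-stop TSC bit. A minimal user-space sketch, assuming GCC or clang's <cpuid.h> on an x86 machine, that reads the same bit:

/*
 * Illustrative only; this program is not part of the patch above.
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000007: Advanced Power Management information. */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000007 not supported");
		return 1;
	}

	/*
	 * EDX bit 8: TSC runs at a constant rate across P/T states and does
	 * not stop in deep C-states. The patch sets both
	 * X86_FEATURE_CONSTANT_TSC and X86_FEATURE_NONSTOP_TSC from this bit.
	 */
	printf("invariant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
	return 0;
}

On kernels with this change applied, the same information is visible as the constant_tsc and nonstop_tsc flags in /proc/cpuinfo.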
