Diffstat (limited to 'drivers/clocksource/hyperv_timer.c')
-rw-r--r--	drivers/clocksource/hyperv_timer.c	| 184
1 file changed, 116 insertions, 68 deletions
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index c0cef92b12b8..10356d4ec55c 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,12 +22,14 @@
 #include <linux/irq.h>
 #include <linux/acpi.h>
 #include <linux/hyperv.h>
+#include <linux/export.h>
 #include <clocksource/hyperv_timer.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
 #include <asm/mshyperv.h>
 
 static struct clock_event_device __percpu *hv_clock_event;
-static u64 hv_sched_clock_offset __ro_after_init;
+/* Note: offset can hold negative values after hibernation. */
+static u64 hv_sched_clock_offset __read_mostly;
 
 /*
  * If false, we're using the old mechanism for stimer0 interrupts
@@ -49,7 +51,7 @@ static bool direct_mode_enabled;
 
 static int stimer0_irq = -1;
 static int stimer0_message_sint;
-static DEFINE_PER_CPU(long, stimer0_evt);
+static __maybe_unused DEFINE_PER_CPU(long, stimer0_evt);
 
 /*
  * Common code for stimer0 interrupts coming via Direct Mode or
@@ -68,7 +70,7 @@ EXPORT_SYMBOL_GPL(hv_stimer0_isr);
  * stimer0 interrupt handler for architectures that support
  * per-cpu interrupts, which also implies Direct Mode.
  */
-static irqreturn_t hv_stimer0_percpu_isr(int irq, void *dev_id)
+static irqreturn_t __maybe_unused hv_stimer0_percpu_isr(int irq, void *dev_id)
 {
 	hv_stimer0_isr();
 	return IRQ_HANDLED;
@@ -81,14 +83,14 @@ static int hv_ce_set_next_event(unsigned long delta,
 
 	current_tick = hv_read_reference_counter();
 	current_tick += delta;
-	hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
+	hv_set_msr(HV_MSR_STIMER0_COUNT, current_tick);
 	return 0;
 }
 
 static int hv_ce_shutdown(struct clock_event_device *evt)
 {
-	hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
-	hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
+	hv_set_msr(HV_MSR_STIMER0_COUNT, 0);
+	hv_set_msr(HV_MSR_STIMER0_CONFIG, 0);
 	if (direct_mode_enabled && stimer0_irq >= 0)
 		disable_percpu_irq(stimer0_irq);
 
@@ -119,7 +121,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
 		timer_cfg.direct_mode = 0;
 		timer_cfg.sintx = stimer0_message_sint;
 	}
-	hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
+	hv_set_msr(HV_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
 	return 0;
 }
 
@@ -137,7 +139,21 @@ static int hv_stimer_init(unsigned int cpu)
 	ce->name = "Hyper-V clockevent";
 	ce->features = CLOCK_EVT_FEAT_ONESHOT;
 	ce->cpumask = cpumask_of(cpu);
-	ce->rating = 1000;
+
+	/*
+	 * Lower the rating of the Hyper-V timer in a TDX VM without paravisor,
+	 * so the local APIC timer (lapic_clockevent) is the default timer in
+	 * such a VM. The Hyper-V timer is not preferred in such a VM because
+	 * it depends on the slow VM Reference Counter MSR (the Hyper-V TSC
+	 * page is not enabled in such a VM because the VM uses Invariant TSC
+	 * as a better clocksource and it's challenging to mark the Hyper-V
+	 * TSC page shared in very early boot).
+	 */
+	if (!ms_hyperv.paravisor_present && hv_isolation_type_tdx())
+		ce->rating = 90;
+	else
+		ce->rating = 1000;
+
 	ce->set_state_shutdown = hv_ce_shutdown;
 	ce->set_state_oneshot = hv_ce_set_oneshot;
 	ce->set_next_event = hv_ce_set_next_event;
@@ -196,6 +212,7 @@ void __weak hv_remove_stimer0_handler(void)
 {
 };
 
+#ifdef CONFIG_ACPI
 /* Called only on architectures with per-cpu IRQs (i.e., not x86/x64) */
 static int hv_setup_stimer0_irq(void)
 {
@@ -230,6 +247,16 @@ static void hv_remove_stimer0_irq(void)
 		stimer0_irq = -1;
 	}
 }
+#else
+static int hv_setup_stimer0_irq(void)
+{
+	return 0;
+}
+
+static void hv_remove_stimer0_irq(void)
+{
+}
+#endif
 
 /* hv_stimer_alloc - Global initialization of the clockevent and stimer0 */
 int hv_stimer_alloc(bool have_percpu_irqs)
@@ -354,6 +381,20 @@ void hv_stimer_global_cleanup(void)
 }
 EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
 
+static __always_inline u64 read_hv_clock_msr(void)
+{
+	/*
+	 * Read the partition counter to get the current tick count. This count
+	 * is set to 0 when the partition is created and is incremented in 100
+	 * nanosecond units.
+	 *
+	 * Use hv_raw_get_msr() because this function is used from
+	 * noinstr. Notably, while HV_MSR_TIME_REF_COUNT is a synthetic
+	 * register it doesn't need the GHCB path.
+	 */
+	return hv_raw_get_msr(HV_MSR_TIME_REF_COUNT);
+}
+
 /*
  * Code and definitions for the Hyper-V clocksources. Two
  * clocksources are defined: one that reads the Hyper-V defined MSR, and
@@ -365,7 +406,7 @@ EXPORT_SYMBOL_GPL(hv_stimer_global_cleanup);
 static union {
 	struct ms_hyperv_tsc_page page;
 	u8 reserved[PAGE_SIZE];
-} tsc_pg __aligned(PAGE_SIZE);
+} tsc_pg __bss_decrypted __aligned(PAGE_SIZE);
 
 static struct ms_hyperv_tsc_page *tsc_page = &tsc_pg.page;
 static unsigned long tsc_pfn;
@@ -382,14 +423,20 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 }
 EXPORT_SYMBOL_GPL(hv_get_tsc_page);
 
-static u64 notrace read_hv_clock_tsc(void)
+static __always_inline u64 read_hv_clock_tsc(void)
 {
-	u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
+	u64 cur_tsc, time;
 
-	if (current_tick == U64_MAX)
-		current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
+	/*
+	 * The Hyper-V Top-Level Functional Spec (TLFS), section Timers,
+	 * subsection Reference Counter, guarantees that the TSC and MSR
+	 * times are in sync and monotonic. Therefore we can fall back
+	 * to the MSR in case the TSC page indicates unavailability.
+	 */
+	if (!hv_read_tsc_page_tsc(tsc_page, &cur_tsc, &time))
+		time = read_hv_clock_msr();
 
-	return current_tick;
+	return time;
 }
 
 static u64 notrace read_hv_clock_tsc_cs(struct clocksource *arg)
@@ -397,7 +444,7 @@
 	return read_hv_clock_tsc();
 }
 
-static u64 notrace read_hv_sched_clock_tsc(void)
+static u64 noinstr read_hv_sched_clock_tsc(void)
 {
 	return (read_hv_clock_tsc() - hv_sched_clock_offset) *
 		(NSEC_PER_SEC / HV_CLOCK_HZ);
@@ -408,9 +455,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
 	union hv_reference_tsc_msr tsc_msr;
 
 	/* Disable the TSC page */
-	tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+	tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
 	tsc_msr.enable = 0;
-	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+	hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
 }
 
@@ -419,10 +466,21 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
 	union hv_reference_tsc_msr tsc_msr;
 
 	/* Re-enable the TSC page */
-	tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+	tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
 	tsc_msr.enable = 1;
 	tsc_msr.pfn = tsc_pfn;
-	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+	hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+}
+
+/*
+ * Called during resume from hibernation, from the overridden
+ * x86_platform.restore_sched_clock_state routine. This adjusts the offset
+ * used to calculate time for the Hyper-V TSC page based sched_clock, to
+ * account for time spent before hibernation.
+ */
+void hv_adj_sched_clock_offset(u64 offset)
+{
+	hv_sched_clock_offset -= offset;
 }
 
 #ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
@@ -449,30 +507,14 @@ static struct clocksource hyperv_cs_tsc = {
 #endif
 };
 
-static u64 notrace read_hv_clock_msr(void)
-{
-	/*
-	 * Read the partition counter to get the current tick count. This count
-	 * is set to 0 when the partition is created and is incremented in
-	 * 100 nanosecond units.
-	 */
-	return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
-}
-
 static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
 {
 	return read_hv_clock_msr();
 }
 
-static u64 notrace read_hv_sched_clock_msr(void)
-{
-	return (read_hv_clock_msr() - hv_sched_clock_offset) *
-		(NSEC_PER_SEC / HV_CLOCK_HZ);
-}
-
 static struct clocksource hyperv_cs_msr = {
 	.name = "hyperv_clocksource_msr",
-	.rating = 500,
+	.rating = 495,
 	.read = read_hv_clock_msr_cs,
 	.mask = CLOCKSOURCE_MASK(64),
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -502,29 +544,34 @@ static __always_inline void hv_setup_sched_clock(void *sched_clock)
 static __always_inline void hv_setup_sched_clock(void *sched_clock) {}
 #endif /* CONFIG_GENERIC_SCHED_CLOCK */
 
-static bool __init hv_init_tsc_clocksource(void)
+static void __init hv_init_tsc_clocksource(void)
 {
 	union hv_reference_tsc_msr tsc_msr;
 
-	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
-		return false;
-
 	/*
+	 * When running as a guest partition:
+	 *
 	 * If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
 	 * handles frequency and offset changes due to live migration,
	 * pause/resume, and other VM management operations. So lower the
 	 * Hyper-V Reference TSC rating, causing the generic TSC to be used.
 	 * TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
 	 * TSC will be preferred over the virtualized ARM64 arch counter.
-	 * While the Hyper-V MSR clocksource won't be used since the
-	 * Reference TSC clocksource is present, change its rating as
-	 * well for consistency.
+	 *
+	 * When running as the root partition:
+	 *
+	 * There is no HV_ACCESS_TSC_INVARIANT feature. Always lower the rating
+	 * of the Hyper-V Reference TSC.
 	 */
-	if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+	if ((ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) ||
+	    hv_root_partition()) {
 		hyperv_cs_tsc.rating = 250;
-		hyperv_cs_msr.rating = 250;
+		hyperv_cs_msr.rating = 245;
 	}
 
+	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
+		return;
+
 	hv_read_reference_counter = read_hv_clock_tsc;
 
 	/*
@@ -543,44 +590,45 @@ static bool __init hv_init_tsc_clocksource(void)
 	 * thus TSC clocksource will work even without the real TSC page
 	 * mapped.
 	 */
-	tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
-	if (hv_root_partition)
+	tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
+	if (hv_root_partition())
 		tsc_pfn = tsc_msr.pfn;
 	else
 		tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
 
 	tsc_msr.enable = 1;
 	tsc_msr.pfn = tsc_pfn;
-	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+	hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
 
 	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
 
-	hv_sched_clock_offset = hv_read_reference_counter();
-	hv_setup_sched_clock(read_hv_sched_clock_tsc);
-
-	return true;
+	/*
+	 * If TSC is invariant, then let it stay as the sched clock since it
+	 * will be faster than reading the TSC page. But if not invariant, use
+	 * the TSC page so that live migrations across hosts with different
+	 * frequencies are handled correctly.
+	 */
+	if (!(ms_hyperv.features & HV_ACCESS_TSC_INVARIANT)) {
+		hv_sched_clock_offset = hv_read_reference_counter();
+		hv_setup_sched_clock(read_hv_sched_clock_tsc);
+	}
 }
 
 void __init hv_init_clocksource(void)
 {
 	/*
-	 * Try to set up the TSC page clocksource. If it succeeds, we're
-	 * done. Otherwise, set up the MSR clocksource. At least one of
-	 * these will always be available except on very old versions of
-	 * Hyper-V on x86. In that case we won't have a Hyper-V
+	 * Try to set up the TSC page clocksource, then the MSR clocksource.
+	 * At least one of these will always be available except on very old
+	 * versions of Hyper-V on x86. In that case we won't have a Hyper-V
 	 * clocksource, but Linux will still run with a clocksource based
 	 * on the emulated PIT or LAPIC timer.
+	 *
+	 * Never use the MSR clocksource as sched clock. It's too slow.
+	 * Better to use the native sched clock as the fallback.
 	 */
-	if (hv_init_tsc_clocksource())
-		return;
-
-	if (!(ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE))
-		return;
-
-	hv_read_reference_counter = read_hv_clock_msr;
-	clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
+	hv_init_tsc_clocksource();
 
-	hv_sched_clock_offset = hv_read_reference_counter();
-	hv_setup_sched_clock(read_hv_sched_clock_msr);
+	if (ms_hyperv.features & HV_MSR_TIME_REF_COUNT_AVAILABLE)
+		clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
 }
 
 void __init hv_remap_tsc_clocksource(void)
@@ -588,7 +636,7 @@
 	if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
 		return;
 
-	if (!hv_root_partition) {
+	if (!hv_root_partition()) {
		WARN(1, "%s: attempt to remap TSC page in guest partition\n",
		     __func__);
		return;
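A note on the fallback in read_hv_clock_tsc() above: hv_read_tsc_page_tsc() implements the TLFS reference-TSC-page algorithm, which samples a sequence counter, computes ((tsc * scale) >> 64) + offset, and retries if the sequence changed mid-read; a sequence value of zero marks the page invalid, and only then is the slow TIME_REF_COUNT MSR used. A minimal sketch of that algorithm under those assumptions; read_guest_tsc() and read_time_ref_count_msr() are hypothetical stand-ins for the kernel's primitives, not names from this file:

	#include <stdint.h>

	/* Layout of the fields used here, as in struct ms_hyperv_tsc_page. */
	struct tsc_page {
		volatile uint32_t tsc_sequence;	/* 0 means the page is not valid */
		uint32_t reserved;
		volatile uint64_t tsc_scale;	/* 64.64 fixed-point multiplier */
		volatile int64_t tsc_offset;	/* signed correction added after scaling */
	};

	/* Hypothetical stand-ins for rdtsc and a raw HV_MSR_TIME_REF_COUNT read. */
	uint64_t read_guest_tsc(void);
	uint64_t read_time_ref_count_msr(void);

	static uint64_t read_hv_reference_time(const struct tsc_page *pg)
	{
		uint32_t seq;
		uint64_t tsc, scale;
		int64_t offset;

		do {
			seq = pg->tsc_sequence;
			if (seq == 0)		/* page invalid: fall back to the MSR */
				return read_time_ref_count_msr();
			tsc = read_guest_tsc();
			scale = pg->tsc_scale;
			offset = pg->tsc_offset;
		} while (pg->tsc_sequence != seq);	/* retry if updated mid-read */

		/* Reference time = ((tsc * scale) >> 64) + offset, in 100 ns units. */
		return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + (uint64_t)offset;
	}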
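The unit conversion in read_hv_sched_clock_tsc() is exact integer arithmetic: the reference counter ticks at HV_CLOCK_HZ = 10 MHz, so NSEC_PER_SEC / HV_CLOCK_HZ is exactly 100 and each tick is 100 ns. hv_adj_sched_clock_offset() shrinks the boot-time offset so post-resume readings account for time accumulated before hibernation; since this is modular u64 arithmetic, the offset may wrap "negative" without breaking the formula, which is why the variable is now __read_mostly rather than __ro_after_init. A worked sketch with made-up tick values:

	#include <stdint.h>

	#define NSEC_PER_SEC	1000000000ULL
	#define HV_CLOCK_HZ	10000000ULL	/* 10 MHz: one tick per 100 ns */

	static uint64_t sched_clock_offset;	/* stands in for hv_sched_clock_offset */

	static uint64_t sched_clock_ns(uint64_t ticks_now)
	{
		/* Same formula as read_hv_sched_clock_tsc(): delta ticks * 100. */
		return (ticks_now - sched_clock_offset) * (NSEC_PER_SEC / HV_CLOCK_HZ);
	}

	int main(void)
	{
		sched_clock_offset = 50000;	/* counter value captured at boot */

		/* (150000 - 50000) * 100 = 10,000,000 ns = 10 ms. */
		uint64_t before = sched_clock_ns(150000);

		/*
		 * Adjust for 30000 ticks (3 ms) of pre-hibernation time, as
		 * hv_adj_sched_clock_offset() does: subtracting from the offset
		 * moves sched_clock forward by exactly that much, even if the
		 * offset wraps below zero as a u64.
		 */
		sched_clock_offset -= 30000;
		uint64_t after = sched_clock_ns(150000);	/* now 13,000,000 ns */

		return !(before == 10000000 && after == 13000000);
	}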
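For reference, suspend_hv_clock_tsc(), resume_hv_clock_tsc(), and hv_init_tsc_clocksource() all program HV_MSR_REFERENCE_TSC through the enable and pfn fields of union hv_reference_tsc_msr. A sketch of the layout those read-modify-write sequences assume; the bit positions follow my reading of the TLFS and are not taken from this patch:

	#include <stdint.h>

	/* Sketch of union hv_reference_tsc_msr as used by suspend/resume above. */
	union hv_reference_tsc_msr_sketch {
		uint64_t as_uint64;
		struct {
			uint64_t enable:1;	/* bit 0: TSC page enabled */
			uint64_t reserved:11;	/* bits 11:1 */
			uint64_t pfn:52;	/* bits 63:12: guest PFN of the page */
		};
	};

	/* Mirrors resume_hv_clock_tsc(): read-modify-write to re-enable the page. */
	static uint64_t build_enable_value(uint64_t current_msr, uint64_t tsc_pfn)
	{
		union hv_reference_tsc_msr_sketch m = { .as_uint64 = current_msr };

		m.enable = 1;
		m.pfn = tsc_pfn;
		return m.as_uint64;
	}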
