Diffstat (limited to 'arch/x86/hyperv')
-rw-r--r--  arch/x86/hyperv/hv_apic.c  59
-rw-r--r--  arch/x86/hyperv/mmu.c      73
2 files changed, 64 insertions(+), 68 deletions(-)
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 402338365651..0c3c9f8fee77 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -31,6 +31,8 @@
#include <asm/mshyperv.h>
#include <asm/apic.h>
+#include <asm/trace/hyperv.h>
+
static struct apic orig_apic;
static u64 hv_apic_icr_read(void)
@@ -99,6 +101,9 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
int nr_bank = 0;
int ret = 1;
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+
local_irq_save(flags);
arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -130,10 +135,10 @@ ipi_mask_ex_done:
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
int cur_cpu, vcpu;
- struct ipi_arg_non_ex **arg;
- struct ipi_arg_non_ex *ipi_arg;
+ struct ipi_arg_non_ex ipi_arg;
int ret = 1;
- unsigned long flags;
+
+ trace_hyperv_send_ipi_mask(mask, vector);
if (cpumask_empty(mask))
return true;
@@ -144,40 +149,43 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return false;
- if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- return __send_ipi_mask_ex(mask, vector);
-
- local_irq_save(flags);
- arg = (struct ipi_arg_non_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
-
- ipi_arg = *arg;
- if (unlikely(!ipi_arg))
- goto ipi_mask_done;
-
- ipi_arg->vector = vector;
- ipi_arg->reserved = 0;
- ipi_arg->cpu_mask = 0;
+ /*
+ * From the supplied CPU set we need to figure out if we can get away
+ * with the cheaper HVCALL_SEND_IPI hypercall. This is possible when the
+ * highest VP number in the set is < 64. As VP numbers are usually in
+ * ascending order and match Linux CPU ids, here is an optimization:
+ * check the VP number of the highest bit in the supplied set first,
+ * so we can quickly tell whether the HVCALL_SEND_IPI_EX hypercall is
+ * a must. We still check all VP numbers when walking the supplied
+ * CPU set to remain correct in all cases.
+ */
+ if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
+ goto do_ex_hypercall;
+
+ ipi_arg.vector = vector;
+ ipi_arg.cpu_mask = 0;
for_each_cpu(cur_cpu, mask) {
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
if (vcpu == VP_INVAL)
- goto ipi_mask_done;
+ return true;
/*
* This particular version of the IPI hypercall can
* only target up to 64 CPUs.
*/
if (vcpu >= 64)
- goto ipi_mask_done;
+ goto do_ex_hypercall;
- __set_bit(vcpu, (unsigned long *)&ipi_arg->cpu_mask);
+ __set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
}
- ret = hv_do_hypercall(HVCALL_SEND_IPI, ipi_arg, NULL);
-
-ipi_mask_done:
- local_irq_restore(flags);
+ ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+ ipi_arg.cpu_mask);
return ((ret == 0) ? true : false);
+
+do_ex_hypercall:
+ return __send_ipi_mask_ex(mask, vector);
}
static bool __send_ipi_one(int cpu, int vector)
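
The hunk above also drops local_irq_save()/local_irq_restore(): the non-EX input is just the 8-byte vector word plus a 64-bit CPU mask, i.e. 16 bytes, small enough for hv_do_fast_hypercall16(), which passes the input in registers instead of staging it in the per-CPU hypercall input page. A standalone sketch of the difference between the two calling conventions; fake_hypercall(), do_hypercall() and do_fast_hypercall16() below are illustrative stand-ins, not the kernel's implementations:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HVCALL_SEND_IPI 0x000b	/* control code, per the Hyper-V TLFS */

/* Stand-in for the shared per-CPU hypercall input page. */
static uint8_t input_page[4096];

/* Fake backend: just report how the input bytes arrived. */
static uint64_t fake_hypercall(uint64_t control, const void *input)
{
	printf("call 0x%llx, input at %p\n",
	       (unsigned long long)control, input);
	return 0;
}

/*
 * "Slow" convention: marshal the input into memory the hypervisor
 * reads. The kernel must keep interrupts off around this so the
 * per-CPU page is not reused underneath it.
 */
static uint64_t do_hypercall(uint64_t control, const void *in, size_t len)
{
	memcpy(input_page, in, len);	/* the irqs-off window in the kernel */
	return fake_hypercall(control, input_page);
}

/*
 * "Fast" convention: up to 16 bytes travel as two u64 register
 * arguments, so no shared buffer and no irq juggling are needed.
 */
static uint64_t do_fast_hypercall16(uint64_t control, uint64_t in1, uint64_t in2)
{
	uint64_t regs[2] = { in1, in2 };	/* registers, modeled as locals */
	return fake_hypercall(control, regs);
}

int main(void)
{
	struct { uint32_t vector; uint32_t reserved; uint64_t cpu_mask; } arg =
		{ 0xfb, 0, 0x2a };

	do_hypercall(HVCALL_SEND_IPI, &arg, sizeof(arg));		/* old path */
	do_fast_hypercall16(HVCALL_SEND_IPI, arg.vector, arg.cpu_mask);	/* new path */
	return 0;
}
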
@@ -233,10 +241,7 @@ static void hv_send_ipi_self(int vector)
void __init hv_apic_init(void)
{
if (ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) {
- if ((ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- pr_info("Hyper-V: Using ext hypercalls for IPI\n");
- else
- pr_info("Hyper-V: Using IPI hypercalls\n");
+ pr_info("Hyper-V: Using IPI hypercalls\n");
/*
* Set the IPI entry points.
*/
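
Taken together, the hv_apic.c changes reduce to one dispatch rule: if every target VP number fits in a single 64-bit mask, use the cheap HVCALL_SEND_IPI fast call; otherwise fall through to __send_ipi_mask_ex(), which now performs its own HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED check. A minimal userspace model of that rule, assuming (as the comment in the patch does) that VP numbers equal Linux CPU ids; send_ipi_fast() and send_ipi_ex() are invented placeholders:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two hypercalls; they only print here. */
static bool send_ipi_fast(uint32_t vector, uint64_t cpu_mask)
{
	printf("HVCALL_SEND_IPI (fast): vector %u, mask 0x%llx\n",
	       vector, (unsigned long long)cpu_mask);
	return true;
}

static bool send_ipi_ex(uint32_t vector, const int *cpus, int n)
{
	(void)cpus;	/* a real EX call would encode these as a sparse set */
	printf("HVCALL_SEND_IPI_EX: vector %u, %d cpus\n", vector, n);
	return true;
}

/*
 * Mirrors the patched __send_ipi_mask(): peek at the highest VP number
 * first for a quick escape to the EX variant, build the 64-bit mask
 * otherwise, and still bail to EX if an unsorted set hides a VP >= 64.
 */
static bool send_ipi_mask(const int *cpus, int n, uint32_t vector)
{
	uint64_t mask = 0;
	int i;

	if (n == 0)
		return true;
	if (cpus[n - 1] >= 64)	/* cpumask_last() analogue */
		return send_ipi_ex(vector, cpus, n);

	for (i = 0; i < n; i++) {
		if (cpus[i] >= 64)	/* remain correct in all cases */
			return send_ipi_ex(vector, cpus, n);
		mask |= 1ULL << cpus[i];
	}
	return send_ipi_fast(vector, mask);
}

int main(void)
{
	int small[] = { 0, 1, 5 }, big[] = { 0, 1, 100 };

	send_ipi_mask(small, 3, 0xfb);	/* fits: fast hypercall */
	send_ipi_mask(big, 3, 0xfb);	/* VP 100 forces the EX path */
	return 0;
}
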
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index de27615c51ea..0d90e515ec98 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -16,6 +16,8 @@
/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info);
/*
* Fills in gva_list starting from offset. Returns the number of items added.
@@ -93,10 +95,24 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
if (cpumask_equal(cpus, cpu_present_mask)) {
flush->flags |= HV_FLUSH_ALL_PROCESSORS;
} else {
+ /*
+ * From the supplied CPU set we need to figure out if we can get
+ * away with the cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
+ * hypercalls. This is possible when the highest VP number in
+ * the set is < 64. As VP numbers are usually in ascending order
+ * and match Linux CPU ids, here is an optimization: check
+ * the VP number of the highest bit in the supplied set first,
+ * so we can quickly tell whether using the *_EX hypercalls is
+ * a must. We still check all VP numbers when walking the
+ * supplied CPU set to remain correct in all cases.
+ */
+ if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
+ goto do_ex_hypercall;
+
for_each_cpu(cpu, cpus) {
vcpu = hv_cpu_number_to_vp_number(cpu);
if (vcpu >= 64)
- goto do_native;
+ goto do_ex_hypercall;
__set_bit(vcpu, (unsigned long *)
&flush->processor_mask);
@@ -123,7 +139,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
gva_n, 0, flush, NULL);
}
+ goto check_status;
+
+do_ex_hypercall:
+ status = hyperv_flush_tlb_others_ex(cpus, info);
+check_status:
local_irq_restore(flags);
if (!(status & HV_HYPERCALL_RESULT_MASK))
@@ -132,35 +153,22 @@ do_native:
native_flush_tlb_others(cpus, info);
}
-static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
- const struct flush_tlb_info *info)
+static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
+ const struct flush_tlb_info *info)
{
int nr_bank = 0, max_gvas, gva_n;
struct hv_tlb_flush_ex **flush_pcpu;
struct hv_tlb_flush_ex *flush;
- u64 status = U64_MAX;
- unsigned long flags;
-
- trace_hyperv_mmu_flush_tlb_others(cpus, info);
+ u64 status;
- if (!hv_hypercall_pg)
- goto do_native;
-
- if (cpumask_empty(cpus))
- return;
-
- local_irq_save(flags);
+ if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return U64_MAX;
flush_pcpu = (struct hv_tlb_flush_ex **)
this_cpu_ptr(hyperv_pcpu_input_arg);
flush = *flush_pcpu;
- if (unlikely(!flush)) {
- local_irq_restore(flags);
- goto do_native;
- }
-
if (info->mm) {
/*
* AddressSpace argument must match the CR3 with PCID bits
@@ -176,15 +184,8 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->hv_vp_set.valid_bank_mask = 0;
- if (!cpumask_equal(cpus, cpu_present_mask)) {
- flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
- nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
- }
-
- if (!nr_bank) {
- flush->hv_vp_set.format = HV_GENERIC_SET_ALL;
- flush->flags |= HV_FLUSH_ALL_PROCESSORS;
- }
+ flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
/*
* We can flush at most max_gvas with one hypercall. Flush the
@@ -213,12 +214,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
gva_n, nr_bank, flush, NULL);
}
- local_irq_restore(flags);
-
- if (!(status & HV_HYPERCALL_RESULT_MASK))
- return;
-do_native:
- native_flush_tlb_others(cpus, info);
+ return status;
}
void hyperv_setup_mmu_ops(void)
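
The flush path now forms a three-tier fallback: the plain HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE} call, then the *_EX variant (which reports U64_MAX when HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED is clear), and finally native_flush_tlb_others() whenever the low HV_HYPERCALL_RESULT_MASK bits of the status are nonzero. A rough model of that chain, with made-up flush_fast()/flush_ex() helpers standing in for the hypercalls:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESULT_MASK	0xffffULL	/* models HV_HYPERCALL_RESULT_MASK */
#define STATUS_FAILED	UINT64_MAX

static bool ex_recommended;	/* HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED */

static uint64_t flush_fast(int highest_vp)
{
	if (highest_vp >= 64)	/* set does not fit one 64-bit mask */
		return STATUS_FAILED;
	puts("HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE");
	return 0;
}

static uint64_t flush_ex(void)
{
	if (!ex_recommended)	/* mirrors the early-out added above */
		return STATUS_FAILED;
	puts("HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX");
	return 0;
}

static void flush_tlb_others(int highest_vp)
{
	uint64_t status = flush_fast(highest_vp);

	if (status & RESULT_MASK)	/* tier 2: sparse-set hypercall */
		status = flush_ex();
	if (status & RESULT_MASK)	/* tier 3: IPI-based native flush */
		puts("native_flush_tlb_others");
}

int main(void)
{
	ex_recommended = true;
	flush_tlb_others(3);	/* fast hypercall succeeds */
	flush_tlb_others(100);	/* falls through to the EX hypercall */
	ex_recommended = false;
	flush_tlb_others(100);	/* ends up on the native path */
	return 0;
}
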
@@ -226,11 +222,6 @@ void hyperv_setup_mmu_ops(void)
if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
return;
- if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
- pr_info("Using hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
- } else {
- pr_info("Using ext hypercall for remote TLB flush\n");
- pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others_ex;
- }
+ pr_info("Using hypercall for remote TLB flush\n");
+ pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
}
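
For reference, the *_EX hypercalls that both files now fall back to describe their targets as a sparse VP set rather than one 64-bit mask: VPs are grouped into 64-wide banks, valid_bank_mask flags the banks that are present, and only those banks are sent, which is what cpumask_to_vpset() computes (its return value becomes nr_bank above). A simplified encoder in that spirit; the struct layout and names here are illustrative, not the exact hv_vpset from the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define MAX_BANKS 64	/* one valid_bank_mask bit per 64-VP bank */

struct vp_set {
	uint64_t valid_bank_mask;	/* which 64-VP banks are populated */
	uint64_t bank[MAX_BANKS];	/* dense array of the valid banks */
};

/* Returns the number of banks written, i.e. the payload size. */
static int vps_to_vpset(const int *vps, int n, struct vp_set *set)
{
	uint64_t sparse[MAX_BANKS] = { 0 };
	int i, b, nr_bank = 0;

	set->valid_bank_mask = 0;
	for (i = 0; i < n; i++) {
		b = vps[i] / 64;
		sparse[b] |= 1ULL << (vps[i] % 64);
		set->valid_bank_mask |= 1ULL << b;
	}
	/* Compact: ship only the banks flagged in valid_bank_mask. */
	for (b = 0; b < MAX_BANKS; b++)
		if (set->valid_bank_mask & (1ULL << b))
			set->bank[nr_bank++] = sparse[b];
	return nr_bank;
}

int main(void)
{
	struct vp_set set;
	int vps[] = { 1, 70, 200 };	/* VPs spread over banks 0, 1 and 3 */
	int nr = vps_to_vpset(vps, 3, &set);

	printf("valid_bank_mask=0x%llx, %d banks sent\n",
	       (unsigned long long)set.valid_bank_mask, nr);
	return 0;
}
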