author    Dave Airlie <airlied@redhat.com>  2016-12-05 17:11:48 +1000
committer Dave Airlie <airlied@redhat.com>  2016-12-05 17:11:48 +1000
commit  f03ee46be9401e3434f52bb15e92d1e640f76438 (patch)
tree    f0a1819bd3e44902578b80e1a03d1dde1c6099b8 /arch/x86/kvm
parent  0d5320fc194128a1a584a7e91a606cb3af2ded80 (diff)
parent  3e5de27e940d00d8d504dfb96625fb654f641509 (diff)
Backmerge tag 'v4.9-rc8' into drm-next
Linux 4.9-rc8
Daniel requested this so we could apply some follow-on fixes cleanly to -next.
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/emulate.c   | 36
-rw-r--r--  arch/x86/kvm/ioapic.c    |  2
-rw-r--r--  arch/x86/kvm/ioapic.h    |  4
-rw-r--r--  arch/x86/kvm/irq_comm.c  | 71
-rw-r--r--  arch/x86/kvm/lapic.c     |  2
-rw-r--r--  arch/x86/kvm/x86.c       | 47
6 files changed, 89 insertions(+), 73 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index cbd7b92585bb..a3ce9d260d68 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
- unsigned short sel, old_sel;
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
+ unsigned short sel;
+ struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
- /* Assignment of RIP may only fail in 64-bit mode */
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
- VCPU_SREG_CS);
-
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- /* assigning eip failed; restore the old cs */
- ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
- return rc;
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}
@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
- u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
- struct desc_struct old_desc, new_desc;
- const struct x86_emulate_ops *ops = ctxt->ops;
-
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
- VCPU_SREG_CS);
+ struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
- if (rc != X86EMUL_CONTINUE) {
- WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
- ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
- }
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+
return rc;
}
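Both emulate.c hunks make the same change: instead of saving the old CS up front and restoring it when assign_eip_far() rejects the target, the emulator now fails the instruction outright with X86EMUL_UNHANDLEABLE. A standalone sketch of that convention follows; the result codes mirror the emulator's, but the canonical-address check and function are illustrative, not the kernel code:

/*
 * Sketch only: on a bad far-branch target we report the instruction
 * as unhandleable rather than attempting a CS rollback.
 */
#include <stdint.h>
#include <stdio.h>

#define X86EMUL_CONTINUE	0
#define X86EMUL_UNHANDLEABLE	1

static int assign_rip(uint64_t *rip, uint64_t target)
{
	/* 64-bit mode: canonical iff bits 63:47 sign-extend bit 47 */
	if (((target + (1ULL << 47)) >> 48) != 0)
		return X86EMUL_UNHANDLEABLE;	/* no CS rollback */
	*rip = target;
	return X86EMUL_CONTINUE;
}

int main(void)
{
	uint64_t rip = 0;

	printf("canonical:     %d\n", assign_rip(&rip, 0x00007fffffffffffULL));
	printf("non-canonical: %d\n", assign_rip(&rip, 0x0100000000000000ULL));
	return 0;
}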
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 1a22de70f7f7..6e219e5c07d2 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 7d2692a49657..1cc6e54436db 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -42,13 +42,13 @@ struct kvm_vcpu;
struct dest_map {
/* vcpu bitmap where IRQ has been sent */
- DECLARE_BITMAP(map, KVM_MAX_VCPUS);
+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
/*
* Vector sent to a given vcpu, only valid when
* the vcpu's bit in map is set
*/
- u8 vectors[KVM_MAX_VCPUS];
+ u8 vectors[KVM_MAX_VCPU_ID];
};
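The two ioapic hunks are a single fix: dest_map is indexed by vcpu_id, and APIC IDs can be sparse, so an ID may exceed the number of vcpus; sizing the bitmap and vector array by the maximum count instead of the maximum ID allowed out-of-bounds writes. A standalone sketch of the sizing difference, with the two limits as placeholder values rather than the kernel's:

/*
 * Sketch: the map must be sized by the maximum vcpu *ID*, not the
 * maximum vcpu *count*.  Limit values below are placeholders.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG		(sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define MAX_VCPUS	288	/* placeholder */
#define MAX_VCPU_ID	1023	/* placeholder */

int main(void)
{
	DECLARE_BITMAP(by_count, MAX_VCPUS);
	DECLARE_BITMAP(by_id, MAX_VCPU_ID);

	/* a vcpu_id of 512 would index past by_count[] but fits in by_id[] */
	printf("words by count: %zu, words by id: %zu\n",
	       sizeof(by_count) / sizeof(long),
	       sizeof(by_id) / sizeof(long));
	return 0;
}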
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 25810b144b58..6c0191615f23 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_pic *pic = pic_irqchip(kvm);
+
+ /*
+ * XXX: rejecting pic routes when pic isn't in use would be better,
+ * but the default routing table is installed while kvm->arch.vpic is
+ * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
+ */
+ if (!pic)
+ return -1;
+
return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}
@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+ if (!ioapic)
+ return -1;
+
return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
line_status);
}
@@ -156,6 +169,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
}
+static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status)
+{
+ if (!level)
+ return -1;
+
+ return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
+}
+
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
@@ -163,18 +186,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq irq;
int r;
- if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
- return -EWOULDBLOCK;
+ switch (e->type) {
+ case KVM_IRQ_ROUTING_HV_SINT:
+ return kvm_hv_set_sint(e, kvm, irq_source_id, level,
+ line_status);
- if (kvm_msi_route_invalid(kvm, e))
- return -EINVAL;
+ case KVM_IRQ_ROUTING_MSI:
+ if (kvm_msi_route_invalid(kvm, e))
+ return -EINVAL;
- kvm_set_msi_irq(kvm, e, &irq);
+ kvm_set_msi_irq(kvm, e, &irq);
- if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
- return r;
- else
- return -EWOULDBLOCK;
+ if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
+ return r;
+ break;
+
+ default:
+ break;
+ }
+
+ return -EWOULDBLOCK;
}
int kvm_request_irq_source_id(struct kvm *kvm)
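kvm_arch_set_irq_inatomic() now dispatches on route type instead of rejecting everything but MSI, and -EWOULDBLOCK keeps its meaning of "retry from a context that may sleep". A simplified standalone sketch of that contract; the enum and return values are illustrative:

#include <errno.h>
#include <stdio.h>

/* Illustrative route types, not the kernel's enum */
enum route_type { ROUTE_MSI, ROUTE_HV_SINT, ROUTE_IRQCHIP };

static int set_irq_inatomic(enum route_type type)
{
	switch (type) {
	case ROUTE_HV_SINT:
	case ROUTE_MSI:
		return 1;		/* deliverable without sleeping */
	default:
		return -EWOULDBLOCK;	/* defer to the non-atomic path */
	}
}

int main(void)
{
	printf("irqchip route: %d\n", set_irq_inatomic(ROUTE_IRQCHIP));
	printf("msi route:     %d\n", set_irq_inatomic(ROUTE_MSI));
	return 0;
}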
@@ -254,16 +285,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
-static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- if (!level)
- return -1;
-
- return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
-}
-
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
@@ -423,18 +444,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
-int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
- int irq_source_id, int level, bool line_status)
-{
- switch (irq->type) {
- case KVM_IRQ_ROUTING_HV_SINT:
- return kvm_hv_set_sint(irq, kvm, irq_source_id, level,
- line_status);
- default:
- return -EWOULDBLOCK;
- }
-}
-
void kvm_arch_irq_routing_update(struct kvm *kvm)
{
kvm_hv_irq_routing_update(kvm);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 23b99f305382..6f69340f9fa3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
*mask = dest_id & 0xff;
return true;
case KVM_APIC_MODE_XAPIC_CLUSTER:
- *cluster = map->xapic_cluster_map[dest_id >> 4];
+ *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
*mask = dest_id & 0xf;
return true;
default:
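The lapic.c hunk is a bounds fix: in xAPIC cluster mode only the low 8 bits of the logical destination are meaningful (4 bits of cluster, 4 bits of in-cluster mask), but dest_id arrives as a wider guest-controlled value, so ">> 4" alone can index past the cluster map. A standalone demonstration of the indices involved (the 16-entry size is inferred from the 4-bit cluster field):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dest_id = 0x123;	/* guest-controlled, wider than 8 bits */

	uint32_t unmasked = dest_id >> 4;		/* 18: out of bounds */
	uint32_t masked   = (dest_id >> 4) & 0xf;	/*  2: in bounds */

	printf("unmasked %u vs masked %u (4-bit cluster => 16 entries)\n",
	       unmasked, masked);
	return 0;
}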
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7e30c720d0c5..073eaeabc2a7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
+ unsigned long flags;
+ /*
+ * Disabling irqs at this point since the following code could be
+ * interrupted and executed through kvm_arch_hardware_disable()
+ */
+ local_irq_save(flags);
+ if (locals->registered) {
+ locals->registered = false;
+ user_return_notifier_unregister(urn);
+ }
+ local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
values->curr = values->host;
}
}
- locals->registered = false;
- user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
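The kvm_on_user_return() hunks move the unregister to the front of the handler and wrap it in local_irq_save()/local_irq_restore(), because the same teardown can also be reached from kvm_arch_hardware_disable() in interrupt context. A userspace analogue of the idiom, with sigprocmask() standing in for interrupt disabling; this is an analogy, not the kernel code:

#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

static volatile bool registered = true;

int main(void)
{
	sigset_t all, old;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &old);	/* ~ local_irq_save() */
	if (registered) {			/* check-and-clear now can't */
		registered = false;		/* race with the async path  */
		/* user_return_notifier_unregister(urn) would go here */
	}
	sigprocmask(SIG_SETMASK, &old, NULL);	/* ~ local_irq_restore() */

	printf("registered=%d\n", registered);
	return 0;
}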
@@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
static u64 __get_kvmclock_ns(struct kvm *kvm)
{
- struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
struct kvm_arch *ka = &kvm->arch;
- s64 ns;
+ struct pvclock_vcpu_time_info hv_clock;
- if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
- u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
- ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
- } else {
- ns = ktime_get_boot_ns() + ka->kvmclock_offset;
+ spin_lock(&ka->pvclock_gtod_sync_lock);
+ if (!ka->use_master_clock) {
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+ return ktime_get_boot_ns() + ka->kvmclock_offset;
}
- return ns;
+ hv_clock.tsc_timestamp = ka->master_cycle_now;
+ hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+ kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+ &hv_clock.tsc_shift,
+ &hv_clock.tsc_to_system_mul);
+ return __pvclock_read_cycles(&hv_clock, rdtsc());
}
u64 get_kvmclock_ns(struct kvm *kvm)
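The rewritten __get_kvmclock_ns() snapshots the masterclock parameters under pvclock_gtod_sync_lock and performs the pvclock conversion itself instead of trusting vcpu 0's copy. A standalone sketch of the arithmetic __pvclock_read_cycles() applies to those fields, with invented mul/shift values for a hypothetical 2.5 GHz TSC:

/*
 * ns = system_time + ((tsc - tsc_timestamp) shifted by tsc_shift,
 *                     then * tsc_to_system_mul >> 32)
 * Values below are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul, int8_t shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	/* unsigned __int128 is a GCC/Clang extension */
	return (uint64_t)(((unsigned __int128)delta * mul) >> 32);
}

int main(void)
{
	uint64_t tsc_timestamp = 1000000, system_time = 5000000;
	uint64_t tsc = tsc_timestamp + 2500000000ULL;	/* +1 s of cycles */
	int8_t   tsc_shift = -1;
	uint32_t tsc_to_system_mul = 3435973837U;	/* ~0.8 * 2^32 */

	printf("kvmclock ns: %llu\n",	/* ~system_time + 1e9 */
	       (unsigned long long)(system_time +
		       scale_delta(tsc - tsc_timestamp,
				   tsc_to_system_mul, tsc_shift)));
	return 0;
}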
@@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
- case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
@@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#endif
r = 1;
break;
+ case KVM_CAP_ADJUST_CLOCK:
+ r = KVM_CLOCK_TSC_STABLE;
+ break;
case KVM_CAP_X86_SMM:
/* SMBASE is usually relocated above 1M on modern chipsets,
* and SMM handlers might indeed rely on 4G segment limits,
@@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
};
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
+ int idx;
r = -EINVAL;
if (!lapic_in_kernel(vcpu))
@@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_X86_SETUP_MCE: {
@@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_clock_data user_ns;
u64 now_ns;
- now_ns = get_kvmclock_ns(kvm);
+ local_irq_disable();
+ now_ns = __get_kvmclock_ns(kvm);
user_ns.clock = now_ns;
- user_ns.flags = 0;
+ user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
+ local_irq_enable();
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
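Taken together, the KVM_CAP_ADJUST_CLOCK and KVM_GET_CLOCK hunks let userspace learn whether the reported clock is TSC-stable: the capability check now returns KVM_CLOCK_TSC_STABLE instead of 1, and the same bit appears in kvm_clock_data.flags while the masterclock is active. A userspace sketch of querying both, with error handling elided:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	struct kvm_clock_data data = { 0 };

	/* after this change the capability reports KVM_CLOCK_TSC_STABLE,
	 * not just 1 */
	printf("KVM_CAP_ADJUST_CLOCK -> %d\n",
	       ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK));

	if (ioctl(vm_fd, KVM_GET_CLOCK, &data) == 0)
		printf("clock=%llu tsc-stable=%d\n",
		       (unsigned long long)data.clock,
		       !!(data.flags & KVM_CLOCK_TSC_STABLE));
	return 0;
}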