author    Paolo Bonzini <pbonzini@redhat.com>  2018-08-22 14:07:56 +0200
committer Paolo Bonzini <pbonzini@redhat.com>  2018-08-22 14:07:56 +0200
commit    631989303b06b8fdb15ec3b88aee2d25e80d4cec (patch)
tree      25c00dc9392cb7b25e5c9355a6176f6f4be36924 /arch/arm
parent    ad1d69735878a6bf797705b5d2a20316d35e1113 (diff)
parent    976d34e2dab10ece5ea8fe7090b7692913f89084 (diff)
Merge tag 'kvmarm-for-v4.19' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm updates for 4.19

- Support for Group0 interrupts in guests
- Cache management optimizations for ARMv8.4 systems
- Userspace interface for RAS, allowing error retrieval and injection
- Fault path optimization
- Emulated physical timer fixes
- Random cleanups
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/kvm_emulate.h   12
-rw-r--r--  arch/arm/include/asm/kvm_host.h       5
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h       14
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h      13
-rw-r--r--  arch/arm/kvm/coproc.c                25
-rw-r--r--  arch/arm/kvm/guest.c                 23
6 files changed, 79 insertions(+), 13 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index fe2fb1ddd771..77121b713bef 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -107,9 +107,19 @@ static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
return (unsigned long *)&vcpu->arch.hcr;
}
+static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr &= ~HCR_TWE;
+}
+
+static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr |= HCR_TWE;
+}
+
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
- return 1;
+ return true;
}
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
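The two new helpers give the run loop a per-vCPU switch for HCR_TWE. Their caller lives in the shared arm/arm64 code, which this arch/arm-limited diffstat does not show; the sketch below illustrates the intended pattern under that assumption, using the scheduler's single_task_running() helper. Trapping WFE lets KVM schedule something else when a guest spins on a contended CPU, while an uncontended vCPU gets to execute WFE natively.

    /* Illustrative sketch, not the exact upstream caller: pick WFE
     * trapping when the vCPU is scheduled in, based on CPU contention. */
    static void update_wfe_traps(struct kvm_vcpu *vcpu)
    {
            if (single_task_running())
                    vcpu_clear_wfe_traps(vcpu); /* HCR_TWE clear: WFE runs in guest */
            else
                    vcpu_set_wfe_traps(vcpu);   /* HCR_TWE set: WFE exits to host */
    }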
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 1f1fe4109b02..79906cecb091 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -216,6 +216,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
unsigned long kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events);
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 8553d68b7c8a..265ea9cf7df7 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -75,17 +75,9 @@ phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
-static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
-{
- *pmd = new_pmd;
- dsb(ishst);
-}
-
-static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
-{
- *pte = new_pte;
- dsb(ishst);
-}
+#define kvm_mk_pmd(ptep) __pmd(__pa(ptep) | PMD_TYPE_TABLE)
+#define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE)
+#define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; })
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
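kvm_mk_pmd() and kvm_mk_pud() package the physical address of a lower-level table into a table-type descriptor; kvm_mk_pgd() is a BUILD_BUG() because 32-bit stage-2 tables fold the PUD level away, so no PGD entry is ever built from a PUD table. The removed kvm_set_pte()/kvm_set_pmd() setters, with their dsb(ishst) barriers, move into the shared stage-2 MMU code in this same series rather than disappearing. A hedged sketch of how the macro is used when a fault handler links in a new PTE table, with mmu_memory_cache_alloc() standing in for KVM's real per-vCPU table allocator:

    /* Sketch under the assumptions above: build a table descriptor for a
     * freshly allocated PTE table and install it; kvm_set_pmd() still
     * issues the dsb(ishst) shown in the removed inline. */
    static void link_pte_table(struct kvm_mmu_memory_cache *cache, pmd_t *pmdp)
    {
            pte_t *ptep = mmu_memory_cache_alloc(cache);
            pmd_t entry = kvm_mk_pmd(ptep); /* __pa(ptep) | PMD_TYPE_TABLE */

            kvm_set_pmd(pmdp, entry);
    }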
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 16e006f708ca..4602464ebdfb 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot {
};
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
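With __KVM_HAVE_VCPU_EVENTS defined, this struct travels through the generic KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS vCPU ioctls (advertised via KVM_CAP_VCPU_EVENTS). A minimal userspace sketch, assuming vcpu_fd is an already open vCPU file descriptor, that reads a pending SError and re-injects it, as a migration flow would:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Sketch: carry a pending SError across a save/restore cycle. */
    static int transfer_serror(int vcpu_fd)
    {
            struct kvm_vcpu_events ev;

            memset(&ev, 0, sizeof(ev));
            if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
                    return -1;

            if (!ev.exception.serror_pending)
                    return 0;

            /* On 32-bit ARM, serror_has_esr must remain 0: the set-side
             * handler in guest.c below rejects a specified ESR. */
            memset(&ev, 0, sizeof(ev));
            ev.exception.serror_pending = 1;
            return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
    }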
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 3a02e76699a6..450c7a4fbc8a 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -246,6 +246,7 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
u64 reg;
+ bool g1;
if (!p->is_write)
return read_from_write_only(vcpu, p);
@@ -253,7 +254,25 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
reg |= *vcpu_reg(vcpu, p->Rt1) ;
- vgic_v3_dispatch_sgi(vcpu, reg);
+ /*
+ * In a system where GICD_CTLR.DS=1, an ICC_SGI0R access generates
+ * Group0 SGIs only, while ICC_SGI1R can generate either group,
+ * depending on the SGI configuration. ICC_ASGI1R is effectively
+ * equivalent to ICC_SGI0R, as there is no "alternative" secure
+ * group.
+ */
+ switch (p->Op1) {
+ default: /* Keep GCC quiet */
+ case 0: /* ICC_SGI1R */
+ g1 = true;
+ break;
+ case 1: /* ICC_ASGI1R */
+ case 2: /* ICC_SGI0R */
+ g1 = false;
+ break;
+ }
+
+ vgic_v3_dispatch_sgi(vcpu, reg, g1);
return true;
}
@@ -459,6 +478,10 @@ static const struct coproc_reg cp15_regs[] = {
/* ICC_SGI1R */
{ CRm64(12), Op1( 0), is64, access_gic_sgi},
+ /* ICC_ASGI1R */
+ { CRm64(12), Op1( 1), is64, access_gic_sgi},
+ /* ICC_SGI0R */
+ { CRm64(12), Op1( 2), is64, access_gic_sgi},
/* VBAR: swapped by interrupt.S. */
{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
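All three SGI entries funnel into access_gic_sgi(), which now reads p->Op1 to pick the interrupt group. On the guest side these are 64-bit CP15 accesses (MCRR with CRm=c12, Op1 0/1/2 selecting ICC_SGI1R/ICC_ASGI1R/ICC_SGI0R). A sketch of guest code raising a Group0 SGI, assuming the GICv3 register layout (TargetList in bits [15:0], INTID in bits [27:24]) and a hypothetical helper name:

    /* Sketch: write ICC_SGI0R from a 32-bit guest. The trap arrives in
     * access_gic_sgi() with p->Op1 == 2, so g1 is false and the SGI is
     * dispatched as Group0. */
    static inline void guest_send_sgi0(u32 intid, u32 target_list)
    {
            u64 val = (target_list & 0xffff) | ((u64)(intid & 0xf) << 24);

            asm volatile("mcrr p15, 2, %Q0, %R0, c12" : : "r" (val));
    }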
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index a18f33edc471..2b8de885b2bf 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -261,6 +261,29 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+
+int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
+
+ return 0;
+}
+
+int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+ struct kvm_vcpu_events *events)
+{
+ bool serror_pending = events->exception.serror_pending;
+ bool has_esr = events->exception.serror_has_esr;
+
+ if (serror_pending && has_esr)
+ return -EINVAL;
+ else if (serror_pending)
+ kvm_inject_vabt(vcpu);
+
+ return 0;
+}
+
int __attribute_const__ kvm_target_cpu(void)
{
switch (read_cpuid_part()) {
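The double-underscore helpers above are only the 32-bit arch half: validating the user buffer and the reserved/padding fields, and performing the actual ioctl copies, happens in the shared arm/arm64 code outside this diffstat. A plausible sketch of that wiring, with the function name and placement assumed rather than taken from this diff:

    /* Sketch of the common-code side of KVM_GET/SET_VCPU_EVENTS; the real
     * wrapper also rejects nonzero reserved fields before reaching the
     * arch helper. */
    static long vcpu_events_ioctl(struct kvm_vcpu *vcpu, unsigned int req,
                                  void __user *argp)
    {
            struct kvm_vcpu_events events;

            if (req == KVM_GET_VCPU_EVENTS) {
                    memset(&events, 0, sizeof(events));
                    if (__kvm_arm_vcpu_get_events(vcpu, &events))
                            return -EINVAL;
                    return copy_to_user(argp, &events, sizeof(events)) ?
                            -EFAULT : 0;
            }

            if (copy_from_user(&events, argp, sizeof(events)))
                    return -EFAULT;
            return __kvm_arm_vcpu_set_events(vcpu, &events);
    }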