 Documentation/virtual/kvm/devices/arm-vgic.txt |  5
 arch/arm/include/asm/kvm_arm.h                 |  1
 arch/arm/include/asm/kvm_emulate.h             | 24
 arch/arm64/include/asm/kvm_emulate.h           | 24
 arch/arm64/kvm/vgic-sys-reg-v3.c               | 23
 virt/kvm/arm/mmu.c                             | 40
 virt/kvm/arm/vgic/vgic-debug.c                 |  4
 virt/kvm/arm/vgic/vgic-its.c                   | 10
 virt/kvm/arm/vgic/vgic-mmio-v2.c               | 47
 virt/kvm/arm/vgic/vgic.h                       | 16
 10 files changed, 125 insertions(+), 69 deletions(-)
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
index b2f60ca8b60c..b3ce12643553 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,11 @@ Groups:
Bits for undefined preemption levels are RAZ/WI.
+ Note that this differs from a CPU's view of the APRs on hardware in which
+ a GIC without the security extensions exposes group 0 and group 1 active
+ priorities in separate register groups, whereas we show a combined view
+ similar to GICv2's GICH_APR.
+
For historical reasons and to provide ABI compatibility with userspace we
export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
field in the lower 5 bits of a word, meaning that userspace must always
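
[Editor's note: a minimal sketch of the conversion the documentation describes, assuming the usual GICv2 VMPriMask semantics, i.e. the 5-bit field carries priority bits [7:3] of GICC_PMR. The helper names are hypothetical and not part of the KVM ABI.]

	#include <stdint.h>

	/* Illustrative only: GICH_VMCR.VMPriMask holds priority bits [7:3]
	 * of the 8-bit GICC_PMR value in the low 5 bits of the word. */
	static uint32_t pmr_to_vmcr_field(uint8_t gicc_pmr)
	{
		return (gicc_pmr >> 3) & 0x1f;	/* keep the 5 implemented bits */
	}

	static uint8_t vmcr_field_to_pmr(uint32_t field)
	{
		return (field & 0x1f) << 3;	/* restore the priority mask level */
	}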
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index ebf020b02bc8..c8781450905b 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -227,7 +227,6 @@
#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
#define HSR_DABT_CM (_AC(1, UL) << 8)
-#define HSR_DABT_EA (_AC(1, UL) << 9)
#define kvm_arm_exception_type \
{0, "RESET" }, \
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 9a8a45aaf19a..98089ffd91bb 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -149,11 +149,6 @@ static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}
-static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
-{
- return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
-}
-
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
@@ -206,6 +201,25 @@ static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}
+static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
+{
+ switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+ case FSC_SEA:
+ case FSC_SEA_TTW0:
+ case FSC_SEA_TTW1:
+ case FSC_SEA_TTW2:
+ case FSC_SEA_TTW3:
+ case FSC_SECC:
+ case FSC_SECC_TTW0:
+ case FSC_SECC_TTW1:
+ case FSC_SECC_TTW2:
+ case FSC_SECC_TTW3:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fe39e6841326..e5df3fce0008 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -188,11 +188,6 @@ static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
-}
-
static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
@@ -240,6 +235,25 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
+static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+{
+ switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+ case FSC_SEA:
+ case FSC_SEA_TTW0:
+ case FSC_SEA_TTW1:
+ case FSC_SEA_TTW2:
+ case FSC_SEA_TTW3:
+ case FSC_SECC:
+ case FSC_SECC_TTW0:
+ case FSC_SECC_TTW1:
+ case FSC_SECC_TTW2:
+ case FSC_SECC_TTW3:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
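
[Editor's note: the rewritten kvm_vcpu_dabt_isextabt() keys the external-abort
check off the fault status code rather than the ESR_ELx.EA bit. As a standalone
illustration, a sketch of the same classification over a raw ESR value,
assuming the architectural DFSC encodings (0x10 for a synchronous external
abort, 0x14-0x17 for SEA on a translation table walk, 0x18 and 0x1c-0x1f for
the parity/ECC variants); this is a re-statement, not the in-kernel helper.]

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative only: classify the low 6 FSC bits of an ESR_ELx value. */
	static bool esr_fsc_is_external_abort(uint64_t esr)
	{
		switch (esr & 0x3f) {
		case 0x10:					/* synchronous external abort */
		case 0x14: case 0x15: case 0x16: case 0x17:	/* SEA on table walk, level 0-3 */
		case 0x18:					/* synchronous parity/ECC error */
		case 0x1c: case 0x1d: case 0x1e: case 0x1f:	/* parity/ECC on table walk */
			return true;
		default:
			return false;
		}
	}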
diff --git a/arch/arm64/kvm/vgic-sys-reg-v3.c b/arch/arm64/kvm/vgic-sys-reg-v3.c
index 116786d2e8e8..c77d508b7462 100644
--- a/arch/arm64/kvm/vgic-sys-reg-v3.c
+++ b/arch/arm64/kvm/vgic-sys-reg-v3.c
@@ -208,29 +208,12 @@ static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r, u8 apr)
{
- struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
u8 idx = r->Op2 & 3;
- /*
- * num_pri_bits are initialized with HW supported values.
- * We can rely safely on num_pri_bits even if VM has not
- * restored ICC_CTLR_EL1 before restoring APnR registers.
- */
- switch (vgic_v3_cpu->num_pri_bits) {
- case 7:
- vgic_v3_access_apr_reg(vcpu, p, apr, idx);
- break;
- case 6:
- if (idx > 1)
- goto err;
- vgic_v3_access_apr_reg(vcpu, p, apr, idx);
- break;
- default:
- if (idx > 0)
- goto err;
- vgic_v3_access_apr_reg(vcpu, p, apr, idx);
- }
+ if (idx > vgic_v3_max_apr_idx(vcpu))
+ goto err;
+ vgic_v3_access_apr_reg(vcpu, p, apr, idx);
return true;
err:
if (!p->is_write)
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 2ea21dac0b44..b36945d49986 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1454,25 +1454,6 @@ out:
kvm_set_pfn_accessed(pfn);
}
-static bool is_abort_sea(unsigned long fault_status)
-{
- switch (fault_status) {
- case FSC_SEA:
- case FSC_SEA_TTW0:
- case FSC_SEA_TTW1:
- case FSC_SEA_TTW2:
- case FSC_SEA_TTW3:
- case FSC_SECC:
- case FSC_SECC_TTW0:
- case FSC_SECC_TTW1:
- case FSC_SECC_TTW2:
- case FSC_SECC_TTW3:
- return true;
- default:
- return false;
- }
-}
-
/**
* kvm_handle_guest_abort - handles all 2nd stage aborts
* @vcpu: the VCPU pointer
@@ -1498,20 +1479,21 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
+ is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
- /*
- * The host kernel will handle the synchronous external abort. There
- * is no need to pass the error into the guest.
- */
- if (is_abort_sea(fault_status)) {
+ /* Synchronous External Abort? */
+ if (kvm_vcpu_dabt_isextabt(vcpu)) {
+ /*
+ * For RAS the host kernel may handle this abort.
+ * There is no need to pass the error into the guest.
+ */
if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
return 1;
- }
- is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
- if (unlikely(!is_iabt && kvm_vcpu_dabt_isextabt(vcpu))) {
- kvm_inject_vabt(vcpu);
- return 1;
+ if (unlikely(!is_iabt)) {
+ kvm_inject_vabt(vcpu);
+ return 1;
+ }
}
trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c
index 7072ab743332..10b38178cff2 100644
--- a/virt/kvm/arm/vgic/vgic-debug.c
+++ b/virt/kvm/arm/vgic/vgic-debug.c
@@ -234,7 +234,7 @@ static int vgic_debug_show(struct seq_file *s, void *v)
return 0;
}
-static struct seq_operations vgic_debug_seq_ops = {
+static const struct seq_operations vgic_debug_seq_ops = {
.start = vgic_debug_start,
.next = vgic_debug_next,
.stop = vgic_debug_stop,
@@ -255,7 +255,7 @@ static int debug_open(struct inode *inode, struct file *file)
return ret;
};
-static struct file_operations vgic_debug_fops = {
+static const struct file_operations vgic_debug_fops = {
.owner = THIS_MODULE,
.open = debug_open,
.read = seq_read,
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index aa6b68db80b4..f51c1e1b3f70 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -144,7 +144,6 @@ struct its_ite {
struct vgic_irq *irq;
struct its_collection *collection;
- u32 lpi;
u32 event_id;
};
@@ -813,7 +812,7 @@ static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
struct its_collection *collection,
- u32 lpi_id, u32 event_id)
+ u32 event_id)
{
struct its_ite *ite;
@@ -823,7 +822,6 @@ static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
ite->event_id = event_id;
ite->collection = collection;
- ite->lpi = lpi_id;
list_add_tail(&ite->ite_list, &device->itt_head);
return ite;
@@ -873,7 +871,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
new_coll = collection;
}
- ite = vgic_its_alloc_ite(device, collection, lpi_nr, event_id);
+ ite = vgic_its_alloc_ite(device, collection, event_id);
if (IS_ERR(ite)) {
if (new_coll)
vgic_its_free_collection(its, coll_id);
@@ -1848,7 +1846,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
- ((u64)ite->lpi << KVM_ITS_ITE_PINTID_SHIFT) |
+ ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
ite->collection->collection_id;
val = cpu_to_le64(val);
return kvm_write_guest(kvm, gpa, &val, ite_esz);
@@ -1895,7 +1893,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
if (!collection)
return -EINVAL;
- ite = vgic_its_alloc_ite(dev, collection, lpi_id, event_id);
+ ite = vgic_its_alloc_ite(dev, collection, event_id);
if (IS_ERR(ite))
return PTR_ERR(ite);
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
index 37522e65eb53..b3d4a10f09a1 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v2.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -303,6 +303,51 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
vgic_set_vmcr(vcpu, &vmcr);
}
+static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ int n; /* which APRn is this */
+
+ n = (addr >> 2) & 0x3;
+
+ if (kvm_vgic_global_state.type == VGIC_V2) {
+ /* GICv2 hardware systems support max. 32 groups */
+ if (n != 0)
+ return 0;
+ return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
+ } else {
+ struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ if (n > vgic_v3_max_apr_idx(vcpu))
+ return 0;
+ /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+ return vgicv3->vgic_ap1r[n];
+ }
+}
+
+static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ int n; /* which APRn is this */
+
+ n = (addr >> 2) & 0x3;
+
+ if (kvm_vgic_global_state.type == VGIC_V2) {
+ /* GICv2 hardware systems support max. 32 groups */
+ if (n != 0)
+ return;
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
+ } else {
+ struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ if (n > vgic_v3_max_apr_idx(vcpu))
+ return;
+ /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
+ vgicv3->vgic_ap1r[n] = val;
+ }
+}
+
static const struct vgic_register_region vgic_v2_dist_registers[] = {
REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
@@ -364,7 +409,7 @@ static const struct vgic_register_region vgic_v2_cpu_registers[] = {
vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
- vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
+ vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
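
[Editor's note: the index computation in the new APR handlers works because the
GIC_CPU_ACTIVEPRIO window holds four consecutive 32-bit registers at a
16-byte-aligned base (0xd0 in the GICv2 CPU interface), so (addr >> 2) & 0x3
yields the word index regardless of whether addr is region-relative. A small
worked check, illustrative only:]

	#include <assert.h>
	#include <stdint.h>

	/* Same index derivation as vgic_mmio_read_apr()/vgic_mmio_write_apr(). */
	static int apr_index(uint64_t addr)
	{
		return (addr >> 2) & 0x3;
	}

	int main(void)
	{
		/* GICv2 CPU interface: GICC_APRn lives at 0xd0 + 4 * n. */
		assert(apr_index(0xd0) == 0);	/* GICC_APR0 */
		assert(apr_index(0xd8) == 2);	/* GICC_APR2 */
		assert(apr_index(0xdc) == 3);	/* GICC_APR3 */
		return 0;
	}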
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index bba7fa22a7f7..bf9ceab67c77 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -220,4 +220,20 @@ int vgic_debug_destroy(struct kvm *kvm);
bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);
+static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
+
+ /*
+ * num_pri_bits are initialized with HW supported values.
+ * We can rely safely on num_pri_bits even if VM has not
+ * restored ICC_CTLR_EL1 before restoring APnR registers.
+ */
+ switch (cpu_if->num_pri_bits) {
+ case 7: return 3;
+ case 6: return 1;
+ default: return 0;
+ }
+}
+
#endif
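
[Editor's note: the switch in vgic_v3_max_apr_idx() follows from the GICv3
register layout: with num_pri_bits bits of priority there are 2^num_pri_bits
preemption levels, and each 32-bit active-priority register tracks 32 of them,
so the highest valid index is 2^(num_pri_bits - 5) - 1. A hypothetical
closed-form equivalent, valid for the architectural range of 5 to 7 bits:]

	/* Illustrative closed form of the switch above: 5 -> 0, 6 -> 1, 7 -> 3. */
	static inline int max_apr_idx(unsigned int num_pri_bits)
	{
		return (1u << (num_pri_bits - 5)) - 1;
	}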