Diffstat (limited to 'arch/arm64/kvm/vgic')
 arch/arm64/kvm/vgic/vgic-init.c       | 30
 arch/arm64/kvm/vgic/vgic-its.c        |  5
 arch/arm64/kvm/vgic/vgic-kvm-device.c | 70
 arch/arm64/kvm/vgic/vgic-mmio-v3.c    | 33
 arch/arm64/kvm/vgic/vgic-v3-nested.c  | 97
 arch/arm64/kvm/vgic/vgic-v4.c         | 14
 arch/arm64/kvm/vgic/vgic-v5.c         | 52
 arch/arm64/kvm/vgic/vgic.c            |  4
 arch/arm64/kvm/vgic/vgic.h            | 48
9 files changed, 246 insertions, 107 deletions
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index eb1205654ac8..1e680ad6e863 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -157,6 +157,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	kvm->arch.vgic.in_kernel = true;
 	kvm->arch.vgic.vgic_model = type;
+	kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;
 
 	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
 
@@ -165,6 +166,9 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
 	else
 		INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
 
+	if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
+		kvm->arch.vgic.nassgicap = system_supports_direct_sgis();
+
 out_unlock:
 	mutex_unlock(&kvm->arch.config_lock);
 	kvm_unlock_all_vcpus(kvm);
@@ -391,11 +395,10 @@ int vgic_init(struct kvm *kvm)
 		goto out;
 
 	/*
-	 * If we have GICv4.1 enabled, unconditionally request enable the
-	 * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
-	 * enable it if we present a virtual ITS to the guest.
+	 * Ensure vPEs are allocated if direct IRQ injection (e.g. vSGIs,
+	 * vLPIs) is supported.
 	 */
-	if (vgic_supports_direct_msis(kvm)) {
+	if (vgic_supports_direct_irqs(kvm)) {
 		ret = vgic_v4_init(kvm);
 		if (ret)
 			goto out;
@@ -409,15 +412,7 @@ int vgic_init(struct kvm *kvm)
 		goto out;
 
 	vgic_debug_init(kvm);
-
-	/*
-	 * If userspace didn't set the GIC implementation revision,
-	 * default to the latest and greatest. You know want it.
-	 */
-	if (!dist->implementation_rev)
-		dist->implementation_rev = KVM_VGIC_IMP_REV_LATEST;
 	dist->initialized = true;
-
 out:
 	return ret;
 }
@@ -443,7 +438,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 		dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
 	}
 
-	if (vgic_supports_direct_msis(kvm))
+	if (vgic_supports_direct_irqs(kvm))
 		vgic_v4_teardown(kvm);
 
 	xa_destroy(&dist->lpi_xa);
@@ -674,10 +669,12 @@ void kvm_vgic_init_cpu_hardware(void)
 	 * We want to make sure the list registers start out clear so that we
 	 * only have the program the used registers.
 	 */
-	if (kvm_vgic_global_state.type == VGIC_V2)
+	if (kvm_vgic_global_state.type == VGIC_V2) {
 		vgic_v2_init_lrs();
-	else
+	} else if (kvm_vgic_global_state.type == VGIC_V3 ||
+		   kvm_vgic_global_state.has_gcie_v3_compat) {
 		kvm_call_hyp(__vgic_v3_init_lrs);
+	}
 }
 
 /**
@@ -722,6 +719,9 @@ int kvm_vgic_hyp_init(void)
 			kvm_info("GIC system register CPU interface enabled\n");
 		}
 		break;
+	case GIC_V5:
+		ret = vgic_v5_probe(gic_kvm_info);
+		break;
 	default:
 		ret = -ENODEV;
 	}
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 534049c7c94b..7368c13f16b7 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -758,7 +758,7 @@ static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
 	if (irq) {
 		scoped_guard(raw_spinlock_irqsave, &irq->irq_lock) {
 			if (irq->hw)
-				WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+				its_unmap_vlpi(ite->irq->host_irq);
 
 			irq->hw = false;
 		}
@@ -2694,6 +2694,9 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
 	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
 		ret = abi->restore_tables(its);
 		break;
+	default:
+		ret = -ENXIO;
+		break;
 	}
 
 	mutex_unlock(&its->its_lock);
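/*
 * A minimal userspace sketch (not part of this patch) of how a VMM drives
 * the ITS control attributes dispatched by vgic_its_ctrl() above. With the
 * new default case, an unrecognized attribute now fails cleanly with ENXIO.
 * `its_fd` is an assumed fd from KVM_CREATE_DEVICE with
 * KVM_DEV_TYPE_ARM_VGIC_ITS.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int its_save_tables(int its_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr  = KVM_DEV_ARM_ITS_SAVE_TABLES,
	};

	/* Returns -1 with errno == ENXIO for attributes the kernel rejects */
	return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
}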
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index f9ae790163fb..3d1a776b716d 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2015 ARM Ltd.
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  */
+#include <linux/irqchip/arm-gic-v3.h>
 #include <linux/kvm_host.h>
 #include <kvm/arm_vgic.h>
 #include <linux/uaccess.h>
@@ -303,12 +304,6 @@ static int vgic_get_common_attr(struct kvm_device *dev,
 				      VGIC_NR_PRIVATE_IRQS, uaddr);
 		break;
 	}
-	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
-		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
-
-		r = put_user(dev->kvm->arch.vgic.mi_intid, uaddr);
-		break;
-	}
 	}
 
 	return r;
@@ -510,6 +505,24 @@ int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
 }
 
 /*
+ * Allow access to certain ID-like registers prior to VGIC initialization,
+ * thereby allowing the VMM to provision the features / sizing of the VGIC.
+ */
+static bool reg_allowed_pre_init(struct kvm_device_attr *attr)
+{
+	if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS)
+		return false;
+
+	switch (attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK) {
+	case GICD_IIDR:
+	case GICD_TYPER2:
+		return true;
+	default:
+		return false;
+	}
+}
+
+/*
  * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
  *
  * @dev: kvm device handle
@@ -523,7 +536,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 	struct vgic_reg_attr reg_attr;
 	gpa_t addr;
 	struct kvm_vcpu *vcpu;
-	bool uaccess, post_init = true;
+	bool uaccess;
 	u32 val;
 	int ret;
 
@@ -539,9 +552,6 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 		/* Sysregs uaccess is performed by the sysreg handling code */
 		uaccess = false;
 		break;
-	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
-		post_init = false;
-		fallthrough;
 	default:
 		uaccess = true;
 	}
@@ -561,7 +571,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->arch.config_lock);
 
-	if (post_init != vgic_initialized(dev->kvm)) {
+	if (!(vgic_initialized(dev->kvm) || reg_allowed_pre_init(attr))) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -591,19 +601,6 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
 		}
 		break;
 	}
-	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
-		if (!is_write) {
-			val = dev->kvm->arch.vgic.mi_intid;
-			ret = 0;
-			break;
-		}
-
-		ret = -EINVAL;
-		if ((val < VGIC_NR_PRIVATE_IRQS) && (val >= VGIC_NR_SGIS)) {
-			dev->kvm->arch.vgic.mi_intid = val;
-			ret = 0;
-		}
-		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -630,8 +627,24 @@ static int vgic_v3_set_attr(struct kvm_device *dev,
 	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
 	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
 	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
-	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
 		return vgic_v3_attr_regs_access(dev, attr, true);
+	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
+		u32 __user *uaddr = (u32 __user *)attr->addr;
+		u32 val;
+
+		if (get_user(val, uaddr))
+			return -EFAULT;
+
+		guard(mutex)(&dev->kvm->arch.config_lock);
+		if (vgic_initialized(dev->kvm))
+			return -EBUSY;
+
+		if (!irq_is_ppi(val))
+			return -EINVAL;
+
+		dev->kvm->arch.vgic.mi_intid = val;
+		return 0;
+	}
 	default:
 		return vgic_set_common_attr(dev, attr);
 	}
@@ -645,8 +658,13 @@ static int vgic_v3_get_attr(struct kvm_device *dev,
 	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
 	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
 	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
-	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ:
 		return vgic_v3_attr_regs_access(dev, attr, false);
+	case KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ: {
+		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
+		guard(mutex)(&dev->kvm->arch.config_lock);
+		return put_user(dev->kvm->arch.vgic.mi_intid, uaddr);
+	}
 	default:
 		return vgic_get_common_attr(dev, attr);
 	}
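/*
 * Userspace view of the relocated MAINT_IRQ handling, as a hedged sketch
 * (not part of this patch): the maintenance interrupt INTID must now be
 * set *before* the VGIC is initialized, and must be a PPI (INTID 16..31).
 * `vgic_fd` is an assumed KVM_DEV_TYPE_ARM_VGIC_V3 device fd.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_maint_irq(int vgic_fd, __u32 intid)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_MAINT_IRQ,
		.addr  = (__u64)(unsigned long)&intid,
	};

	/*
	 * Fails with EBUSY once the VGIC is initialized, and with EINVAL
	 * if the INTID is outside the PPI range (see irq_is_ppi() above).
	 */
	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}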
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index ae4c0593d114..a3ef185209e9 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -50,8 +50,17 @@ bool vgic_has_its(struct kvm *kvm)
 
 bool vgic_supports_direct_msis(struct kvm *kvm)
 {
-	return (kvm_vgic_global_state.has_gicv4_1 ||
-		(kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
+	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+}
+
+bool system_supports_direct_sgis(void)
+{
+	return kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi();
+}
+
+bool vgic_supports_direct_sgis(struct kvm *kvm)
+{
+	return kvm->arch.vgic.nassgicap;
 }
 
 /*
@@ -86,7 +95,7 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
 		}
 		break;
 	case GICD_TYPER2:
-		if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi())
+		if (vgic_supports_direct_sgis(vcpu->kvm))
 			value = GICD_TYPER2_nASSGIcap;
 		break;
 	case GICD_IIDR:
@@ -119,7 +128,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
 
 		/* Not a GICv4.1? No HW SGIs */
-		if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi())
+		if (!vgic_supports_direct_sgis(vcpu->kvm))
 			val &= ~GICD_CTLR_nASSGIreq;
 
 		/* Dist stays enabled? nASSGIreq is RO */
@@ -133,7 +142,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
 		if (is_hwsgi != dist->nassgireq)
 			vgic_v4_configure_vsgis(vcpu->kvm);
 
-		if (kvm_vgic_global_state.has_gicv4_1 &&
+		if (vgic_supports_direct_sgis(vcpu->kvm) &&
 		    was_enabled != dist->enabled)
 			kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
 		else if (!was_enabled && dist->enabled)
@@ -159,8 +168,18 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
 
 	switch (addr & 0x0c) {
 	case GICD_TYPER2:
-		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
+		reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
+
+		if (reg == val)
+			return 0;
+		if (vgic_initialized(vcpu->kvm))
+			return -EBUSY;
+		if ((reg ^ val) & ~GICD_TYPER2_nASSGIcap)
 			return -EINVAL;
+		if (!system_supports_direct_sgis() && val)
+			return -EINVAL;
+
+		dist->nassgicap = val & GICD_TYPER2_nASSGIcap;
 		return 0;
 	case GICD_IIDR:
 		reg = vgic_mmio_read_v3_misc(vcpu, addr, len);
@@ -178,7 +197,7 @@ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
 	}
 	case GICD_CTLR:
 		/* Not a GICv4.1? No HW SGIs */
-		if (!kvm_vgic_global_state.has_gicv4_1)
+		if (!vgic_supports_direct_sgis(vcpu->kvm))
 			val &= ~GICD_CTLR_nASSGIreq;
 
 		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
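/*
 * Standalone model (an illustration, not kernel code) of the GICD_TYPER2
 * userspace-write rules above: only the nASSGIcap bit may differ from the
 * emulated value, changes are refused once the VGIC is initialized, and
 * the capability cannot be enabled on hardware without vSGI support.
 */
#include <stdint.h>
#include <stdbool.h>

#define GICD_TYPER2_nASSGIcap	(1U << 8)

static int typer2_uaccess_write(uint32_t reg, uint32_t val,
				bool initialized, bool sys_vsgi)
{
	if (reg == val)
		return 0;	/* no change requested */
	if (initialized)
		return -16;	/* -EBUSY: too late to renegotiate */
	if ((reg ^ val) & ~GICD_TYPER2_nASSGIcap)
		return -22;	/* -EINVAL: only nASSGIcap may change */
	if (!sys_vsgi && val)
		return -22;	/* -EINVAL: no vSGI support in hardware */
	return 0;		/* accepted: nassgicap = val & nASSGIcap */
}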
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index 4f6954c30674..7f1259b49c50 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -36,6 +36,11 @@ struct shadow_if {
 
 static DEFINE_PER_CPU(struct shadow_if, shadow_if);
 
+static int lr_map_idx_to_shadow_idx(struct shadow_if *shadow_if, int idx)
+{
+	return hweight16(shadow_if->lr_map & (BIT(idx) - 1));
+}
+
 /*
  * Nesting GICv3 support
  *
@@ -111,7 +116,7 @@ bool vgic_state_is_nested(struct kvm_vcpu *vcpu)
 {
 	u64 xmo;
 
-	if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+	if (is_nested_ctxt(vcpu)) {
 		xmo = __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_IMO | HCR_FMO);
 		WARN_ONCE(xmo && xmo != (HCR_IMO | HCR_FMO),
 			  "Separate virtual IRQ/FIQ settings not supported\n");
@@ -209,6 +214,29 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
 	return reg;
 }
 
+static u64 translate_lr_pintid(struct kvm_vcpu *vcpu, u64 lr)
+{
+	struct vgic_irq *irq;
+
+	if (!(lr & ICH_LR_HW))
+		return lr;
+
+	/* We have the HW bit set, check for validity of pINTID */
+	irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
+	/* If there was no real mapping, nuke the HW bit */
+	if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI)
+		lr &= ~ICH_LR_HW;
+
+	/* Translate the virtual mapping to the real one, even if invalid */
+	if (irq) {
+		lr &= ~ICH_LR_PHYS_ID_MASK;
+		lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
+		vgic_put_irq(vcpu->kvm, irq);
+	}
+
+	return lr;
+}
+
 /*
  * For LRs which have HW bit set such as timer interrupts, we modify them to
  * have the host hardware interrupt number instead of the virtual one programmed
@@ -217,58 +245,37 @@ u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
 static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu,
 				     struct vgic_v3_cpu_if *s_cpu_if)
 {
-	unsigned long lr_map = 0;
-	int index = 0;
+	struct shadow_if *shadow_if;
+
+	shadow_if = container_of(s_cpu_if, struct shadow_if, cpuif);
+	shadow_if->lr_map = 0;
 
 	for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
 		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
-		struct vgic_irq *irq;
 
 		if (!(lr & ICH_LR_STATE))
-			lr = 0;
-
-		if (!(lr & ICH_LR_HW))
-			goto next;
-
-		/* We have the HW bit set, check for validity of pINTID */
-		irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
-		if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI ) {
-			/* There was no real mapping, so nuke the HW bit */
-			lr &= ~ICH_LR_HW;
-			if (irq)
-				vgic_put_irq(vcpu->kvm, irq);
-			goto next;
-		}
-
-		/* Translate the virtual mapping to the real one */
-		lr &= ~ICH_LR_PHYS_ID_MASK;
-		lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
+			continue;
 
-		vgic_put_irq(vcpu->kvm, irq);
+		lr = translate_lr_pintid(vcpu, lr);
 
-next:
-		s_cpu_if->vgic_lr[index] = lr;
-		if (lr) {
-			lr_map |= BIT(i);
-			index++;
-		}
+		s_cpu_if->vgic_lr[hweight16(shadow_if->lr_map)] = lr;
+		shadow_if->lr_map |= BIT(i);
 	}
 
-	container_of(s_cpu_if, struct shadow_if, cpuif)->lr_map = lr_map;
-	s_cpu_if->used_lrs = index;
+	s_cpu_if->used_lrs = hweight16(shadow_if->lr_map);
 }
 
 void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
 {
 	struct shadow_if *shadow_if = get_shadow_if();
-	int i, index = 0;
+	int i;
 
 	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
 		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
 		struct vgic_irq *irq;
 
 		if (!(lr & ICH_LR_HW) || !(lr & ICH_LR_STATE))
-			goto next;
+			continue;
 
 		/*
 		 * If we had a HW lr programmed by the guest hypervisor, we
@@ -277,15 +284,13 @@ void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
 		 */
 		irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
 		if (WARN_ON(!irq)) /* Shouldn't happen as we check on load */
-			goto next;
+			continue;
 
-		lr = __gic_v3_get_lr(index);
+		lr = __gic_v3_get_lr(lr_map_idx_to_shadow_idx(shadow_if, i));
 		if (!(lr & ICH_LR_STATE))
 			irq->active = false;
 
 		vgic_put_irq(vcpu->kvm, irq);
-	next:
-		index++;
 	}
 }
 
@@ -356,25 +361,23 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
 	val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
 	val &= ~ICH_HCR_EL2_EOIcount_MASK;
 	val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
-	__vcpu_sys_reg(vcpu, ICH_HCR_EL2) = val;
-	__vcpu_sys_reg(vcpu, ICH_VMCR_EL2) = s_cpu_if->vgic_vmcr;
+	__vcpu_assign_sys_reg(vcpu, ICH_HCR_EL2, val);
+	__vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr);
 
 	for (i = 0; i < 4; i++) {
-		__vcpu_sys_reg(vcpu, ICH_AP0RN(i)) = s_cpu_if->vgic_ap0r[i];
-		__vcpu_sys_reg(vcpu, ICH_AP1RN(i)) = s_cpu_if->vgic_ap1r[i];
+		__vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]);
+		__vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]);
 	}
 
 	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
 		val = __vcpu_sys_reg(vcpu, ICH_LRN(i));
 
 		val &= ~ICH_LR_STATE;
-		val |= s_cpu_if->vgic_lr[i] & ICH_LR_STATE;
+		val |= s_cpu_if->vgic_lr[lr_map_idx_to_shadow_idx(shadow_if, i)] & ICH_LR_STATE;
 
-		__vcpu_sys_reg(vcpu, ICH_LRN(i)) = val;
-		s_cpu_if->vgic_lr[i] = 0;
+		__vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val);
 	}
 
-	shadow_if->lr_map = 0;
 	vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0;
 }
 
@@ -398,9 +401,7 @@ void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu)
 {
 	bool level;
 
-	level = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En;
-	if (level)
-		level &= vgic_v3_get_misr(vcpu);
+	level = (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En) && vgic_v3_get_misr(vcpu);
 	kvm_vgic_inject_irq(vcpu->kvm, vcpu, vcpu->kvm->arch.vgic.mi_intid,
 			    level, vcpu);
 }
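/*
 * Standalone illustration (simplified, host-agnostic) of the
 * lr_map_idx_to_shadow_idx() trick above: the shadow LR array is a dense
 * packing of the sparse guest LRs, so the shadow index of guest LR `idx`
 * is the number of in-use LRs below it - a popcount of the masked bitmap.
 */
#include <stdio.h>
#include <stdint.h>

static int map_idx_to_shadow_idx(uint16_t lr_map, int idx)
{
	return __builtin_popcount(lr_map & ((1U << idx) - 1));
}

int main(void)
{
	uint16_t lr_map = 0x2d;	/* guest LRs 0, 2, 3 and 5 in use */

	/* Guest LR5 is the 4th in-use LR, so it lives in shadow slot 3 */
	printf("%d\n", map_idx_to_shadow_idx(lr_map, 5));	/* prints 3 */
	return 0;
}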
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 193946108192..4d9343d2b0b1 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -356,7 +356,7 @@ int vgic_v4_put(struct kvm_vcpu *vcpu)
 {
 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
 
-	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
+	if (!vgic_supports_direct_irqs(vcpu->kvm) || !vpe->resident)
 		return 0;
 
 	return its_make_vpe_non_resident(vpe, vgic_v4_want_doorbell(vcpu));
@@ -367,7 +367,7 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
 	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
 	int err;
 
-	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
+	if (!vgic_supports_direct_irqs(vcpu->kvm) || vpe->resident)
 		return 0;
 
 	if (vcpu_get_flag(vcpu, IN_WFI))
@@ -527,28 +527,26 @@ static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
 	return NULL;
 }
 
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
+void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
 {
 	struct vgic_irq *irq;
 	unsigned long flags;
-	int ret = 0;
 
 	if (!vgic_supports_direct_msis(kvm))
-		return 0;
+		return;
 
 	irq = __vgic_host_irq_get_vlpi(kvm, host_irq);
 	if (!irq)
-		return 0;
+		return;
 
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	WARN_ON(irq->hw && irq->host_irq != host_irq);
 	if (irq->hw) {
 		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
 		irq->hw = false;
-		ret = its_unmap_vlpi(host_irq);
+		its_unmap_vlpi(host_irq);
 	}
 
 	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(kvm, irq);
-	return ret;
 }
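/*
 * A simplified model (an assumption-laden sketch, not the kernel's exact
 * types) of the direct-injection predicates this series splits apart:
 * vLPIs need GICv4 plus an ITS, vSGIs need the per-VM nASSGIcap
 * negotiation, and vgic_supports_direct_irqs() conflates the two on
 * vSGI-capable hosts so userspace's nASSGIcap choice also governs vPE
 * allocation (see vgic_v4_load()/vgic_v4_put() above).
 */
#include <stdbool.h>

struct sys_caps { bool has_gicv4, has_gicv4_1, cpuif_vsgi; };
struct vm_state { bool has_its, nassgicap; };

static bool direct_msis(const struct sys_caps *s, const struct vm_state *vm)
{
	return s->has_gicv4 && vm->has_its;
}

static bool system_direct_sgis(const struct sys_caps *s)
{
	return s->has_gicv4_1 && s->cpuif_vsgi;
}

static bool direct_irqs(const struct sys_caps *s, const struct vm_state *vm)
{
	if (system_direct_sgis(s))
		return vm->nassgicap;	/* userspace-negotiated */
	return direct_msis(s, vm);
}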
diff --git a/arch/arm64/kvm/vgic/vgic-v5.c b/arch/arm64/kvm/vgic/vgic-v5.c
new file mode 100644
index 000000000000..6bdbb221bcde
--- /dev/null
+++ b/arch/arm64/kvm/vgic/vgic-v5.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <kvm/arm_vgic.h>
+#include <linux/irqchip/arm-vgic-info.h>
+
+#include "vgic.h"
+
+/*
+ * Probe for a vGICv5 compatible interrupt controller, returning 0 on success.
+ * Currently only supports GICv3-based VMs on a GICv5 host, and hence only
+ * registers a VGIC_V3 device.
+ */
+int vgic_v5_probe(const struct gic_kvm_info *info)
+{
+	u64 ich_vtr_el2;
+	int ret;
+
+	if (!info->has_gcie_v3_compat)
+		return -ENODEV;
+
+	kvm_vgic_global_state.type = VGIC_V5;
+	kvm_vgic_global_state.has_gcie_v3_compat = true;
+
+	/* We only support v3 compat mode - use vGICv3 limits */
+	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
+
+	kvm_vgic_global_state.vcpu_base = 0;
+	kvm_vgic_global_state.vctrl_base = NULL;
+	kvm_vgic_global_state.can_emulate_gicv2 = false;
+	kvm_vgic_global_state.has_gicv4 = false;
+	kvm_vgic_global_state.has_gicv4_1 = false;
+
+	ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
+	kvm_vgic_global_state.ich_vtr_el2 = (u32)ich_vtr_el2;
+
+	/*
+	 * The ListRegs field is 5 bits, but there is an architectural
+	 * maximum of 16 list registers. Just ignore bit 4...
+	 */
+	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
+
+	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
+	if (ret) {
+		kvm_err("Cannot register GICv3-legacy KVM device.\n");
+		return ret;
+	}
+
+	static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
+	kvm_info("GCIE legacy system register CPU interface\n");
+
+	return 0;
+}
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 8f8096d48925..f5148b38120a 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -951,7 +951,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * can be directly injected (GICv4).
 	 */
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
-	    !vgic_supports_direct_msis(vcpu->kvm))
+	    !vgic_supports_direct_irqs(vcpu->kvm))
 		return;
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
@@ -965,7 +965,7 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
 
-	if (vgic_supports_direct_msis(vcpu->kvm))
+	if (vgic_supports_direct_irqs(vcpu->kvm))
 		vgic_v4_commit(vcpu);
 }
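/*
 * Worked example (standalone; the register value is illustrative) of the
 * ListRegs extraction in vgic_v5_probe() above: ICH_VTR_EL2.ListRegs holds
 * the number of list registers minus one in its low bits, but since only
 * 16 LRs are architected the probe masks with 0xf and ignores bit 4.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ich_vtr_el2 = 0x7;	/* ListRegs field = 7 */
	unsigned int nr_lr = (ich_vtr_el2 & 0xf) + 1;

	printf("nr_lr = %u\n", nr_lr);	/* 8 list registers */
	return 0;
}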
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 4349084cb9a6..1384a04c0784 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -64,6 +64,24 @@
 				      KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
 				      KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
 
+#define KVM_ICC_SRE_EL2		(ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE | \
+				 ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB)
+#define KVM_ICH_VTR_EL2_RES0	(ICH_VTR_EL2_DVIM | \
+				 ICH_VTR_EL2_A3V | \
+				 ICH_VTR_EL2_IDbits)
+#define KVM_ICH_VTR_EL2_RES1	ICH_VTR_EL2_nV4
+
+static inline u64 kvm_get_guest_vtr_el2(void)
+{
+	u64 vtr;
+
+	vtr  = kvm_vgic_global_state.ich_vtr_el2;
+	vtr &= ~KVM_ICH_VTR_EL2_RES0;
+	vtr |=  KVM_ICH_VTR_EL2_RES1;
+
+	return vtr;
+}
+
 /*
  * As per Documentation/virt/kvm/devices/arm-vgic-its.rst,
  * below macros are defined for ITS table entry encoding.
@@ -297,6 +315,7 @@ int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
 int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
 				struct kvm_device_attr *attr, bool is_write);
 int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
+const struct sys_reg_desc *vgic_v3_get_sysreg_table(unsigned int *sz);
 int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
 				    u32 intid, u32 *val);
 int kvm_register_vgic_device(unsigned long type);
@@ -308,6 +327,8 @@ int vgic_init(struct kvm *kvm);
 void vgic_debug_init(struct kvm *kvm);
 void vgic_debug_destroy(struct kvm *kvm);
 
+int vgic_v5_probe(const struct gic_kvm_info *info);
+
 static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
@@ -369,7 +390,23 @@ void vgic_its_invalidate_all_caches(struct kvm *kvm);
 int vgic_its_inv_lpi(struct kvm *kvm, struct vgic_irq *irq);
 int vgic_its_invall(struct kvm_vcpu *vcpu);
 
+bool system_supports_direct_sgis(void);
 bool vgic_supports_direct_msis(struct kvm *kvm);
+bool vgic_supports_direct_sgis(struct kvm *kvm);
+
+static inline bool vgic_supports_direct_irqs(struct kvm *kvm)
+{
+	/*
+	 * Deliberately conflate vLPI and vSGI support on GICv4.1 hardware,
+	 * indirectly allowing userspace to control whether or not vPEs are
+	 * allocated for the VM.
+	 */
+	if (system_supports_direct_sgis())
+		return vgic_supports_direct_sgis(kvm);
+
+	return vgic_supports_direct_msis(kvm);
+}
+
 int vgic_v4_init(struct kvm *kvm);
 void vgic_v4_teardown(struct kvm *kvm);
 void vgic_v4_configure_vsgis(struct kvm *kvm);
@@ -389,6 +426,17 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu);
 void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu);
 void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu);
 
+static inline bool vgic_is_v3_compat(struct kvm *kvm)
+{
+	return cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF) &&
+	       kvm_vgic_global_state.has_gcie_v3_compat;
+}
+
+static inline bool vgic_is_v3(struct kvm *kvm)
+{
+	return kvm_vgic_global_state.type == VGIC_V3 || vgic_is_v3_compat(kvm);
+}
+
 int vgic_its_debug_init(struct kvm_device *dev);
 void vgic_its_debug_destroy(struct kvm_device *dev);
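/*
 * Standalone sketch of the kvm_get_guest_vtr_el2() shaping above. The bit
 * positions below are assumptions for illustration only (consult the
 * ICH_VTR_EL2 definition for the real layout): features KVM will not
 * virtualize are forced to 0 (RES0) and nV4 is forced to 1, so a nested
 * hypervisor never sees GICv4 on the virtual CPU interface.
 */
#include <stdint.h>

#define VTR_DVIM	(1ULL << 18)	/* assumed position */
#define VTR_nV4		(1ULL << 20)	/* assumed position */
#define VTR_A3V		(1ULL << 21)	/* assumed position */
#define VTR_IDbits	(7ULL << 23)	/* assumed position */

#define VTR_RES0	(VTR_DVIM | VTR_A3V | VTR_IDbits)
#define VTR_RES1	VTR_nV4

static uint64_t guest_vtr_el2(uint64_t host_vtr)
{
	uint64_t vtr = host_vtr;

	vtr &= ~VTR_RES0;	/* hide DVIM/A3V/IDbits from the guest */
	vtr |=  VTR_RES1;	/* advertise "no GICv4" to the guest */
	return vtr;
}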