Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/Kconfig                      |  8
-rw-r--r--  arch/arm64/kvm/Makefile                     |  2
-rw-r--r--  arch/arm64/kvm/arch_timer.c                 |  7
-rw-r--r--  arch/arm64/kvm/arm.c                        | 35
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/adjust_pc.h  |  9
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-init.S          | 20
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-main.c          | 12
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/hyp-smp.c           |  6
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/psci-relay.c        | 72
-rw-r--r--  arch/arm64/kvm/pmu-emul.c                   | 12
-rw-r--r--  arch/arm64/kvm/sys_regs.c                   | 97
-rw-r--r--  arch/arm64/kvm/va_layout.c                  |  7
-rw-r--r--  arch/arm64/kvm/vgic/vgic-init.c             | 11
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v2.c               | 20
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v3.c               | 21
15 files changed, 169 insertions(+), 170 deletions(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 043756db8f6e..3964acf5451e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -49,14 +49,6 @@ if KVM
source "virt/kvm/Kconfig"
-config KVM_ARM_PMU
- bool "Virtual Performance Monitoring Unit (PMU) support"
- depends on HW_PERF_EVENTS
- default y
- help
- Adds support for a virtual Performance Monitoring Unit (PMU) in
- virtual machines.
-
endif # KVM
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 60fd181df624..13b017284bf9 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
vgic/vgic-its.o vgic/vgic-debug.o
-kvm-$(CONFIG_KVM_ARM_PMU) += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
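With CONFIG_KVM_ARM_PMU gone, pmu-emul.o builds whenever the architecture has
perf events, and whether a guest sees a PMU becomes a purely runtime decision.
A minimal sketch of the header-side shape this implies (the stub form is an
assumption; kvm_arm_support_pmu_v3() itself appears in the sys_regs.c hunk
further down):

    #ifdef CONFIG_HW_PERF_EVENTS
    bool kvm_arm_support_pmu_v3(void);
    #else
    static inline bool kvm_arm_support_pmu_v3(void) { return false; }
    #endif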
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 32ba6fbc3814..74e0699661e9 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
if (!irqchip_in_kernel(vcpu->kvm))
goto no_vgic;
- if (!vgic_initialized(vcpu->kvm))
- return -ENODEV;
-
+ /*
+ * At this stage, we have the guarantee that the vgic is both
+ * available and initialized.
+ */
if (!timer_irqs_are_valid(vcpu)) {
kvm_debug("incorrectly configured timer irqs\n");
return -EINVAL;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 6e637d2b4cfb..fe60d25c000e 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -65,10 +65,6 @@ static bool vgic_present;
static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* Map the VGIC hardware resources before running a vcpu the
* first time on this VM.
*/
- if (unlikely(!vgic_ready(kvm))) {
- ret = kvm_vgic_map_resources(kvm);
- if (ret)
- return ret;
- }
+ ret = kvm_vgic_map_resources(kvm);
+ if (ret)
+ return ret;
} else {
/*
* Tell the rest of the code that there are userspace irqchip
@@ -1402,8 +1396,9 @@ static void cpu_init_hyp_mode(void)
* Calculate the raw per-cpu offset without a translation from the
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
+ * Also drop the KASAN tag which gets in the way...
*/
- params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+ params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
(unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
params->mair_el2 = read_sysreg(mair_el1);
@@ -1574,12 +1569,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
.notifier_call = hyp_init_cpu_pm_notifier,
};
-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
{
if (!is_protected_kvm_enabled())
cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1599,12 @@ static void init_cpu_logical_map(void)
* allow any other CPUs from the `possible` set to boot.
*/
for_each_online_cpu(cpu)
- kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+ hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
}
+#define init_psci_0_1_impl_state(config, what) \
+ config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
static bool init_psci_relay(void)
{
/*
@@ -1618,8 +1616,15 @@ static bool init_psci_relay(void)
return false;
}
- kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
- kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+ kvm_host_psci_config.version = psci_ops.get_version();
+
+ if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+ kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+ init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+ init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+ }
return true;
}
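For clarity, one expansion of the helper macro above, assuming the
*_implemented members are booleans (so assigning the psci_ops function
pointer simply records whether the firmware provides that PSCI 0.1 call):

    /* init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on) expands to: */
    kvm_host_psci_config.psci_0_1_cpu_on_implemented = psci_ops.cpu_on;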
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index b1f60923a8fe..61716359035d 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
}
}
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+ write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 31b060a44045..b17bf19217f1 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -47,6 +47,8 @@ __invalid:
b .
/*
+ * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
+ *
* x0: SMCCC function ID
* x1: struct kvm_nvhe_init_params PA
*/
@@ -70,9 +72,9 @@ __do_hyp_init:
eret
1: mov x0, x1
- mov x4, lr
- bl ___kvm_hyp_init
- mov lr, x4
+ mov x3, lr
+ bl ___kvm_hyp_init // Clobbers x0..x2
+ mov lr, x3
/* Hello, World! */
mov x0, #SMCCC_RET_SUCCESS
@@ -82,8 +84,8 @@ SYM_CODE_END(__kvm_hyp_init)
/*
* Initialize the hypervisor in EL2.
*
- * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers
- * and leave x4 for the caller.
+ * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
+ * and leave x3 for the caller.
*
* x0: struct kvm_nvhe_init_params PA
*/
@@ -112,9 +114,9 @@ alternative_else_nop_endif
/*
* Set the PS bits in TCR_EL2.
*/
- ldr x1, [x0, #NVHE_INIT_TCR_EL2]
- tcr_compute_pa_size x1, #TCR_EL2_PS_SHIFT, x2, x3
- msr tcr_el2, x1
+ ldr x0, [x0, #NVHE_INIT_TCR_EL2]
+ tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
+ msr tcr_el2, x0
isb
@@ -193,7 +195,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
/* Enable MMU, set vectors and stack. */
mov x0, x28
- bl ___kvm_hyp_init // Clobbers x0..x3
+ bl ___kvm_hyp_init // Clobbers x0..x2
/* Leave idmap. */
mov x0, x29
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index bde658d51404..a906f9e2ff34 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
__kvm_hyp_host_forward_smc(host_ctxt);
}
-static void skip_host_instruction(void)
-{
- write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
if (!handled)
default_host_smc_handler(host_ctxt);
- /*
- * Unlike HVC, the return address of an SMC is the instruction's PC.
- * Move the return address past the instruction.
- */
- skip_host_instruction();
+ /* SMC was trapped, move ELR past the current PC. */
+ kvm_skip_host_instr();
}
void handle_trap(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index cbab0c6246e2..2997aa156d8e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -14,14 +14,14 @@
* Other CPUs should not be allowed to boot because their features were
* not checked against the finalized system capabilities.
*/
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
u64 cpu_logical_map(unsigned int cpu)
{
- if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+ if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
hyp_panic();
- return __cpu_logical_map[cpu];
+ return hyp_cpu_logical_map[cpu];
}
unsigned long __hyp_per_cpu_offset(unsigned int cpu)
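The rename from __cpu_logical_map to hyp_cpu_logical_map avoids aliasing the
kernel's own __cpu_logical_map, since nVHE hyp symbols are only distinguished
by a linker-applied prefix. A sketch of that mangling (macro shape assumed
from asm/kvm_asm.h):

    #define kvm_nvhe_sym(sym) __kvm_nvhe_##sym
    /* kernel-side declaration of the hyp-owned array: */
    extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];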
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 08dc9de69314..8e7128cb7667 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -7,11 +7,8 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
#include <uapi/linux/psci.h>
#include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
/* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
@@ -47,19 +43,16 @@ struct psci_boot_args {
static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
-
- return func_id;
-}
+#define is_psci_0_1(what, func_id) \
+ (kvm_host_psci_config.psci_0_1_ ## what ## _implemented && \
+ (func_id) == kvm_host_psci_config.function_ids_0_1.what)
static bool is_psci_0_1_call(u64 func_id)
{
- return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
- (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate);
+ return (is_psci_0_1(cpu_suspend, func_id) ||
+ is_psci_0_1(cpu_on, func_id) ||
+ is_psci_0_1(cpu_off, func_id) ||
+ is_psci_0_1(migrate, func_id));
}
static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
(PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}
-static bool is_psci_call(u64 func_id)
-{
- switch (kvm_host_psci_version) {
- case PSCI_VERSION(0, 1):
- return is_psci_0_1_call(func_id);
- default:
- return is_psci_0_2_call(func_id);
- }
-}
-
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
unsigned long arg1, unsigned long arg2)
{
@@ -94,12 +77,6 @@ static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
- psci_forward(host_ctxt);
- hyp_panic(); /* unreachable */
-}
-
static unsigned int find_cpu_id(u64 mpidr)
{
unsigned int i;
@@ -248,15 +225,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
- if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
- (func_id == kvm_host_psci_0_1_function_ids.migrate))
+ if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
return psci_forward(host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+ if (is_psci_0_1(cpu_on, func_id))
return psci_cpu_on(func_id, host_ctxt);
- else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+ if (is_psci_0_1(cpu_suspend, func_id))
return psci_cpu_suspend(func_id, host_ctxt);
- else
- return PSCI_RET_NOT_SUPPORTED;
+
+ return PSCI_RET_NOT_SUPPORTED;
}
static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -269,10 +245,13 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
return psci_forward(host_ctxt);
+ /*
+ * SYSTEM_OFF/RESET should not return according to the spec.
+ * Allow it so as to stay robust to broken firmware.
+ */
case PSCI_0_2_FN_SYSTEM_OFF:
case PSCI_0_2_FN_SYSTEM_RESET:
- psci_forward_noreturn(host_ctxt);
- unreachable();
+ return psci_forward(host_ctxt);
case PSCI_0_2_FN64_CPU_SUSPEND:
return psci_cpu_suspend(func_id, host_ctxt);
case PSCI_0_2_FN64_CPU_ON:
@@ -298,20 +277,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
- u64 func_id = get_psci_func_id(host_ctxt);
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
unsigned long ret;
- if (!is_psci_call(func_id))
- return false;
-
- switch (kvm_host_psci_version) {
+ switch (kvm_host_psci_config.version) {
case PSCI_VERSION(0, 1):
+ if (!is_psci_0_1_call(func_id))
+ return false;
ret = psci_0_1_handler(func_id, host_ctxt);
break;
case PSCI_VERSION(0, 2):
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_0_2_handler(func_id, host_ctxt);
break;
default:
+ if (!is_psci_0_2_call(func_id))
+ return false;
ret = psci_1_0_handler(func_id, host_ctxt);
break;
}
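Expanding the is_psci_0_1() helper makes the double check explicit: a PSCI
0.1 function ID only matches if the firmware actually implements that call,
so an unimplemented entry left at zero can no longer match a zero func_id.
For example:

    /* is_psci_0_1(cpu_on, func_id) expands to: */
    (kvm_host_psci_config.psci_0_1_cpu_on_implemented &&
     (func_id) == kvm_host_psci_config.function_ids_0_1.cpu_on)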
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 398f6df1bbe4..247422ac78a9 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -788,7 +788,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
u64 val, mask = 0;
- int base, i;
+ int base, i, nr_events;
if (!pmceid1) {
val = read_sysreg(pmceid0_el0);
@@ -801,13 +801,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
if (!bmap)
return val;
+ nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
for (i = 0; i < 32; i += 8) {
u64 byte;
byte = bitmap_get_value8(bmap, base + i);
mask |= byte << i;
- byte = bitmap_get_value8(bmap, 0x4000 + base + i);
- mask |= byte << (32 + i);
+ if (nr_events >= (0x4000 + base + 32)) {
+ byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+ mask |= byte << (32 + i);
+ }
}
return val & mask;
@@ -850,8 +854,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- kvm_pmu_vcpu_reset(vcpu);
-
return 0;
}
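A worked example of the new nr_events guard, with event-mask widths that are
assumptions based on the usual PMUv3 encodings:

    /*
     * Assumed mask widths:
     *   ARMv8.0  : kvm_pmu_event_mask() == GENMASK(9, 0)  -> nr_events = 0x400
     *   PMUv3p1+ : kvm_pmu_event_mask() == GENMASK(15, 0) -> nr_events = 0x10000
     * Only the second case satisfies nr_events >= 0x4000 + base + 32, so only
     * then do the extended-event bytes make it into the PMCEID mask.
     */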
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3313dedfa505..7c4f79532406 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -43,6 +43,10 @@
* 64bit interface.
*/
+#define reg_to_encoding(x) \
+ sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
+ (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
static bool read_from_write_only(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
@@ -273,8 +277,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
- u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 sr = reg_to_encoding(r);
if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
kvm_inject_undefined(vcpu);
@@ -590,10 +593,23 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (kvm_vcpu_has_pmu(vcpu))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 pmcr, val;
+ /* No PMU available, PMCR_EL0 may UNDEF... */
+ if (!kvm_arm_support_pmu_v3())
+ return;
+
pmcr = read_sysreg(pmcr_el0);
/*
* Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -609,9 +625,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
- bool enabled = kvm_vcpu_has_pmu(vcpu);
+ bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
- enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
if (!enabled)
kvm_inject_undefined(vcpu);
@@ -896,11 +911,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- if (!kvm_vcpu_has_pmu(vcpu)) {
- kvm_inject_undefined(vcpu);
- return false;
- }
-
if (p->is_write) {
if (!vcpu_mode_priv(vcpu)) {
kvm_inject_undefined(vcpu);
@@ -917,10 +927,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
-#define reg_to_encoding(x) \
- sys_reg((u32)(x)->Op0, (u32)(x)->Op1, \
- (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
-
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
@@ -932,15 +938,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
+#define PMU_SYS_REG(r) \
+ SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
- { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
- access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+ { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)), \
+ .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
- { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
- access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+ { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)), \
+ .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
const struct sys_reg_desc *r)
@@ -1016,8 +1025,7 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r, bool raz)
{
- u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 id = reg_to_encoding(r);
u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
if (id == SYS_ID_AA64PFR0_EL1) {
@@ -1058,8 +1066,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
const struct sys_reg_desc *r)
{
- u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
- (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u32 id = reg_to_encoding(r);
switch (id) {
case SYS_ID_AA64ZFR0_EL1:
@@ -1482,8 +1489,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
- { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
- { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+ { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+ .access = access_pminten, .reg = PMINTENSET_EL1 },
+ { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+ .access = access_pminten, .reg = PMINTENSET_EL1 },
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1522,23 +1531,36 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
- { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
- { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
- { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
- { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
- { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
- { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
- { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
- { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
- { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+ { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+ .reset = reset_pmcr, .reg = PMCR_EL0 },
+ { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+ .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+ { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+ .access = access_pmovs, .reg = PMOVSSET_EL0 },
+ { PMU_SYS_REG(SYS_PMSWINC_EL0),
+ .access = access_pmswinc, .reg = PMSWINC_EL0 },
+ { PMU_SYS_REG(SYS_PMSELR_EL0),
+ .access = access_pmselr, .reg = PMSELR_EL0 },
+ { PMU_SYS_REG(SYS_PMCEID0_EL0),
+ .access = access_pmceid, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMCEID1_EL0),
+ .access = access_pmceid, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+ .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+ { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+ .access = access_pmu_evtyper, .reset = NULL },
+ { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+ .access = access_pmu_evcntr, .reset = NULL },
/*
* PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
- { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+ { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+ .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+ { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+ .access = access_pmovs, .reg = PMOVSSET_EL0 },
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
@@ -1690,7 +1712,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
* PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
* in 32bit mode. Here we choose to reset it as zero for consistency.
*/
- { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+ { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+ .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
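PMU_SYS_REG() leans on C designated initializers, where the last initializer
for a given member wins; an entry can therefore override the defaults the
macro pre-sets. The PMCEID0_EL0 entry above, for instance, expands to roughly:

    { SYS_DESC(SYS_PMCEID0_EL0),
      .reset = reset_unknown,       /* default from PMU_SYS_REG() ... */
      .visibility = pmu_visibility,
      .access = access_pmceid,
      .reset = NULL,                /* ... overridden: no shadow register */
    },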
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index d8cc51bd60bf..70fcd6a12fe1 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
}
/*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into an EL2-owned variable.
*/
static void init_hyp_physvirt_offset(void)
{
- extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
u64 kern_va, hyp_va;
/* Compute the offset from the hyp VA and PA of a random symbol. */
- kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+ kern_va = (u64)lm_alias(__hyp_text_start);
hyp_va = __early_kern_hyp_va(kern_va);
- CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+ hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}
/*
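Using lm_alias() rather than kvm_ksym_ref() hands __pa() a linear-map
address, which is the only kind it is defined for. A sketch of the helper,
assuming the usual definition from include/linux/mm.h:

    #define lm_alias(x)	__va(__pa_symbol(x))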
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 32e32d67a127..052917deb149 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -419,7 +419,8 @@ int vgic_lazy_init(struct kvm *kvm)
* Map the MMIO regions depending on the VGIC model exposed to the guest
* called on the first VCPU run.
* Also map the virtual CPU interface into the VM.
- * v2/v3 derivatives call vgic_init if not already done.
+ * v2 calls vgic_init() if not already done.
+ * v3 and derivatives return an error if the VGIC is not initialized.
* vgic_ready() returns true if this function has succeeded.
* @kvm: kvm struct pointer
*/
@@ -428,7 +429,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
+ if (likely(vgic_ready(kvm)))
+ return 0;
+
mutex_lock(&kvm->lock);
+ if (vgic_ready(kvm))
+ goto out;
+
if (!irqchip_in_kernel(kvm))
goto out;
@@ -439,6 +446,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
if (ret)
__kvm_vgic_destroy(kvm);
+ else
+ dist->ready = true;
out:
mutex_unlock(&kvm->lock);
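kvm_vgic_map_resources() now owns the "already ready" test, in the classic
double-checked form: an unlocked fast path, then a re-check under kvm->lock
in case another vCPU completed the init first. This is also why the
vgic_ready() tests disappear from the v2/v3 backends below, and why
dist->ready is set here rather than in them. Reduced to a sketch:

    if (likely(vgic_ready(kvm)))    /* unlocked fast path */
            return 0;
    mutex_lock(&kvm->lock);
    if (vgic_ready(kvm))            /* lost the race: someone else did it */
            goto out;
    /* ... map resources; on success set dist->ready ... */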
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index ebf53a4e1296..11934c2af2f4 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -306,20 +306,15 @@ int vgic_v2_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
- if (vgic_ready(kvm))
- goto out;
-
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
kvm_err("Need to set vgic cpu and dist addresses first\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
kvm_err("VGIC CPU and dist frames overlap\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -329,13 +324,13 @@ int vgic_v2_map_resources(struct kvm *kvm)
ret = vgic_init(kvm);
if (ret) {
kvm_err("Unable to initialize VGIC dynamic data structures\n");
- goto out;
+ return ret;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
if (ret) {
kvm_err("Unable to register VGIC MMIO regions\n");
- goto out;
+ return ret;
}
if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -344,14 +339,11 @@ int vgic_v2_map_resources(struct kvm *kvm)
KVM_VGIC_V2_CPU_SIZE, true);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
- goto out;
+ return ret;
}
}
- dist->ready = true;
-
-out:
- return ret;
+ return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 9cdf39a94a63..52915b342351 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -500,29 +500,23 @@ int vgic_v3_map_resources(struct kvm *kvm)
int ret = 0;
int c;
- if (vgic_ready(kvm))
- goto out;
-
kvm_for_each_vcpu(c, vcpu, kvm) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
kvm_debug("vcpu %d redistributor base not set\n", c);
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
}
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
kvm_err("Need to set vgic distributor addresses first\n");
- ret = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (!vgic_v3_check_base(kvm)) {
kvm_err("VGIC redist and dist frames overlap\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/*
@@ -530,22 +524,19 @@ int vgic_v3_map_resources(struct kvm *kvm)
* the VGIC before we need to use it.
*/
if (!vgic_initialized(kvm)) {
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
if (ret) {
kvm_err("Unable to register VGICv3 dist MMIO regions\n");
- goto out;
+ return ret;
}
if (kvm_vgic_global_state.has_gicv4_1)
vgic_v4_configure_vsgis(kvm);
- dist->ready = true;
-out:
- return ret;
+ return 0;
}
DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);