Diffstat (limited to 'arch/arm64/kvm/guest.c')
-rw-r--r--  arch/arm64/kvm/guest.c  |  415
1 file changed, 383 insertions(+), 32 deletions(-)
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index dd436a50fce7..3ae2f82fca46 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -19,18 +19,25 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
+#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include <asm/kvm_host.h>
+#include <asm/sigcontext.h>
#include "trace.h"
@@ -52,12 +59,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
+static bool core_reg_offset_is_vreg(u64 off)
+{
+ return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
+ off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
+}
+
static u64 core_reg_offset_from_id(u64 id)
{
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
-static int validate_core_offset(const struct kvm_one_reg *reg)
+static int validate_core_offset(const struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
{
u64 off = core_reg_offset_from_id(reg->id);
int size;
@@ -89,11 +103,19 @@ static int validate_core_offset(const struct kvm_one_reg *reg)
return -EINVAL;
}
- if (KVM_REG_SIZE(reg->id) == size &&
- IS_ALIGNED(off, size / sizeof(__u32)))
- return 0;
+ if (KVM_REG_SIZE(reg->id) != size ||
+ !IS_ALIGNED(off, size / sizeof(__u32)))
+ return -EINVAL;
- return -EINVAL;
+ /*
+ * The KVM_REG_ARM64_SVE regs must be used instead of
+ * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+ * SVE-enabled vcpus:
+ */
+ if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
+ return -EINVAL;
+
+ return 0;
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
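For orientation, here is a minimal userspace sketch (not part of the patch) of how a KVM_REG_ARM_CORE register ID is built and read. The core_reg_id() and read_x0() helpers are illustrative assumptions; KVM_REG_ARM_CORE_REG(), struct kvm_one_reg and KVM_GET_ONE_REG are the existing arm64 KVM UAPI. With the hunk above applied, an ID landing in the fp_regs.vregs[] range fails with EINVAL on an SVE-enabled vcpu.

#include <stddef.h>	/* offsetof(), used by KVM_REG_ARM_CORE_REG() */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* KVM_REG_ARM64, KVM_REG_SIZE_U64, struct kvm_one_reg */
#include <asm/kvm.h>	/* KVM_REG_ARM_CORE, KVM_REG_ARM_CORE_REG() */

/* Illustrative helper: compose a 64-bit core register ID from an offset. */
static uint64_t core_reg_id(uint64_t offset)
{
	return KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | offset;
}

/* Illustrative helper: read the guest's X0. */
static int read_x0(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = core_reg_id(KVM_REG_ARM_CORE_REG(regs.regs[0])),
		.addr = (uint64_t)val,
	};

	/*
	 * IDs in the fp_regs.vregs[] range are rejected with EINVAL on
	 * SVE vcpus: those must be accessed as SVE Z-registers instead.
	 */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}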
@@ -115,7 +137,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(reg))
+ if (validate_core_offset(vcpu, reg))
return -EINVAL;
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
@@ -140,7 +162,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
- if (validate_core_offset(reg))
+ if (validate_core_offset(vcpu, reg))
return -EINVAL;
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -183,6 +205,239 @@ out:
return err;
}
+#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
+#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)
+
+static bool vq_present(
+ const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
+ unsigned int vq)
+{
+ return (*vqs)[vq_word(vq)] & vq_mask(vq);
+}
+
+static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ unsigned int max_vq, vq;
+ u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
+ if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
+ return -EINVAL;
+
+ memset(vqs, 0, sizeof(vqs));
+
+ max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+ if (sve_vq_available(vq))
+ vqs[vq_word(vq)] |= vq_mask(vq);
+
+ if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ unsigned int max_vq, vq;
+ u64 vqs[KVM_ARM64_SVE_VLS_WORDS];
+
+ if (!vcpu_has_sve(vcpu))
+ return -ENOENT;
+
+ if (kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM; /* too late! */
+
+ if (WARN_ON(vcpu->arch.sve_state))
+ return -EINVAL;
+
+ if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
+ return -EFAULT;
+
+ max_vq = 0;
+ for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
+ if (vq_present(&vqs, vq))
+ max_vq = vq;
+
+ if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
+ return -EINVAL;
+
+ /*
+ * Vector lengths supported by the host can't currently be
+ * hidden from the guest individually: instead we can only set a
+ * maximum via ZCR_EL2.LEN. So, make sure the available vector
+ * lengths match the set requested exactly up to the requested
+ * maximum:
+ */
+ for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
+ if (vq_present(&vqs, vq) != sve_vq_available(vq))
+ return -EINVAL;
+
+ /* Can't run with no vector lengths at all: */
+ if (max_vq < SVE_VQ_MIN)
+ return -EINVAL;
+
+ /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
+ vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
+
+ return 0;
+}
+
+#define SVE_REG_SLICE_SHIFT 0
+#define SVE_REG_SLICE_BITS 5
+#define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
+#define SVE_REG_ID_BITS 5
+
+#define SVE_REG_SLICE_MASK \
+ GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \
+ SVE_REG_SLICE_SHIFT)
+#define SVE_REG_ID_MASK \
+ GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)
+
+#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)
+
+#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
+#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))
+
+/*
+ * Number of register slices required to cover each whole SVE register.
+ * NOTE: Only the first slice ever exists, for now.
+ * If you are tempted to modify this, you must also rework sve_reg_to_region()
+ * to match:
+ */
+#define vcpu_sve_slices(vcpu) 1
+
+/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
+struct sve_state_reg_region {
+ unsigned int koffset; /* offset into sve_state in kernel memory */
+ unsigned int klen; /* length in kernel memory */
+ unsigned int upad; /* extra trailing padding in user memory */
+};
+
+/*
+ * Validate SVE register ID and get sanitised bounds for user/kernel SVE
+ * register copy
+ */
+static int sve_reg_to_region(struct sve_state_reg_region *region,
+ struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ /* reg ID ranges for Z- registers */
+ const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
+ const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
+ SVE_NUM_SLICES - 1);
+
+ /* reg ID ranges for P- registers and FFR (which are contiguous) */
+ const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
+ const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);
+
+ unsigned int vq;
+ unsigned int reg_num;
+
+ unsigned int reqoffset, reqlen; /* User-requested offset and length */
+ unsigned int maxlen; /* Maximum permitted length */
+
+ size_t sve_state_size;
+
+ const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
+ SVE_NUM_SLICES - 1);
+
+ /* Verify that the P-regs and FFR really do have contiguous IDs: */
+ BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);
+
+ /* Verify that we match the UAPI header: */
+ BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);
+
+ reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;
+
+ if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
+ reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
+ SVE_SIG_REGS_OFFSET;
+ reqlen = KVM_SVE_ZREG_SIZE;
+ maxlen = SVE_SIG_ZREG_SIZE(vq);
+ } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
+ if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
+ return -ENOENT;
+
+ vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+
+ reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
+ SVE_SIG_REGS_OFFSET;
+ reqlen = KVM_SVE_PREG_SIZE;
+ maxlen = SVE_SIG_PREG_SIZE(vq);
+ } else {
+ return -EINVAL;
+ }
+
+ sve_state_size = vcpu_sve_state_size(vcpu);
+ if (WARN_ON(!sve_state_size))
+ return -EINVAL;
+
+ region->koffset = array_index_nospec(reqoffset, sve_state_size);
+ region->klen = min(maxlen, reqlen);
+ region->upad = reqlen - region->klen;
+
+ return 0;
+}
+
+static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ int ret;
+ struct sve_state_reg_region region;
+ char __user *uptr = (char __user *)reg->addr;
+
+ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+ if (reg->id == KVM_REG_ARM64_SVE_VLS)
+ return get_sve_vls(vcpu, reg);
+
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
+
+ if (!kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
+ region.klen) ||
+ clear_user(uptr + region.klen, region.upad))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+ int ret;
+ struct sve_state_reg_region region;
+ const char __user *uptr = (const char __user *)reg->addr;
+
+ /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
+ if (reg->id == KVM_REG_ARM64_SVE_VLS)
+ return set_sve_vls(vcpu, reg);
+
+ /* Try to interpret reg ID as an architectural SVE register... */
+ ret = sve_reg_to_region(&region, vcpu, reg);
+ if (ret)
+ return ret;
+
+ if (!kvm_arm_vcpu_sve_finalized(vcpu))
+ return -EPERM;
+
+ if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
+ region.klen))
+ return -EFAULT;
+
+ return 0;
+}
+
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
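To make the intended call sequence concrete, here is a rough userspace sketch (an assumption based on this series, not code from the patch) of configuring vector lengths through the new KVM_REG_ARM64_SVE_VLS pseudo-register: enable SVE at vcpu init, read and optionally shrink the VQ bitmap, write it back, then finalize the vcpu before touching any Z/P/FFR register. setup_sve() is a made-up helper; KVM_ARM_VCPU_SVE, KVM_ARM_VCPU_FINALIZE and KVM_ARM64_SVE_VLS_WORDS come from the UAPI added by the wider SVE series.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

/* Illustrative helper: enable SVE and negotiate vector lengths for a vcpu. */
static int setup_sve(int vcpu_fd, struct kvm_vcpu_init *init)
{
	uint64_t vqs[KVM_ARM64_SVE_VLS_WORDS];	/* bitmap of vector quadwords */
	struct kvm_one_reg vls = {
		.id   = KVM_REG_ARM64_SVE_VLS,
		.addr = (uint64_t)vqs,
	};
	int feature = KVM_ARM_VCPU_SVE;

	/* Caller is assumed to have filled init->target already. */
	init->features[0] |= 1UL << KVM_ARM_VCPU_SVE;
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, init))
		return -1;

	/* get_sve_vls(): report the set supported by the host. */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &vls))
		return -1;

	/* (Optionally clear high bits in vqs[] here to cap the guest's VL.) */

	/* set_sve_vls(): fails with EPERM once the vcpu has been finalized. */
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &vls))
		return -1;

	/* No other SVE register access is accepted before finalization. */
	return ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
}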
@@ -193,9 +448,37 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return -EINVAL;
}
-static unsigned long num_core_regs(void)
+static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ unsigned int i;
+ int n = 0;
+ const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
+
+ for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
+ /*
+ * The KVM_REG_ARM64_SVE regs must be used instead of
+ * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
+ * SVE-enabled vcpus:
+ */
+ if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
+ continue;
+
+ if (uindices) {
+ if (put_user(core_reg | i, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
- return sizeof(struct kvm_regs) / sizeof(__u32);
+ return copy_core_reg_indices(vcpu, NULL);
}
/**
@@ -251,6 +534,67 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
+static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
+{
+ const unsigned int slices = vcpu_sve_slices(vcpu);
+
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ /* Policed by KVM_GET_REG_LIST: */
+ WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+ return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
+ + 1; /* KVM_REG_ARM64_SVE_VLS */
+}
+
+static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ const unsigned int slices = vcpu_sve_slices(vcpu);
+ u64 reg;
+ unsigned int i, n;
+ int num_regs = 0;
+
+ if (!vcpu_has_sve(vcpu))
+ return 0;
+
+ /* Policed by KVM_GET_REG_LIST: */
+ WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+
+ /*
+ * Enumerate this first, so that userspace can save/restore in
+ * the order reported by KVM_GET_REG_LIST:
+ */
+ reg = KVM_REG_ARM64_SVE_VLS;
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ ++num_regs;
+
+ for (i = 0; i < slices; i++) {
+ for (n = 0; n < SVE_NUM_ZREGS; n++) {
+ reg = KVM_REG_ARM64_SVE_ZREG(n, i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ for (n = 0; n < SVE_NUM_PREGS; n++) {
+ reg = KVM_REG_ARM64_SVE_PREG(n, i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ reg = KVM_REG_ARM64_SVE_FFR(i);
+ if (put_user(reg, uindices++))
+ return -EFAULT;
+ num_regs++;
+ }
+
+ return num_regs;
+}
+
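The "enumerate this first" comment above is what makes a simple save/restore loop work: walking the KVM_GET_REG_LIST output in order restores KVM_REG_ARM64_SVE_VLS before any Z/P/FFR contents that depend on it. Below is a hedged userspace sketch of fetching that list; get_reg_list() is an illustrative helper, while struct kvm_reg_list and KVM_GET_REG_LIST are existing UAPI.

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: fetch the full register index list for a vcpu. */
static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* First call fails with E2BIG but fills in the required count. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
	if (!list)
		return NULL;

	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list)) {
		free(list);
		return NULL;
	}

	/* list->reg[] now holds every ID, SVE_VLS ahead of the SVE data regs. */
	return list;
}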
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
@@ -258,8 +602,15 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
- return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
- + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
+ unsigned long res = 0;
+
+ res += num_core_regs(vcpu);
+ res += num_sve_regs(vcpu);
+ res += kvm_arm_num_sys_reg_descs(vcpu);
+ res += kvm_arm_get_fw_num_regs(vcpu);
+ res += NUM_TIMER_REGS;
+
+ return res;
}
/**
@@ -269,23 +620,25 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
- unsigned int i;
- const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;
int ret;
- for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
- if (put_user(core_reg | i, uindices))
- return -EFAULT;
- uindices++;
- }
+ ret = copy_core_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_sve_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
- if (ret)
+ if (ret < 0)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
- if (ret)
+ if (ret < 0)
return ret;
uindices += NUM_TIMER_REGS;
@@ -298,12 +651,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
- /* Register group 16 means we want a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return get_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_get_fw_reg(vcpu, reg);
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg);
+ case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg);
+ case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg);
+ }
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@@ -317,12 +669,11 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
return -EINVAL;
- /* Register group 16 means we set a core register. */
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
- return set_core_reg(vcpu, reg);
-
- if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
- return kvm_arm_set_fw_reg(vcpu, reg);
+ switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg);
+ case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg);
+ case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg);
+ }
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
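Finally, a short userspace sketch (illustrative, not taken from the patch) of reading SVE state through the new KVM_REG_ARM64_SVE coproc group once the vcpu is finalized. The 256-byte and 32-byte buffers correspond to the fixed 2048-bit Z-register and 256-bit P/FFR register ID sizes in the UAPI, independent of the configured vector length; lanes beyond the vcpu's maximum read back as zero thanks to the clear_user() padding in get_sve_reg() above.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

/* Illustrative helper: read guest Z0 and FFR (slice 0) after finalization. */
static int read_z0_and_ffr(int vcpu_fd, void *zbuf /* 256 bytes */,
			   void *pbuf /* 32 bytes */)
{
	struct kvm_one_reg z0 = {
		.id   = KVM_REG_ARM64_SVE_ZREG(0, 0),	/* Z0, slice 0 */
		.addr = (uint64_t)zbuf,
	};
	struct kvm_one_reg ffr = {
		.id   = KVM_REG_ARM64_SVE_FFR(0),	/* FFR, slice 0 */
		.addr = (uint64_t)pbuf,
	};

	/* Both fail with EPERM until KVM_ARM_VCPU_FINALIZE has been issued. */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &z0))
		return -1;
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &ffr);
}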