path: root/arch/arm64/kvm
author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 10:56:56 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 10:56:56 -0800
commit    c9b012e5f4a1d01dfa8abc6318211a67ba7d5db2
tree      97b2f1c654fc4333e9e3111f76a26ec5503ee5b9 /arch/arm64/kvm
parent    b293fca43be544483b6488d33ad4b3ed55881064
parent    6cfa7cc46b1a7a15d81d5389c99cfca633c12b8e
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "The big highlight is support for the Scalable Vector Extension (SVE)
  which required extensive ABI work to ensure we don't break existing
  applications by blowing away their signal stack with the rather large
  new vector context (<= 2 kbit per vector register). There's further
  work to be done optimising things like exception return, but the ABI
  is solid now.

  Much of the line count comes from some new PMU drivers we have, but
  they're pretty self-contained and I suspect we'll have more of them in
  future.

  Plenty of acronym soup here:

   - initial support for the Scalable Vector Extension (SVE)
   - improved handling for SError interrupts (required to handle RAS events)
   - enable GCC support for 128-bit integer types
   - remove kernel text addresses from backtraces and register dumps
   - use of WFE to implement long delay()s
   - ACPI IORT updates from Lorenzo Pieralisi
   - perf PMU driver for the Statistical Profiling Extension (SPE)
   - perf PMU driver for Hisilicon's system PMUs
   - misc cleanups and non-critical fixes"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (97 commits)
  arm64: Make ARMV8_DEPRECATED depend on SYSCTL
  arm64: Implement __lshrti3 library function
  arm64: support __int128 on gcc 5+
  arm64/sve: Add documentation
  arm64/sve: Detect SVE and activate runtime support
  arm64/sve: KVM: Hide SVE from CPU features exposed to guests
  arm64/sve: KVM: Treat guest SVE use as undefined instruction execution
  arm64/sve: KVM: Prevent guests from using SVE
  arm64/sve: Add sysctl to set the default vector length for new processes
  arm64/sve: Add prctl controls for userspace vector length management
  arm64/sve: ptrace and ELF coredump support
  arm64/sve: Preserve SVE registers around EFI runtime service calls
  arm64/sve: Preserve SVE registers around kernel-mode NEON use
  arm64/sve: Probe SVE capabilities and usable vector lengths
  arm64: cpufeature: Move sys_caps_initialised declarations
  arm64/sve: Backend logic for setting the vector length
  arm64/sve: Signal handling support
  arm64/sve: Support vector length resetting for new processes
  arm64/sve: Core task context handling
  arm64/sve: Low-level CPU setup
  ...
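For context on the "prctl controls for userspace vector length management" entry above: after this series, a thread can query and set its own SVE vector length via prctl(2). Below is a minimal userspace sketch, assuming the PR_SVE_SET_VL/PR_SVE_GET_VL interface introduced by this series (the fallback constant values are taken from the series' uapi additions and may differ if the ABI changed later):

#include <stdio.h>
#include <sys/prctl.h>

/* Fallback definitions for headers predating this series; values as
 * introduced by the SVE prctl patches (assumption, not verified here). */
#ifndef PR_SVE_SET_VL
#define PR_SVE_SET_VL      50
#define PR_SVE_GET_VL      51
#define PR_SVE_VL_LEN_MASK 0xffff
#endif

int main(void)
{
	/* Request a 32-byte (256-bit) vector length; the kernel rounds
	 * down to a supported length if this exact one is unavailable. */
	if (prctl(PR_SVE_SET_VL, 32) < 0)
		perror("PR_SVE_SET_VL (no SVE support?)");

	int ret = prctl(PR_SVE_GET_VL);
	if (ret >= 0)
		printf("current vector length: %d bytes\n",
		       ret & PR_SVE_VL_LEN_MASK);
	return 0;
}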
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--arch/arm64/kvm/handle_exit.c8
-rw-r--r--arch/arm64/kvm/hyp/debug-sr.c24
-rw-r--r--arch/arm64/kvm/hyp/switch.c12
-rw-r--r--arch/arm64/kvm/sys_regs.c292
4 files changed, 271 insertions, 65 deletions
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 7debb74843a0..b71247995469 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -147,6 +147,13 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /* Until SVE is supported for guests: */
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
@@ -160,6 +167,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_HVC64] = handle_hvc,
[ESR_ELx_EC_SMC64] = handle_smc,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
+ [ESR_ELx_EC_SVE] = handle_sve,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
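To situate the new handle_sve entry: exit dispatch indexes arm_exit_handlers[] by the exception class (EC) field of the saved ESR_EL2, so a guest SVE access that traps (via the CPTR_EL2/CPACR_EL1 bits set in switch.c below) lands directly in handle_sve(), which injects an undefined instruction exception. A simplified sketch of the lookup, modelled on kvm_get_exit_handler() in this file:

/* Sketch: how an exit reaches handle_sve(). ESR_ELx_EC() extracts the
 * exception class from bits [31:26] of the saved syndrome register. */
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);	/* saved ESR_EL2 */
	u8 hsr_ec = ESR_ELx_EC(hsr);		/* exception class */

	return arm_exit_handlers[hsr_ec];	/* e.g. handle_sve for EC_SVE */
}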
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index f5154ed3da6c..321c9c05dd9e 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -65,16 +65,6 @@
default: write_debug(ptr[0], reg, 0); \
}
-#define PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
-
-#define PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
-#define PMBLIMITR_EL1_E BIT(0)
-
-#define PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
-#define PMBIDR_EL1_P BIT(4)
-
-#define psb_csync() asm volatile("hint #17")
-
static void __hyp_text __debug_save_spe_vhe(u64 *pmscr_el1)
{
/* The vcpu can run, but it can't hide. */
@@ -90,18 +80,18 @@ static void __hyp_text __debug_save_spe_nvhe(u64 *pmscr_el1)
return;
/* Yes; is it owned by EL3? */
- reg = read_sysreg_s(PMBIDR_EL1);
- if (reg & PMBIDR_EL1_P)
+ reg = read_sysreg_s(SYS_PMBIDR_EL1);
+ if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
return;
/* No; is the host actually using the thing? */
- reg = read_sysreg_s(PMBLIMITR_EL1);
- if (!(reg & PMBLIMITR_EL1_E))
+ reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
+ if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
return;
/* Yes; save the control register and disable data generation */
- *pmscr_el1 = read_sysreg_s(PMSCR_EL1);
- write_sysreg_s(0, PMSCR_EL1);
+ *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
+ write_sysreg_s(0, SYS_PMSCR_EL1);
isb();
/* Now drain all buffered data to memory */
@@ -122,7 +112,7 @@ static void __hyp_text __debug_restore_spe(u64 pmscr_el1)
isb();
/* Re-enable data generation */
- write_sysreg_s(pmscr_el1, PMSCR_EL1);
+ write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}
void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
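Replacing the local PMSCR_EL1/PMBLIMITR_EL1/PMBIDR_EL1 definitions above with the shared SYS_* ones is safe because both spell the same numeric encoding: read_sysreg_s()/write_sysreg_s() take a value built by the sys_reg() macro. A sketch of that encoding, matching the sys_reg() definition in arch/arm64/include/asm/sysreg.h at this point in time:

/* sys_reg() packs the (op0, op1, CRn, CRm, op2) tuple into the
 * immediate used by the S<op0>_<op1>_C<n>_C<m>_<op2> mrs/msr form. */
#define sys_reg(op0, op1, crn, crm, op2) \
	((((op0) & 3) << 19) | ((op1) << 16) | ((crn) << 12) | \
	 ((crm) << 8) | ((op2) << 5))

/* So the deleted local definition and the shared one are identical:
 * SYS_PMSCR_EL1 == sys_reg(3, 0, 9, 9, 0). */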
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 945e79c641c4..951f3ebaff26 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -48,7 +48,7 @@ static void __hyp_text __activate_traps_vhe(void)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
- val &= ~CPACR_EL1_FPEN;
+ val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
write_sysreg(val, cpacr_el1);
write_sysreg(__kvm_hyp_vector, vbar_el1);
@@ -59,7 +59,7 @@ static void __hyp_text __activate_traps_nvhe(void)
u64 val;
val = CPTR_EL2_DEFAULT;
- val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+ val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
write_sysreg(val, cptr_el2);
}
@@ -81,11 +81,17 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
* it will cause an exception.
*/
val = vcpu->arch.hcr_el2;
+
if (!(val & HCR_RW) && system_supports_fpsimd()) {
write_sysreg(1 << 30, fpexc32_el2);
isb();
}
+
+ if (val & HCR_RW) /* for AArch64 only: */
+ val |= HCR_TID3; /* TID3: trap feature register accesses */
+
write_sysreg(val, hcr_el2);
+
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
/*
@@ -111,7 +117,7 @@ static void __hyp_text __deactivate_traps_vhe(void)
write_sysreg(mdcr_el2, mdcr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
- write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
+ write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
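The three trap knobs touched in this file cooperate: clearing CPACR_EL1_ZEN (VHE) or setting CPTR_EL2_TZ (non-VHE) makes guest SVE instructions trap to EL2, where they reach handle_sve() via the exit table above, while HCR_TID3 makes guest reads of the ID_*_EL1 feature registers trap to kvm_handle_sys_reg(), which is what lets sys_regs.c below substitute sanitised values. A sketch of the relevant bit positions, per the ARMv8 architecture (shown for orientation only; the kernel's kvm_arm.h/sysreg.h definitions are authoritative):

/* Bit positions per the ARMv8 architecture; orientation only. */
#define CPACR_EL1_ZEN	(3UL << 16)	/* VHE: EL0/EL1 SVE access (clear => trap) */
#define CPACR_EL1_FPEN	(3UL << 20)	/* VHE: EL0/EL1 FP/SIMD access */
#define CPTR_EL2_TZ	(1UL << 8)	/* non-VHE: trap SVE to EL2 */
#define CPTR_EL2_TFP	(1UL << 10)	/* non-VHE: trap FP/SIMD to EL2 */
#define HCR_TID3	(1UL << 18)	/* trap ID group 3 register reads to EL2 */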
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e070d3baf9f..a0ee9b05e3d4 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -23,6 +23,7 @@
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
+#include <linux/printk.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -892,6 +893,146 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
return true;
}
+/* Read a sanitised cpufeature ID register by sys_reg_desc */
+static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
+{
+ u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
+ (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+ u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
+
+ if (id == SYS_ID_AA64PFR0_EL1) {
+ if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
+ pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n",
+ task_pid_nr(current));
+
+ val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
+ }
+
+ return val;
+}
+
+/* cpufeature ID register access trap handlers */
+
+static bool __access_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r,
+ bool raz)
+{
+ if (p->is_write)
+ return write_to_read_only(vcpu, p, r);
+
+ p->regval = read_id_reg(r, raz);
+ return true;
+}
+
+static bool access_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ return __access_id_reg(vcpu, p, r, false);
+}
+
+static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ return __access_id_reg(vcpu, p, r, true);
+}
+
+static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
+static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
+static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
+
+/*
+ * cpufeature ID register user accessors
+ *
+ * For now, these registers are immutable for userspace, so no values
+ * are stored, and for set_id_reg() we don't allow the effective value
+ * to be changed.
+ */
+static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+ bool raz)
+{
+ const u64 id = sys_reg_to_index(rd);
+ const u64 val = read_id_reg(rd, raz);
+
+ return reg_to_user(uaddr, &val, id);
+}
+
+static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
+ bool raz)
+{
+ const u64 id = sys_reg_to_index(rd);
+ int err;
+ u64 val;
+
+ err = reg_from_user(&val, uaddr, id);
+ if (err)
+ return err;
+
+ /* This is what we mean by invariant: you can't change it. */
+ if (val != read_id_reg(rd, raz))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __get_id_reg(rd, uaddr, false);
+}
+
+static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __set_id_reg(rd, uaddr, false);
+}
+
+static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __get_id_reg(rd, uaddr, true);
+}
+
+static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ return __set_id_reg(rd, uaddr, true);
+}
+
+/* sys_reg_desc initialiser for known cpufeature ID registers */
+#define ID_SANITISED(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = access_id_reg, \
+ .get_user = get_id_reg, \
+ .set_user = set_id_reg, \
+}
+
+/*
+ * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
+ * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
+ * (1 <= crm < 8, 0 <= Op2 < 8).
+ */
+#define ID_UNALLOCATED(crm, op2) { \
+ Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
+ .access = access_raz_id_reg, \
+ .get_user = get_raz_id_reg, \
+ .set_user = set_raz_id_reg, \
+}
+
+/*
+ * sys_reg_desc initialiser for known ID registers that we hide from guests.
+ * For now, these are exposed just like unallocated ID regs: they appear
+ * RAZ for the guest.
+ */
+#define ID_HIDDEN(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = access_raz_id_reg, \
+ .get_user = get_raz_id_reg, \
+ .set_user = set_raz_id_reg, \
+}
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -944,6 +1085,84 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
+
+ /*
+ * ID regs: all ID_SANITISED() entries here must have corresponding
+ * entries in arm64_ftr_regs[].
+ */
+
+ /* AArch64 mappings of the AArch32 ID registers */
+ /* CRm=1 */
+ ID_SANITISED(ID_PFR0_EL1),
+ ID_SANITISED(ID_PFR1_EL1),
+ ID_SANITISED(ID_DFR0_EL1),
+ ID_HIDDEN(ID_AFR0_EL1),
+ ID_SANITISED(ID_MMFR0_EL1),
+ ID_SANITISED(ID_MMFR1_EL1),
+ ID_SANITISED(ID_MMFR2_EL1),
+ ID_SANITISED(ID_MMFR3_EL1),
+
+ /* CRm=2 */
+ ID_SANITISED(ID_ISAR0_EL1),
+ ID_SANITISED(ID_ISAR1_EL1),
+ ID_SANITISED(ID_ISAR2_EL1),
+ ID_SANITISED(ID_ISAR3_EL1),
+ ID_SANITISED(ID_ISAR4_EL1),
+ ID_SANITISED(ID_ISAR5_EL1),
+ ID_SANITISED(ID_MMFR4_EL1),
+ ID_UNALLOCATED(2,7),
+
+ /* CRm=3 */
+ ID_SANITISED(MVFR0_EL1),
+ ID_SANITISED(MVFR1_EL1),
+ ID_SANITISED(MVFR2_EL1),
+ ID_UNALLOCATED(3,3),
+ ID_UNALLOCATED(3,4),
+ ID_UNALLOCATED(3,5),
+ ID_UNALLOCATED(3,6),
+ ID_UNALLOCATED(3,7),
+
+ /* AArch64 ID registers */
+ /* CRm=4 */
+ ID_SANITISED(ID_AA64PFR0_EL1),
+ ID_SANITISED(ID_AA64PFR1_EL1),
+ ID_UNALLOCATED(4,2),
+ ID_UNALLOCATED(4,3),
+ ID_UNALLOCATED(4,4),
+ ID_UNALLOCATED(4,5),
+ ID_UNALLOCATED(4,6),
+ ID_UNALLOCATED(4,7),
+
+ /* CRm=5 */
+ ID_SANITISED(ID_AA64DFR0_EL1),
+ ID_SANITISED(ID_AA64DFR1_EL1),
+ ID_UNALLOCATED(5,2),
+ ID_UNALLOCATED(5,3),
+ ID_HIDDEN(ID_AA64AFR0_EL1),
+ ID_HIDDEN(ID_AA64AFR1_EL1),
+ ID_UNALLOCATED(5,6),
+ ID_UNALLOCATED(5,7),
+
+ /* CRm=6 */
+ ID_SANITISED(ID_AA64ISAR0_EL1),
+ ID_SANITISED(ID_AA64ISAR1_EL1),
+ ID_UNALLOCATED(6,2),
+ ID_UNALLOCATED(6,3),
+ ID_UNALLOCATED(6,4),
+ ID_UNALLOCATED(6,5),
+ ID_UNALLOCATED(6,6),
+ ID_UNALLOCATED(6,7),
+
+ /* CRm=7 */
+ ID_SANITISED(ID_AA64MMFR0_EL1),
+ ID_SANITISED(ID_AA64MMFR1_EL1),
+ ID_SANITISED(ID_AA64MMFR2_EL1),
+ ID_UNALLOCATED(7,3),
+ ID_UNALLOCATED(7,4),
+ ID_UNALLOCATED(7,5),
+ ID_UNALLOCATED(7,6),
+ ID_UNALLOCATED(7,7),
+
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
@@ -1790,8 +2009,8 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
if (!r)
r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
- /* Not saved in the sys_reg array? */
- if (r && !r->reg)
+ /* Not saved in the sys_reg array and not otherwise accessible? */
+ if (r && !(r->reg || r->get_user))
r = NULL;
return r;
@@ -1815,20 +2034,6 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
-FUNCTION_INVARIANT(id_pfr0_el1)
-FUNCTION_INVARIANT(id_pfr1_el1)
-FUNCTION_INVARIANT(id_dfr0_el1)
-FUNCTION_INVARIANT(id_afr0_el1)
-FUNCTION_INVARIANT(id_mmfr0_el1)
-FUNCTION_INVARIANT(id_mmfr1_el1)
-FUNCTION_INVARIANT(id_mmfr2_el1)
-FUNCTION_INVARIANT(id_mmfr3_el1)
-FUNCTION_INVARIANT(id_isar0_el1)
-FUNCTION_INVARIANT(id_isar1_el1)
-FUNCTION_INVARIANT(id_isar2_el1)
-FUNCTION_INVARIANT(id_isar3_el1)
-FUNCTION_INVARIANT(id_isar4_el1)
-FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
@@ -1836,20 +2041,6 @@ FUNCTION_INVARIANT(aidr_el1)
static struct sys_reg_desc invariant_sys_regs[] = {
{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
- { SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
- { SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
- { SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
- { SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
- { SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
- { SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
- { SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
- { SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
- { SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
- { SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
- { SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
- { SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
- { SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
- { SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
@@ -2079,12 +2270,31 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
return true;
}
+static int walk_one_sys_reg(const struct sys_reg_desc *rd,
+ u64 __user **uind,
+ unsigned int *total)
+{
+ /*
+ * Ignore registers we trap but don't save,
+ * and for which no custom user accessor is provided.
+ */
+ if (!(rd->reg || rd->get_user))
+ return 0;
+
+ if (!copy_reg_to_user(rd, uind))
+ return -EFAULT;
+
+ (*total)++;
+ return 0;
+}
+
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
const struct sys_reg_desc *i1, *i2, *end1, *end2;
unsigned int total = 0;
size_t num;
+ int err;
/* We check for duplicates here, to allow arch-specific overrides. */
i1 = get_target_table(vcpu->arch.target, true, &num);
@@ -2098,21 +2308,13 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
while (i1 || i2) {
int cmp = cmp_sys_reg(i1, i2);
/* target-specific overrides generic entry. */
- if (cmp <= 0) {
- /* Ignore registers we trap but don't save. */
- if (i1->reg) {
- if (!copy_reg_to_user(i1, &uind))
- return -EFAULT;
- total++;
- }
- } else {
- /* Ignore registers we trap but don't save. */
- if (i2->reg) {
- if (!copy_reg_to_user(i2, &uind))
- return -EFAULT;
- total++;
- }
- }
+ if (cmp <= 0)
+ err = walk_one_sys_reg(i1, &uind, &total);
+ else
+ err = walk_one_sys_reg(i2, &uind, &total);
+
+ if (err)
+ return err;
if (cmp <= 0 && ++i1 == end1)
i1 = NULL;