Diffstat (limited to 'arch/powerpc/lib/sstep.c')
 -rw-r--r--   arch/powerpc/lib/sstep.c | 678
 1 file changed, 479 insertions, 199 deletions
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 5abe98216dc2..ac3ee19531d8 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -15,8 +15,6 @@ #include <asm/cputable.h> #include <asm/disassemble.h> -extern char system_call_common[]; - #ifdef CONFIG_PPC64 /* Bits in SRR1 that are copied from MSR */ #define MSR_MASK 0xffffffff87c0ffffUL @@ -31,6 +29,10 @@ extern char system_call_common[]; #define XER_OV32 0x00080000U #define XER_CA32 0x00040000U +#ifdef CONFIG_VSX +#define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe)) +#endif + #ifdef CONFIG_PPC_FPU /* * Functions in ldstfp.S @@ -70,10 +72,8 @@ extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1, static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr, unsigned long val) { -#ifdef __powerpc64__ if ((msr & MSR_64BIT) == 0) val &= 0xffffffffUL; -#endif return val; } @@ -107,11 +107,11 @@ static nokprobe_inline long address_ok(struct pt_regs *regs, { if (!user_mode(regs)) return 1; - if (__access_ok(ea, nb, USER_DS)) + if (access_ok((void __user *)ea, nb)) return 1; - if (__access_ok(ea, 1, USER_DS)) + if (access_ok((void __user *)ea, 1)) /* Access overlaps the end of the user region */ - regs->dar = USER_DS.seg; + regs->dar = TASK_SIZE_MAX - 1; else regs->dar = ea; return 0; @@ -200,8 +200,8 @@ static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, unsigned int dd; unsigned long ea, d0, d1, d; - prefix_r = instr & (1ul << 20); - ra = (suffix >> 16) & 0x1f; + prefix_r = GET_PREFIX_R(instr); + ra = GET_PREFIX_RA(suffix); d0 = instr & 0x3ffff; d1 = suffix & 0xffff; @@ -218,10 +218,13 @@ static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr, ea += regs->gpr[ra]; else if (!prefix_r && !ra) ; /* Leave ea as is */ - else if (prefix_r && !ra) + else if (prefix_r) ea += regs->nip; - else if (prefix_r && ra) - ; /* Invalid form. Should already be checked for by caller! */ + + /* + * (prefix_r && ra) is an invalid form. Should already be + * checked for by caller! 
+ */ return ea; } @@ -275,39 +278,70 @@ static nokprobe_inline void do_byte_reverse(void *ptr, int nb) up[1] = tmp; break; } + case 32: { + unsigned long *up = (unsigned long *)ptr; + unsigned long tmp; + + tmp = byterev_8(up[0]); + up[0] = byterev_8(up[3]); + up[3] = tmp; + tmp = byterev_8(up[2]); + up[2] = byterev_8(up[1]); + up[1] = tmp; + break; + } + #endif default: WARN_ON_ONCE(1); } } -static nokprobe_inline int read_mem_aligned(unsigned long *dest, - unsigned long ea, int nb, - struct pt_regs *regs) +static __always_inline int +__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) { - int err = 0; unsigned long x = 0; switch (nb) { case 1: - err = __get_user(x, (unsigned char __user *) ea); + unsafe_get_user(x, (unsigned char __user *)ea, Efault); break; case 2: - err = __get_user(x, (unsigned short __user *) ea); + unsafe_get_user(x, (unsigned short __user *)ea, Efault); break; case 4: - err = __get_user(x, (unsigned int __user *) ea); + unsafe_get_user(x, (unsigned int __user *)ea, Efault); break; #ifdef __powerpc64__ case 8: - err = __get_user(x, (unsigned long __user *) ea); + unsafe_get_user(x, (unsigned long __user *)ea, Efault); break; #endif } - if (!err) - *dest = x; - else + *dest = x; + return 0; + +Efault: + regs->dar = ea; + return -EFAULT; +} + +static nokprobe_inline int +read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs) +{ + int err; + + if (is_kernel_addr(ea)) + return __read_mem_aligned(dest, ea, nb, regs); + + if (user_read_access_begin((void __user *)ea, nb)) { + err = __read_mem_aligned(dest, ea, nb, regs); + user_read_access_end(); + } else { + err = -EFAULT; regs->dar = ea; + } + return err; } @@ -315,10 +349,8 @@ static nokprobe_inline int read_mem_aligned(unsigned long *dest, * Copy from userspace to a buffer, using the largest possible * aligned accesses, up to sizeof(long). 
*/ -static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, - struct pt_regs *regs) +static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) { - int err = 0; int c; for (; nb > 0; nb -= c) { @@ -327,31 +359,46 @@ static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, c = max_align(nb); switch (c) { case 1: - err = __get_user(*dest, (unsigned char __user *) ea); + unsafe_get_user(*dest, (u8 __user *)ea, Efault); break; case 2: - err = __get_user(*(u16 *)dest, - (unsigned short __user *) ea); + unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault); break; case 4: - err = __get_user(*(u32 *)dest, - (unsigned int __user *) ea); + unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault); break; #ifdef __powerpc64__ case 8: - err = __get_user(*(unsigned long *)dest, - (unsigned long __user *) ea); + unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault); break; #endif } - if (err) { - regs->dar = ea; - return err; - } dest += c; ea += c; } return 0; + +Efault: + regs->dar = ea; + return -EFAULT; +} + +static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) +{ + int err; + + if (is_kernel_addr(ea)) + return __copy_mem_in(dest, ea, nb, regs); + + if (user_read_access_begin((void __user *)ea, nb)) { + err = __copy_mem_in(dest, ea, nb, regs); + user_read_access_end(); + } else { + err = -EFAULT; + regs->dar = ea; + } + + return err; } static nokprobe_inline int read_mem_unaligned(unsigned long *dest, @@ -389,30 +436,48 @@ static int read_mem(unsigned long *dest, unsigned long ea, int nb, } NOKPROBE_SYMBOL(read_mem); -static nokprobe_inline int write_mem_aligned(unsigned long val, - unsigned long ea, int nb, - struct pt_regs *regs) +static __always_inline int +__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) { - int err = 0; - switch (nb) { case 1: - err = __put_user(val, (unsigned char __user *) ea); + unsafe_put_user(val, (unsigned char __user *)ea, Efault); break; case 2: - err = __put_user(val, (unsigned short __user *) ea); + unsafe_put_user(val, (unsigned short __user *)ea, Efault); break; case 4: - err = __put_user(val, (unsigned int __user *) ea); + unsafe_put_user(val, (unsigned int __user *)ea, Efault); break; #ifdef __powerpc64__ case 8: - err = __put_user(val, (unsigned long __user *) ea); + unsafe_put_user(val, (unsigned long __user *)ea, Efault); break; #endif } - if (err) + return 0; + +Efault: + regs->dar = ea; + return -EFAULT; +} + +static nokprobe_inline int +write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs) +{ + int err; + + if (is_kernel_addr(ea)) + return __write_mem_aligned(val, ea, nb, regs); + + if (user_write_access_begin((void __user *)ea, nb)) { + err = __write_mem_aligned(val, ea, nb, regs); + user_write_access_end(); + } else { + err = -EFAULT; regs->dar = ea; + } + return err; } @@ -420,10 +485,8 @@ static nokprobe_inline int write_mem_aligned(unsigned long val, * Copy from a buffer to userspace, using the largest possible * aligned accesses, up to sizeof(long). 
*/ -static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, - struct pt_regs *regs) +static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) { - int err = 0; int c; for (; nb > 0; nb -= c) { @@ -432,31 +495,46 @@ static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, c = max_align(nb); switch (c) { case 1: - err = __put_user(*dest, (unsigned char __user *) ea); + unsafe_put_user(*dest, (u8 __user *)ea, Efault); break; case 2: - err = __put_user(*(u16 *)dest, - (unsigned short __user *) ea); + unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault); break; case 4: - err = __put_user(*(u32 *)dest, - (unsigned int __user *) ea); + unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault); break; #ifdef __powerpc64__ case 8: - err = __put_user(*(unsigned long *)dest, - (unsigned long __user *) ea); + unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault); break; #endif } - if (err) { - regs->dar = ea; - return err; - } dest += c; ea += c; } return 0; + +Efault: + regs->dar = ea; + return -EFAULT; +} + +static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs) +{ + int err; + + if (is_kernel_addr(ea)) + return __copy_mem_out(dest, ea, nb, regs); + + if (user_write_access_begin((void __user *)ea, nb)) { + err = __copy_mem_out(dest, ea, nb, regs); + user_write_access_end(); + } else { + err = -EFAULT; + regs->dar = ea; + } + + return err; } static nokprobe_inline int write_mem_unaligned(unsigned long val, @@ -508,6 +586,8 @@ static int do_fp_load(struct instruction_op *op, unsigned long ea, } u; nb = GETSIZE(op->type); + if (nb > sizeof(u)) + return -EINVAL; if (!address_ok(regs, ea, nb)) return -EFAULT; rn = op->reg; @@ -558,6 +638,8 @@ static int do_fp_store(struct instruction_op *op, unsigned long ea, } u; nb = GETSIZE(op->type); + if (nb > sizeof(u)) + return -EINVAL; if (!address_ok(regs, ea, nb)) return -EFAULT; rn = op->reg; @@ -602,6 +684,9 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea, u8 b[sizeof(__vector128)]; } u = {}; + if (size > sizeof(u)) + return -EINVAL; + if (!address_ok(regs, ea & ~0xfUL, 16)) return -EFAULT; /* align to multiple of size */ @@ -610,7 +695,7 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea, if (err) return err; if (unlikely(cross_endian)) - do_byte_reverse(&u.b[ea & 0xf], size); + do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u))); preempt_disable(); if (regs->msr & MSR_VEC) put_vr(rn, &u.v); @@ -629,6 +714,9 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea, u8 b[sizeof(__vector128)]; } u; + if (size > sizeof(u)) + return -EINVAL; + if (!address_ok(regs, ea & ~0xfUL, 16)) return -EFAULT; /* align to multiple of size */ @@ -641,7 +729,7 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea, u.v = current->thread.vr_state.vr[rn]; preempt_enable(); if (unlikely(cross_endian)) - do_byte_reverse(&u.b[ea & 0xf], size); + do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u))); return copy_mem_out(&u.b[ea & 0xf], ea, size, regs); } #endif /* CONFIG_ALTIVEC */ @@ -692,8 +780,8 @@ static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea, #endif /* __powerpc64 */ #ifdef CONFIG_VSX -void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, - const void *mem, bool rev) +static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, + const void *mem, bool rev) { int size, read_size; int i, j; @@ 
-705,6 +793,8 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, reg->d[0] = reg->d[1] = 0; switch (op->element_size) { + case 32: + /* [p]lxvp[x] */ case 16: /* whole vector; lxv[x] or lxvl[l] */ if (size == 0) @@ -713,7 +803,7 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, if (IS_LE && (op->vsx_flags & VSX_LDLEFT)) rev = !rev; if (rev) - do_byte_reverse(reg, 16); + do_byte_reverse(reg, size); break; case 8: /* scalar loads, lxvd2x, lxvdsx */ @@ -773,11 +863,9 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, break; } } -EXPORT_SYMBOL_GPL(emulate_vsx_load); -NOKPROBE_SYMBOL(emulate_vsx_load); -void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, - void *mem, bool rev) +static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, + void *mem, bool rev) { int size, write_size; int i, j; @@ -789,6 +877,22 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, size = GETSIZE(op->type); switch (op->element_size) { + case 32: + /* [p]stxvp[x] */ + if (size == 0) + break; + if (rev) { + /* reverse 32 bytes */ + union vsx_reg buf32[2]; + buf32[0].d[0] = byterev_8(reg[1].d[1]); + buf32[0].d[1] = byterev_8(reg[1].d[0]); + buf32[1].d[0] = byterev_8(reg[0].d[1]); + buf32[1].d[1] = byterev_8(reg[0].d[0]); + memcpy(mem, buf32, size); + } else { + memcpy(mem, reg, size); + } + break; case 16: /* stxv, stxvx, stxvl, stxvll */ if (size == 0) @@ -849,36 +953,49 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, break; } } -EXPORT_SYMBOL_GPL(emulate_vsx_store); -NOKPROBE_SYMBOL(emulate_vsx_store); static nokprobe_inline int do_vsx_load(struct instruction_op *op, unsigned long ea, struct pt_regs *regs, bool cross_endian) { int reg = op->reg; - u8 mem[16]; - union vsx_reg buf; + int i, j, nr_vsx_regs; + u8 mem[32]; + union vsx_reg buf[2]; int size = GETSIZE(op->type); if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs)) return -EFAULT; - emulate_vsx_load(op, &buf, mem, cross_endian); + nr_vsx_regs = max(1ul, size / sizeof(__vector128)); + emulate_vsx_load(op, buf, mem, cross_endian); preempt_disable(); if (reg < 32) { /* FP regs + extensions */ if (regs->msr & MSR_FP) { - load_vsrn(reg, &buf); + for (i = 0; i < nr_vsx_regs; i++) { + j = IS_LE ? nr_vsx_regs - i - 1 : i; + load_vsrn(reg + i, &buf[j].v); + } } else { - current->thread.fp_state.fpr[reg][0] = buf.d[0]; - current->thread.fp_state.fpr[reg][1] = buf.d[1]; + for (i = 0; i < nr_vsx_regs; i++) { + j = IS_LE ? nr_vsx_regs - i - 1 : i; + current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0]; + current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1]; + } } } else { - if (regs->msr & MSR_VEC) - load_vsrn(reg, &buf); - else - current->thread.vr_state.vr[reg - 32] = buf.v; + if (regs->msr & MSR_VEC) { + for (i = 0; i < nr_vsx_regs; i++) { + j = IS_LE ? nr_vsx_regs - i - 1 : i; + load_vsrn(reg + i, &buf[j].v); + } + } else { + for (i = 0; i < nr_vsx_regs; i++) { + j = IS_LE ? 
nr_vsx_regs - i - 1 : i; + current->thread.vr_state.vr[reg - 32 + i] = buf[j].v; + } + } } preempt_enable(); return 0; @@ -889,63 +1006,96 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op, bool cross_endian) { int reg = op->reg; - u8 mem[16]; - union vsx_reg buf; + int i, j, nr_vsx_regs; + u8 mem[32]; + union vsx_reg buf[2]; int size = GETSIZE(op->type); if (!address_ok(regs, ea, size)) return -EFAULT; + nr_vsx_regs = max(1ul, size / sizeof(__vector128)); preempt_disable(); if (reg < 32) { /* FP regs + extensions */ if (regs->msr & MSR_FP) { - store_vsrn(reg, &buf); + for (i = 0; i < nr_vsx_regs; i++) { + j = IS_LE ? nr_vsx_regs - i - 1 : i; + store_vsrn(reg + i, &buf[j].v); + } } else { - buf.d[0] = current->thread.fp_state.fpr[reg][0]; - buf.d[1] = current->thread.fp_state.fpr[reg][1]; + for (i = 0; i < nr_vsx_regs; i++) { + j = IS_LE ? nr_vsx_regs - i - 1 : i; + buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0]; + buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1]; + } } } else { - if (regs->msr & MSR_VEC) - store_vsrn(reg, &buf); - else - buf.v = current->thread.vr_state.vr[reg - 32]; + if (regs->msr & MSR_VEC) { + for (i = 0; i < nr_vsx_regs; i++) { + j = IS_LE ? nr_vsx_regs - i - 1 : i; + store_vsrn(reg + i, &buf[j].v); + } + } else { + for (i = 0; i < nr_vsx_regs; i++) { + j = IS_LE ? nr_vsx_regs - i - 1 : i; + buf[j].v = current->thread.vr_state.vr[reg - 32 + i]; + } + } } preempt_enable(); - emulate_vsx_store(op, &buf, mem, cross_endian); + emulate_vsx_store(op, buf, mem, cross_endian); return copy_mem_out(mem, ea, size, regs); } #endif /* CONFIG_VSX */ +static __always_inline int __emulate_dcbz(unsigned long ea) +{ + unsigned long i; + unsigned long size = l1_dcache_bytes(); + + for (i = 0; i < size; i += sizeof(long)) + unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault); + + return 0; + +Efault: + return -EFAULT; +} + int emulate_dcbz(unsigned long ea, struct pt_regs *regs) { int err; - unsigned long i, size; + unsigned long size = l1_dcache_bytes(); -#ifdef __powerpc64__ - size = ppc64_caches.l1d.block_size; - if (!(regs->msr & MSR_64BIT)) - ea &= 0xffffffffUL; -#else - size = L1_CACHE_BYTES; -#endif + ea = truncate_if_32bit(regs->msr, ea); ea &= ~(size - 1); if (!address_ok(regs, ea, size)) return -EFAULT; - for (i = 0; i < size; i += sizeof(long)) { - err = __put_user(0, (unsigned long __user *) (ea + i)); - if (err) { - regs->dar = ea; - return err; - } + + if (is_kernel_addr(ea)) { + err = __emulate_dcbz(ea); + } else if (user_write_access_begin((void __user *)ea, size)) { + err = __emulate_dcbz(ea); + user_write_access_end(); + } else { + err = -EFAULT; } - return 0; + + if (err) + regs->dar = ea; + + + return err; } NOKPROBE_SYMBOL(emulate_dcbz); #define __put_user_asmx(x, addr, err, op, cr) \ __asm__ __volatile__( \ + ".machine push\n" \ + ".machine power8\n" \ "1: " op " %2,0,%3\n" \ + ".machine pop\n" \ " mfcr %1\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ @@ -958,7 +1108,10 @@ NOKPROBE_SYMBOL(emulate_dcbz); #define __get_user_asmx(x, addr, err, op) \ __asm__ __volatile__( \ + ".machine push\n" \ + ".machine power8\n" \ "1: "op" %1,0,%2\n" \ + ".machine pop\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3: li %0,%3\n" \ @@ -987,10 +1140,8 @@ static nokprobe_inline void set_cr0(const struct pt_regs *regs, op->type |= SETCC; op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); -#ifdef __powerpc64__ if (!(regs->msr & MSR_64BIT)) val = (int) val; -#endif if (val < 0) op->ccval |= 0x80000000; else if (val > 0) @@ 
-1018,15 +1169,11 @@ static nokprobe_inline void add_with_carry(const struct pt_regs *regs, if (carry_in) ++val; - op->type = COMPUTE + SETREG + SETXER; + op->type = COMPUTE | SETREG | SETXER; op->reg = rd; op->val = val; -#ifdef __powerpc64__ - if (!(regs->msr & MSR_64BIT)) { - val = (unsigned int) val; - val1 = (unsigned int) val1; - } -#endif + val = truncate_if_32bit(regs->msr, val); + val1 = truncate_if_32bit(regs->msr, val1); op->xerval = regs->xer; if (val < val1 || (carry_in && val == val1)) op->xerval |= XER_CA; @@ -1043,7 +1190,7 @@ static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs, { unsigned int crval, shift; - op->type = COMPUTE + SETCC; + op->type = COMPUTE | SETCC; crval = (regs->xer >> 31) & 1; /* get SO bit */ if (v1 < v2) crval |= 8; @@ -1062,7 +1209,7 @@ static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs, { unsigned int crval, shift; - op->type = COMPUTE + SETCC; + op->type = COMPUTE | SETCC; crval = (regs->xer >> 31) & 1; /* get SO bit */ if (v1 < v2) crval |= 8; @@ -1202,7 +1349,7 @@ static nokprobe_inline int trap_compare(long v1, long v2) * otherwise. */ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, - struct ppc_inst instr) + ppc_inst_t instr) { #ifdef CONFIG_PPC64 unsigned int suffixopcode, prefixtype, prefix_r; @@ -1232,14 +1379,17 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, if (branch_taken(word, regs, op)) op->type |= BRTAKEN; return 1; -#ifdef CONFIG_PPC64 case 17: /* sc */ if ((word & 0xfe2) == 2) op->type = SYSCALL; - else + else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && + (word & 0xfe3) == 1) { /* scv */ + op->type = SYSCALL_VECTORED_0; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; + } else op->type = UNKNOWN; return 0; -#endif case 18: /* b */ op->type = BRANCH | BRTAKEN; imm = word & 0x03fffffc; @@ -1275,7 +1425,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 1; case 18: /* rfid, scary */ - if (regs->msr & MSR_PR) + if (user_mode(regs)) goto priv; op->type = RFI; return 0; @@ -1327,10 +1477,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; } - /* Following cases refer to regs->gpr[], so we need all regs */ - if (!FULL_REGS(regs)) - return -1; - rd = (word >> 21) & 0x1f; ra = (word >> 16) & 0x1f; rb = (word >> 11) & 0x1f; @@ -1339,8 +1485,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, switch (opcode) { #ifdef __powerpc64__ case 1: - prefix_r = word & (1ul << 20); - ra = (suffix >> 16) & 0x1f; + if (!cpu_has_feature(CPU_FTR_ARCH_31)) + goto unknown_opcode; + + prefix_r = GET_PREFIX_R(word); + ra = GET_PREFIX_RA(suffix); rd = (suffix >> 21) & 0x1f; op->reg = rd; op->val = regs->gpr[rd]; @@ -1370,8 +1519,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef __powerpc64__ case 4: + /* + * There are very many instructions with this primary opcode + * introduced in the ISA as early as v2.03. However, the ones + * we currently emulate were all introduced with ISA 3.0 + */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; switch (word & 0x3f) { case 48: /* maddhd */ @@ -1397,7 +1551,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, * There are other instructions from ISA 3.0 with the same * primary opcode which do not have emulation support yet. 
*/ - return -1; + goto unknown_opcode; #endif case 7: /* mulli */ @@ -1457,6 +1611,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 19: if (((word >> 1) & 0x1f) == 2) { /* addpcis */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; imm = (short) (word & 0xffc1); /* d0 + d2 fields */ imm |= (word >> 15) & 0x3e; /* d1 field */ op->val = regs->nip + (imm << 16) + 4; @@ -1582,13 +1738,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 1; #endif case 83: /* mfmsr */ - if (regs->msr & MSR_PR) + if (user_mode(regs)) goto priv; op->type = MFMSR; op->reg = rd; return 0; case 146: /* mtmsr */ - if (regs->msr & MSR_PR) + if (user_mode(regs)) goto priv; op->type = MTMSR; op->reg = rd; @@ -1596,7 +1752,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 0; #ifdef CONFIG_PPC64 case 178: /* mtmsrd */ - if (regs->msr & MSR_PR) + if (user_mode(regs)) goto priv; op->type = MTMSR; op->reg = rd; @@ -1620,6 +1776,28 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->val = regs->ccr & imm; goto compute_done; + case 128: /* setb */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; + /* + * 'ra' encodes the CR field number (bfa) in the top 3 bits. + * Since each CR field is 4 bits, + * we can simply mask off the bottom two bits (bfa * 4) + * to yield the first bit in the CR field. + */ + ra = ra & ~0x3; + /* 'val' stores bits of the CR field (bfa) */ + val = regs->ccr >> (CR0_SHIFT - ra); + /* checks if the LT bit of CR field (bfa) is set */ + if (val & 8) + op->val = -1; + /* checks if the GT bit of CR field (bfa) is set */ + else if (val & 4) + op->val = 1; + else + op->val = 0; + goto compute_done; + case 144: /* mtcrf */ op->type = COMPUTE + SETCC; imm = 0xf0000000UL; @@ -1769,7 +1947,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #ifdef __powerpc64__ case 265: /* modud */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->val = regs->gpr[ra] % regs->gpr[rb]; goto compute_done; #endif @@ -1779,7 +1957,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 267: /* moduw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->val = (unsigned int) regs->gpr[ra] % (unsigned int) regs->gpr[rb]; goto compute_done; @@ -1802,10 +1980,21 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->val = (int) regs->gpr[ra] / (int) regs->gpr[rb]; goto arith_done; - +#ifdef __powerpc64__ + case 425: /* divde[.] */ + asm volatile(PPC_DIVDE(%0, %1, %2) : + "=r" (op->val) : "r" (regs->gpr[ra]), + "r" (regs->gpr[rb])); + goto arith_done; + case 393: /* divdeu[.] 
*/ + asm volatile(PPC_DIVDEU(%0, %1, %2) : + "=r" (op->val) : "r" (regs->gpr[ra]), + "r" (regs->gpr[rb])); + goto arith_done; +#endif case 755: /* darn */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; switch (ra & 0x3) { case 0: /* 32-bit conditioned */ @@ -1823,18 +2012,18 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, goto compute_done; } - return -1; + goto unknown_opcode; #ifdef __powerpc64__ case 777: /* modsd */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->val = (long int) regs->gpr[ra] % (long int) regs->gpr[rb]; goto compute_done; #endif case 779: /* modsw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->val = (int) regs->gpr[ra] % (int) regs->gpr[rb]; goto compute_done; @@ -1911,14 +2100,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #endif case 538: /* cnttzw */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; val = (unsigned int) regs->gpr[rd]; op->val = (val ? __builtin_ctz(val) : 32); goto logical_done; #ifdef __powerpc64__ case 570: /* cnttzd */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; val = regs->gpr[rd]; op->val = (val ? __builtin_ctzl(val) : 64); goto logical_done; @@ -2028,7 +2217,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 890: /* extswsli with sh_5 = 0 */ case 891: /* extswsli with sh_5 = 1 */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) - return -1; + goto unknown_opcode; op->type = COMPUTE + SETREG; sh = rb | ((word & 2) << 4); val = (signed int) regs->gpr[rd]; @@ -2101,15 +2290,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->type = MKOP(STCX, 0, 4); break; -#ifdef __powerpc64__ - case 84: /* ldarx */ - op->type = MKOP(LARX, 0, 8); - break; - - case 214: /* stdcx. */ - op->type = MKOP(STCX, 0, 8); - break; - +#ifdef CONFIG_PPC_HAS_LBARX_LHARX case 52: /* lbarx */ op->type = MKOP(LARX, 0, 1); break; @@ -2125,6 +2306,15 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 726: /* sthcx. */ op->type = MKOP(STCX, 0, 2); break; +#endif +#ifdef __powerpc64__ + case 84: /* ldarx */ + op->type = MKOP(LARX, 0, 8); + break; + + case 214: /* stdcx. */ + op->type = MKOP(STCX, 0, 8); + break; case 276: /* lqarx */ if (!((rd & 1) || rd == ra || rd == rb)) @@ -2355,6 +2545,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 268: /* lxvx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 16; @@ -2364,6 +2556,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 269: /* lxvl */ case 301: { /* lxvll */ int nb; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->ea = ra ? 
regs->gpr[ra] : 0; nb = regs->gpr[rb] & 0xff; @@ -2382,7 +2576,17 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->vsx_flags = VSX_SPLAT; break; + case 333: /* lxvpx */ + if (!cpu_has_feature(CPU_FTR_ARCH_31)) + goto unknown_opcode; + op->reg = VSX_REGISTER_XTP(rd); + op->type = MKOP(LOAD_VSX, 0, 32); + op->element_size = 32; + break; + case 364: /* lxvwsx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 4; @@ -2390,6 +2594,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 396: /* stxvx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 16; @@ -2399,6 +2605,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 397: /* stxvl */ case 429: { /* stxvll */ int nb; + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->ea = ra ? regs->gpr[ra] : 0; nb = regs->gpr[rb] & 0xff; @@ -2410,6 +2618,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, VSX_CHECK_VEC; break; } + case 461: /* stxvpx */ + if (!cpu_has_feature(CPU_FTR_ARCH_31)) + goto unknown_opcode; + op->reg = VSX_REGISTER_XTP(rd); + op->type = MKOP(STORE_VSX, 0, 32); + op->element_size = 32; + break; case 524: /* lxsspx */ op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 4); @@ -2443,6 +2658,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 781: /* lxsibzx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 1); op->element_size = 8; @@ -2450,6 +2667,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 812: /* lxvh8x */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 2; @@ -2457,6 +2676,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 813: /* lxsihzx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 2); op->element_size = 8; @@ -2470,6 +2691,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 876: /* lxvb16x */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(LOAD_VSX, 0, 16); op->element_size = 1; @@ -2483,6 +2706,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 909: /* stxsibx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 1); op->element_size = 8; @@ -2490,6 +2715,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 940: /* stxvh8x */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 2; @@ -2497,6 +2724,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 941: /* stxsihx */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 2); op->element_size = 8; @@ -2510,6 +2739,8 @@ int analyse_instr(struct 
instruction_op *op, const struct pt_regs *regs, break; case 1004: /* stxvb16x */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd | ((word & 1) << 5); op->type = MKOP(STORE_VSX, 0, 16); op->element_size = 1; @@ -2618,12 +2849,16 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->type = MKOP(LOAD_FP, 0, 16); break; case 2: /* lxsd */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd + 32; op->type = MKOP(LOAD_VSX, 0, 8); op->element_size = 8; op->vsx_flags = VSX_CHECK_VEC; break; case 3: /* lxssp */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->reg = rd + 32; op->type = MKOP(LOAD_VSX, 0, 4); op->element_size = 8; @@ -2651,6 +2886,22 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, #endif #ifdef CONFIG_VSX + case 6: + if (!cpu_has_feature(CPU_FTR_ARCH_31)) + goto unknown_opcode; + op->ea = dqform_ea(word, regs); + op->reg = VSX_REGISTER_XTP(rd); + op->element_size = 32; + switch (word & 0xf) { + case 0: /* lxvp */ + op->type = MKOP(LOAD_VSX, 0, 32); + break; + case 1: /* stxvp */ + op->type = MKOP(STORE_VSX, 0, 32); + break; + } + break; + case 61: /* stfdp, lxv, stxsd, stxssp, stxv */ switch (word & 7) { case 0: /* stfdp with LSB of DS field = 0 */ @@ -2660,6 +2911,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 1: /* lxv */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->ea = dqform_ea(word, regs); if (word & 8) op->reg = rd + 32; @@ -2670,6 +2923,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 2: /* stxsd with LSB of DS field = 0 */ case 6: /* stxsd with LSB of DS field = 1 */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->ea = dsform_ea(word, regs); op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 8); @@ -2679,6 +2934,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 3: /* stxssp with LSB of DS field = 0 */ case 7: /* stxssp with LSB of DS field = 1 */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->ea = dsform_ea(word, regs); op->reg = rd + 32; op->type = MKOP(STORE_VSX, 0, 4); @@ -2687,6 +2944,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, break; case 5: /* stxv */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + goto unknown_opcode; op->ea = dqform_ea(word, regs); if (word & 8) op->reg = rd + 32; @@ -2715,8 +2974,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, } break; case 1: /* Prefixed instructions */ - prefix_r = word & (1ul << 20); - ra = (suffix >> 16) & 0x1f; + if (!cpu_has_feature(CPU_FTR_ARCH_31)) + goto unknown_opcode; + + prefix_r = GET_PREFIX_R(word); + ra = GET_PREFIX_RA(suffix); op->update_reg = ra; rd = (suffix >> 21) & 0x1f; op->reg = rd; @@ -2733,6 +2995,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 41: /* plwa */ op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4); break; +#ifdef CONFIG_VSX case 42: /* plxsd */ op->reg = rd + 32; op->type = MKOP(LOAD_VSX, PREFIXED, 8); @@ -2773,18 +3036,33 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->element_size = 16; op->vsx_flags = VSX_CHECK_VEC; break; +#endif /* CONFIG_VSX */ case 56: /* plq */ op->type = MKOP(LOAD, PREFIXED, 16); break; case 57: /* pld */ op->type = MKOP(LOAD, PREFIXED, 8); break; - case 60: /* stq */ +#ifdef CONFIG_VSX + case 58: /* plxvp */ + op->reg = VSX_REGISTER_XTP(rd); + op->type 
= MKOP(LOAD_VSX, PREFIXED, 32); + op->element_size = 32; + break; +#endif /* CONFIG_VSX */ + case 60: /* pstq */ op->type = MKOP(STORE, PREFIXED, 16); break; case 61: /* pstd */ op->type = MKOP(STORE, PREFIXED, 8); break; +#ifdef CONFIG_VSX + case 62: /* pstxvp */ + op->reg = VSX_REGISTER_XTP(rd); + op->type = MKOP(STORE_VSX, PREFIXED, 32); + op->element_size = 32; + break; +#endif /* CONFIG_VSX */ } break; case 1: /* Type 01 Eight-Byte Register-to-Register */ @@ -2836,6 +3114,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, } + if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) { + switch (GETTYPE(op->type)) { + case LOAD: + if (ra == rd) + goto unknown_opcode; + fallthrough; + case STORE: + case LOAD_FP: + case STORE_FP: + if (ra == 0) + goto unknown_opcode; + } + } + #ifdef CONFIG_VSX if ((GETTYPE(op->type) == LOAD_VSX || GETTYPE(op->type) == STORE_VSX) && @@ -2846,6 +3138,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, return 0; + unknown_opcode: + op->type = UNKNOWN; + return 0; + logical_done: if (word & 1) set_cr0(regs, op); @@ -2885,15 +3181,6 @@ NOKPROBE_SYMBOL(analyse_instr); */ static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs) { -#ifdef CONFIG_PPC32 - /* - * Check if we will touch kernel stack overflow - */ - if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) { - printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n"); - return -EINVAL; - } -#endif /* CONFIG_PPC32 */ /* * Check if we already set since that means we'll * lose the previous value. @@ -2971,12 +3258,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) case BARRIER_EIEIO: eieio(); break; +#ifdef CONFIG_PPC64 case BARRIER_LWSYNC: asm volatile("lwsync" : : : "memory"); break; case BARRIER_PTESYNC: asm volatile("ptesync" : : : "memory"); break; +#endif } break; @@ -3015,7 +3304,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op) default: WARN_ON_ONCE(1); } - regs->nip = next_pc; + regs_set_return_ip(regs, next_pc); } NOKPROBE_SYMBOL(emulate_update_regs); @@ -3052,7 +3341,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) err = 0; val = 0; switch (size) { -#ifdef __powerpc64__ +#ifdef CONFIG_PPC_HAS_LBARX_LHARX case 1: __get_user_asmx(val, ea, err, "lbarx"); break; @@ -3094,7 +3383,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) __put_user_asmx(op->val, ea, err, "stbcx.", cr); break; case 2: - __put_user_asmx(op->val, ea, err, "stbcx.", cr); + __put_user_asmx(op->val, ea, err, "sthcx.", cr); break; #endif case 4: @@ -3144,14 +3433,14 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) * stored in the thread_struct. If the instruction is in * the kernel, we must not touch the state in the thread_struct. 
*/ - if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) + if (!user_mode(regs) && !(regs->msr & MSR_FP)) return 0; err = do_fp_load(op, ea, regs, cross_endian); break; #endif #ifdef CONFIG_ALTIVEC case LOAD_VMX: - if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) + if (!user_mode(regs) && !(regs->msr & MSR_VEC)) return 0; err = do_vec_load(op->reg, ea, size, regs, cross_endian); break; @@ -3166,7 +3455,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) */ if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) msrbit = MSR_VEC; - if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) + if (!user_mode(regs) && !(regs->msr & msrbit)) return 0; err = do_vsx_load(op, ea, regs, cross_endian); break; @@ -3202,8 +3491,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) } #endif if ((op->type & UPDATE) && size == sizeof(long) && - op->reg == 1 && op->update_reg == 1 && - !(regs->msr & MSR_PR) && + op->reg == 1 && op->update_reg == 1 && !user_mode(regs) && ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) { err = handle_stack_update(ea, regs); break; @@ -3215,14 +3503,14 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) #ifdef CONFIG_PPC_FPU case STORE_FP: - if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP)) + if (!user_mode(regs) && !(regs->msr & MSR_FP)) return 0; err = do_fp_store(op, ea, regs, cross_endian); break; #endif #ifdef CONFIG_ALTIVEC case STORE_VMX: - if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC)) + if (!user_mode(regs) && !(regs->msr & MSR_VEC)) return 0; err = do_vec_store(op->reg, ea, size, regs, cross_endian); break; @@ -3237,7 +3525,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op) */ if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC)) msrbit = MSR_VEC; - if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit)) + if (!user_mode(regs) && !(regs->msr & msrbit)) return 0; err = do_vsx_store(op, ea, regs, cross_endian); break; @@ -3285,7 +3573,7 @@ NOKPROBE_SYMBOL(emulate_loadstore); * or -1 if the instruction is one that should not be stepped, * such as an rfid, or a mtmsrd that would clear MSR_RI. */ -int emulate_step(struct pt_regs *regs, struct ppc_inst instr) +int emulate_step(struct pt_regs *regs, ppc_inst_t instr) { struct instruction_op op; int r, err, type; @@ -3353,39 +3641,31 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr) /* can't step mtmsr[d] that would clear MSR_RI */ return -1; /* here op.val is the mask of bits to change */ - regs->msr = (regs->msr & ~op.val) | (val & op.val); + regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val)); goto instr_done; -#ifdef CONFIG_PPC64 case SYSCALL: /* sc */ /* - * N.B. this uses knowledge about how the syscall - * entry code works. If that is changed, this will - * need to be changed also. + * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't + * single step a system call instruction: + * + * Successful completion for an instruction means that the + * instruction caused no other interrupt. Thus a Trace + * interrupt never occurs for a System Call or System Call + * Vectored instruction, or for a Trap instruction that + * traps. 
*/ - if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) && - cpu_has_feature(CPU_FTR_REAL_LE) && - regs->gpr[0] == 0x1ebe) { - regs->msr ^= MSR_LE; - goto instr_done; - } - regs->gpr[9] = regs->gpr[13]; - regs->gpr[10] = MSR_KERNEL; - regs->gpr[11] = regs->nip + 4; - regs->gpr[12] = regs->msr & MSR_MASK; - regs->gpr[13] = (unsigned long) get_paca(); - regs->nip = (unsigned long) &system_call_common; - regs->msr = MSR_KERNEL; - return 1; - + return -1; + case SYSCALL_VECTORED_0: /* scv 0 */ + return -1; case RFI: return -1; -#endif } return 0; instr_done: - regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)); + regs_set_return_ip(regs, + truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type))); return 1; } NOKPROBE_SYMBOL(emulate_step); |
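The mlsd_8lsd_ea() hunk near the top swaps the open-coded prefix decoding for GET_PREFIX_R()/GET_PREFIX_RA(); per the removed lines those are bit 20 of the prefix word and bits 20:16 of the suffix. A rough standalone sketch of that decoding, plus the 34-bit signed displacement built from the d0/d1 fields the same function uses (the local macro definitions and the example words below are illustrative assumptions, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

/* Assumed equivalents of the kernel macros, per the open-coded forms the
 * patch removes: R is bit 20 of the prefix, RA is bits 20:16 of the suffix. */
#define GET_PREFIX_R(p)		((p) & (1UL << 20))
#define GET_PREFIX_RA(s)	(((s) >> 16) & 0x1f)

/* d0 (18 bits of the prefix) and d1 (16 bits of the suffix) form a 34-bit
 * signed displacement for the 8LS/MLS forms handled by mlsd_8lsd_ea(). */
static int64_t prefixed_disp(uint32_t prefix, uint32_t suffix)
{
	uint64_t d = ((uint64_t)(prefix & 0x3ffff) << 16) | (suffix & 0xffff);

	return (int64_t)(d << 30) >> 30;	/* sign-extend from 34 bits */
}

int main(void)
{
	uint32_t prefix = 0x04110000;	/* hypothetical prefix word, R = 1, d0 = 0x10000 */
	uint32_t suffix = 0xe401fff0;	/* hypothetical pld suffix, RA = 1, d1 = 0xfff0 */

	printf("R=%lu RA=%lu disp=0x%llx\n",
	       GET_PREFIX_R(prefix) ? 1UL : 0UL,
	       (unsigned long)GET_PREFIX_RA(suffix),
	       (unsigned long long)prefixed_disp(prefix, suffix));
	return 0;
}

Building the displacement this way is what lets the prefixed load/store forms reach well beyond the 16-bit range of the classic D-form instructions.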
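do_byte_reverse() gains a 32-byte case for the new paired VSX accesses: the four doublewords are individually byte-swapped and their order reversed, which amounts to a full byte reversal of the 32-byte quantity. A standalone model of the same swap, with the kernel's byterev_8() stood in for by the compiler builtin (an assumption made for illustration):

#include <stdio.h>
#include <stdint.h>

/* Same swap pattern as the new 32-byte case in do_byte_reverse(). */
static void byte_reverse_32(uint64_t *up)
{
	uint64_t tmp;

	tmp   = __builtin_bswap64(up[0]);
	up[0] = __builtin_bswap64(up[3]);
	up[3] = tmp;
	tmp   = __builtin_bswap64(up[2]);
	up[2] = __builtin_bswap64(up[1]);
	up[1] = tmp;
}

int main(void)
{
	union {
		uint64_t d[4];
		uint8_t b[32];
	} buf;

	for (int i = 0; i < 32; i++)
		buf.b[i] = i;
	byte_reverse_32(buf.d);
	for (int i = 0; i < 32; i++)
		printf("%02x%s", buf.b[i], i == 31 ? "\n" : " ");
	return 0;	/* prints 1f 1e ... 01 00, i.e. the bytes fully reversed */
}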
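emulate_dcbz() now derives the block size from l1_dcache_bytes() and goes through truncate_if_32bit() before aligning the address down to the block. A userspace model of just the address handling, with a 128-byte block size assumed purely for the example:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define L1_BLOCK	128	/* assumed block size; the kernel asks l1_dcache_bytes() */

/* Mirrors the address handling in the reworked emulate_dcbz(): truncate the
 * EA in 32-bit mode, then round down to the start of the cache block. */
static uint64_t dcbz_block_base(uint64_t ea, int msr_is_32bit)
{
	if (msr_is_32bit)
		ea &= 0xffffffffUL;		/* truncate_if_32bit() */
	return ea & ~(uint64_t)(L1_BLOCK - 1);
}

int main(void)
{
	unsigned char mem[4 * L1_BLOCK];
	uint64_t base = dcbz_block_base(L1_BLOCK + 17, 0);

	memset(mem, 0xaa, sizeof(mem));
	memset(mem + base, 0, L1_BLOCK);	/* stands in for the zeroing loop in __emulate_dcbz() */
	printf("dcbz at %d zeroes [%llu, %llu)\n", L1_BLOCK + 17,
	       (unsigned long long)base, (unsigned long long)(base + L1_BLOCK));
	return 0;
}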
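The new setb handler picks one 4-bit CR field and maps its LT/GT bits to -1, 1 or 0, as the in-line comment in that hunk explains. A small standalone model of the mapping (the value 28 for CR0_SHIFT is an assumption here, i.e. CR0 sitting in the top four bits of the CR):

#include <stdio.h>
#include <stdint.h>

#define CR0_SHIFT 28	/* assumed: CR field 0 occupies the top 4 bits of the CR */

/* Mirrors the setb emulation: bfa selects one 4-bit CR field, and the result
 * is -1, 1 or 0 depending on the LT and GT bits of that field. */
static long emulate_setb(uint32_t ccr, unsigned int bfa)
{
	unsigned int field = ccr >> (CR0_SHIFT - 4 * bfa);

	if (field & 8)		/* LT bit of the selected field */
		return -1;
	else if (field & 4)	/* GT bit of the selected field */
		return 1;
	return 0;
}

int main(void)
{
	/* CR1 = 0b1000 (LT set), CR2 = 0b0100 (GT set), CR3 = 0 */
	uint32_t ccr = (0x8u << 24) | (0x4u << 20);

	printf("setb on CR1: %ld\n", emulate_setb(ccr, 1));	/* -1 */
	printf("setb on CR2: %ld\n", emulate_setb(ccr, 2));	/*  1 */
	printf("setb on CR3: %ld\n", emulate_setb(ccr, 3));	/*  0 */
	return 0;
}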
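The lxvp/stxvp, lxvpx/stxvpx and plxvp/pstxvp cases all route the target register through the new VSX_REGISTER_XTP() macro. A trivial standalone sketch that reuses the macro exactly as defined in the patch and prints the first register of each pair (only the scaffolding around the macro is added here):

#include <stdio.h>

/* Same macro as added in the patch: bit 0 of the instruction field becomes
 * bit 5 of the VSR number, and the remaining bits select the even half of
 * the register pair. */
#define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe))

int main(void)
{
	for (int rd = 0; rd < 32; rd++)
		printf("instruction field %2d -> first VSR of pair %2d\n",
		       rd, VSX_REGISTER_XTP(rd));
	return 0;
}

Odd field values land in the upper bank (VSR 32 and above), even values in the lower bank, and the result is always even, which is what the 32-byte element_size handling in do_vsx_load()/do_vsx_store() relies on.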
