From 94afd069d937d84fb4f696eb9a78db4084e43d21 Mon Sep 17 00:00:00 2001
From: Jordan Niethe
Date: Wed, 6 May 2020 13:40:31 +1000
Subject: powerpc: Use a datatype for instructions

Currently, unsigned ints are used to represent instructions on powerpc.
This has worked well because instructions have always been 4-byte words.
However, ISA v3.1 introduces a change that means this scheme will no
longer work as well: prefixed instructions. A prefixed instruction is
made up of a word prefix followed by a word suffix, forming an 8-byte
doubleword instruction. No matter the endianness of the system, the
prefix always comes first. Prefixed instructions are only planned for
powerpc64.

Introduce a ppc_inst type to represent both prefixed and word
instructions on powerpc64, while keeping it possible to have exclusively
word instructions on powerpc32.

Signed-off-by: Jordan Niethe
[mpe: Fix compile error in emulate_spe()]
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200506034050.24806-12-jniethe5@gmail.com
---
 arch/powerpc/kernel/align.c          |  9 ++--
 arch/powerpc/kernel/crash_dump.c     |  2 +-
 arch/powerpc/kernel/epapr_paravirt.c |  6 +--
 arch/powerpc/kernel/hw_breakpoint.c  |  4 +-
 arch/powerpc/kernel/jump_label.c     |  2 +-
 arch/powerpc/kernel/kgdb.c           |  4 +-
 arch/powerpc/kernel/kprobes.c        |  8 ++--
 arch/powerpc/kernel/mce_power.c      |  5 ++-
 arch/powerpc/kernel/optprobes.c      | 64 +++++++++++++++------------
 arch/powerpc/kernel/setup_32.c       |  4 +-
 arch/powerpc/kernel/trace/ftrace.c   | 83 ++++++++++++++++++------------------
 arch/powerpc/kernel/vecemu.c         |  5 ++-
 12 files changed, 105 insertions(+), 91 deletions(-)

(limited to 'arch/powerpc/kernel')

diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index a63216da8cf1..9b35d6160507 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -105,7 +105,7 @@ static struct aligninfo spe_aligninfo[32] = {
  * so we don't need the address swizzling.
  */
 static int emulate_spe(struct pt_regs *regs, unsigned int reg,
-		       unsigned int instr)
+		       struct ppc_inst ppc_instr)
 {
 	int ret;
 	union {
@@ -116,8 +116,9 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 	} data, temp;
 	unsigned char __user *p, *addr;
 	unsigned long *evr = &current->thread.evr[reg];
-	unsigned int nb, flags;
+	unsigned int nb, flags, instr;
 
+	instr = ppc_inst_val(ppc_instr);
 	instr = (instr >> 1) & 0x1f;
 
 	/* DAR has the operand effective address */
@@ -294,7 +295,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 
 int fix_alignment(struct pt_regs *regs)
 {
-	unsigned int instr;
+	struct ppc_inst instr;
 	struct instruction_op op;
 	int r, type;
 
@@ -304,7 +305,7 @@ int fix_alignment(struct pt_regs *regs)
 	 */
 	CHECK_FULL_REGS(regs);
 
-	if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
+	if (unlikely(__get_user(instr.val, (unsigned int __user *)regs->nip)))
 		return -EFAULT;
 	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
 		/* We don't handle PPC little-endian any more... */
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 78e556b131db..72bafb47e757 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -35,7 +35,7 @@ void __init reserve_kdump_trampoline(void)
 
 static void __init create_trampoline(unsigned long addr)
 {
-	unsigned int *p = (unsigned int *)addr;
+	struct ppc_inst *p = (struct ppc_inst *)addr;
 
 	/* The maximum range of a single instruction branch, is the current
 	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
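The hunks in this patch rely on a struct ppc_inst type and helpers such as
ppc_inst(), ppc_inst_val() and ppc_inst_equal() that are introduced elsewhere
in this series; only their callers are converted here. A minimal sketch of the
word-only form, inferred from how the helpers are used in these hunks rather
than copied from the series, might look like:

	struct ppc_inst {
		u32 val;
	} __packed;

	/* Wrap a raw 32-bit opcode, e.g. ppc_inst(PPC_INST_NOP). */
	#define ppc_inst(x) ((struct ppc_inst){ .val = (x) })

	/* Recover the raw word so callers can mask out opcode fields. */
	static inline u32 ppc_inst_val(struct ppc_inst x)
	{
		return x.val;
	}

	static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
	{
		return ppc_inst_val(x) == ppc_inst_val(y);
	}

Direct field accesses such as __get_user(instr.val, ...) in the hunks above
and below assume this single-word layout.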
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index e8eb72a65572..2ed14d4a47f5 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -37,10 +37,10 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
 		return -1;
 
 	for (i = 0; i < (len / 4); i++) {
-		u32 inst = ppc_inst(be32_to_cpu(insts[i]));
-		patch_instruction(epapr_hypercall_start + i, inst);
+		struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i]));
+		patch_instruction((struct ppc_inst *)(epapr_hypercall_start + i), inst);
 #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
-		patch_instruction(epapr_ev_idle_start + i, inst);
+		patch_instruction((struct ppc_inst *)(epapr_ev_idle_start + i), inst);
 #endif
 	}
 
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 46e09ac8b84a..2db9a7ac7bcb 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -244,12 +244,12 @@ dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info)
 static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp,
			     struct arch_hw_breakpoint *info)
 {
-	unsigned int instr = ppc_inst(0);
+	struct ppc_inst instr = ppc_inst(0);
 	int ret, type, size;
 	struct instruction_op op;
 	unsigned long addr = info->address;
 
-	if (__get_user_inatomic(instr, (unsigned int *)regs->nip))
+	if (__get_user_inatomic(instr.val, (unsigned int *)regs->nip))
 		goto fail;
 
 	ret = analyse_instr(&op, regs, instr);
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index daa4afce7ec8..144858027fa3 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,7 +11,7 @@
 void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
 {
-	u32 *addr = (u32 *)(unsigned long)entry->code;
+	struct ppc_inst *addr = (struct ppc_inst *)(unsigned long)entry->code;
 
 	if (type == JUMP_LABEL_JMP)
 		patch_branch(addr, entry->target, 0);
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index a6b38a19133f..652b2852bea3 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -419,7 +419,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
 	int err;
 	unsigned int instr;
-	unsigned int *addr = (unsigned int *)bpt->bpt_addr;
+	struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
 
 	err = probe_kernel_address(addr, instr);
 	if (err)
@@ -438,7 +438,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
 	int err;
 	unsigned int instr = *(unsigned int *)bpt->saved_instr;
-	unsigned int *addr = (unsigned int *)bpt->bpt_addr;
+	struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
 
 	err = patch_instruction(addr, ppc_inst(instr));
 	if (err)
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 92fa3070d905..a08ae5803622 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -106,7 +106,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
 int arch_prepare_kprobe(struct kprobe *p)
 {
 	int ret = 0;
-	kprobe_opcode_t insn = *p->addr;
+	struct ppc_inst insn = *(struct ppc_inst *)p->addr;
 
 	if ((unsigned long)p->addr & 0x03) {
 		printk("Attempt to register kprobe at an unaligned address\n");
@@ -139,13 +139,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
 
 void arch_arm_kprobe(struct kprobe *p)
 {
-	patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
+	patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
 }
 NOKPROBE_SYMBOL(arch_arm_kprobe);
 
 void arch_disarm_kprobe(struct kprobe *p)
 {
-	patch_instruction(p->addr, ppc_inst(p->opcode));
+	patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
 }
 NOKPROBE_SYMBOL(arch_disarm_kprobe);
 
@@ -217,7 +217,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
 static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
 {
 	int ret;
-	unsigned int insn = *p->ainsn.insn;
+	struct ppc_inst insn = *(struct ppc_inst *)p->ainsn.insn;
 
 	/* regs->nip is also adjusted if emulate_step returns 1 */
 	ret = emulate_step(regs, insn);
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 1d18991f3854..08b355f80d9e 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * Convert an address related to an mm to a PFN. NOTE: we are in real
@@ -369,7 +370,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
	 * in real-mode is tricky and can lead to recursive
	 * faults
	 */
-	int instr;
+	struct ppc_inst instr;
 	unsigned long pfn, instr_addr;
 	struct instruction_op op;
 	struct pt_regs tmp = *regs;
@@ -377,7 +378,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
 	pfn = addr_to_pfn(regs, regs->nip);
 	if (pfn != ULONG_MAX) {
 		instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
-		instr = *(unsigned int *)(instr_addr);
+		instr = *(struct ppc_inst *)(instr_addr);
 		if (!analyse_instr(&op, &tmp, instr)) {
 			pfn = addr_to_pfn(regs, op.ea);
 			*addr = op.ea;
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 44006c4ca4f1..5a71fef71c22 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -100,8 +100,9 @@ static unsigned long can_optimize(struct kprobe *p)
	 * Ensure that the instruction is not a conditional branch,
	 * and that can be emulated.
	 */
-	if (!is_conditional_branch(*p->ainsn.insn) &&
-	    analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
+	if (!is_conditional_branch(*(struct ppc_inst *)p->ainsn.insn) &&
+	    analyse_instr(&op, &regs,
+			  *(struct ppc_inst *)p->ainsn.insn) == 1) {
 		emulate_update_regs(&regs, &op);
 		nip = regs.nip;
 	}
@@ -148,13 +149,15 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
 {
 	/* addis r4,0,(insn)@h */
-	patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) |
-					 ((val >> 16) & 0xffff)));
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) |
+				   ((val >> 16) & 0xffff)));
 	addr++;
 
 	/* ori r4,r4,(insn)@l */
-	patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(4) |
-					 ___PPC_RS(4) | (val & 0xffff)));
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_INST_ORI | ___PPC_RA(4) |
+				   ___PPC_RS(4) | (val & 0xffff)));
 }
 
 /*
@@ -164,34 +167,39 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
 void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
 {
 	/* lis r3,(op)@highest */
-	patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) |
-					 ((val >> 48) & 0xffff)));
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) |
+				   ((val >> 48) & 0xffff)));
 	addr++;
 
 	/* ori r3,r3,(op)@higher */
-	patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) |
-					 ___PPC_RS(3) | ((val >> 32) & 0xffff)));
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_INST_ORI | ___PPC_RA(3) |
+				   ___PPC_RS(3) | ((val >> 32) & 0xffff)));
 	addr++;
 
 	/* rldicr r3,r3,32,31 */
-	patch_instruction(addr, ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) |
-					 ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31)));
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) |
+				   ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31)));
 	addr++;
 
 	/* oris r3,r3,(op)@h */
-	patch_instruction(addr, ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) |
-					 ___PPC_RS(3) | ((val >> 16) & 0xffff)));
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) |
+				   ___PPC_RS(3) | ((val >> 16) & 0xffff)));
 	addr++;
 
 	/* ori r3,r3,(op)@l */
-	patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) |
-					 ___PPC_RS(3) | (val & 0xffff)));
+	patch_instruction((struct ppc_inst *)addr,
+			  ppc_inst(PPC_INST_ORI | ___PPC_RA(3) |
+				   ___PPC_RS(3) | (val & 0xffff)));
 }
 
 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 {
-	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
-	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
+	struct ppc_inst branch_op_callback, branch_emulate_step;
+	kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff;
 	long b_offset;
 	unsigned long nip, size;
 	int rc, i;
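A note on the patch_imm64_load_insns() hunk just above: the @highest/@higher/@h/@l
comments describe building a 64-bit immediate 16 bits at a time (lis, ori,
rldicr to shift up by 32, oris, ori). A small stand-alone illustration of that
masking arithmetic (an editorial example, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t val = 0x123456789abcdef0ULL;
		unsigned int highest = (val >> 48) & 0xffff;	/* 0x1234: lis  r3,highest   */
		unsigned int higher  = (val >> 32) & 0xffff;	/* 0x5678: ori  r3,r3,higher */
		/* rldicr r3,r3,32,31 then moves those 32 bits into the upper half */
		unsigned int high    = (val >> 16) & 0xffff;	/* 0x9abc: oris r3,r3,high   */
		unsigned int low     = val & 0xffff;		/* 0xdef0: ori  r3,r3,low    */

		printf("%04x %04x %04x %04x\n", highest, higher, high, low);
		return 0;
	}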
@@ -231,7 +239,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
 	pr_devel("Copying template to %p, size %lu\n", buff, size);
 	for (i = 0; i < size; i++) {
-		rc = patch_instruction(buff + i,
+		rc = patch_instruction((struct ppc_inst *)(buff + i),
				       ppc_inst(*(optprobe_template_entry + i)));
 		if (rc < 0)
 			goto error;
@@ -254,20 +262,22 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	}
 
 	rc = create_branch(&branch_op_callback,
-			   (unsigned int *)buff + TMPL_CALL_HDLR_IDX,
+			   (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
			   (unsigned long)op_callback_addr,
			   BRANCH_SET_LINK);
 
 	rc |= create_branch(&branch_emulate_step,
-			    (unsigned int *)buff + TMPL_EMULATE_IDX,
+			    (struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
			    (unsigned long)emulate_step_addr,
			    BRANCH_SET_LINK);
 
 	if (rc)
 		goto error;
 
-	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
-	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
+	patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
+			  branch_op_callback);
+	patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
+			  branch_emulate_step);
 
 	/*
	 * 3. load instruction to be emulated into relevant register, and
@@ -277,7 +287,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
 	/*
	 * 4. branch back from trampoline
	 */
-	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
+	patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0);
 
 	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));
@@ -309,7 +319,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
 
 void arch_optimize_kprobes(struct list_head *oplist)
 {
-	unsigned int instr;
+	struct ppc_inst instr;
 	struct optimized_kprobe *op;
 	struct optimized_kprobe *tmp;
 
@@ -321,9 +331,9 @@ void arch_optimize_kprobes(struct list_head *oplist)
 		memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
 		create_branch(&instr,
-			      (unsigned int *)op->kp.addr,
+			      (struct ppc_inst *)op->kp.addr,
			      (unsigned long)op->optinsn.insn, 0);
-		patch_instruction(op->kp.addr, instr);
+		patch_instruction((struct ppc_inst *)op->kp.addr, instr);
 		list_del_init(&op->list);
 	}
 }
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 0536e4aed330..15f0a7c84944 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -74,8 +74,8 @@ EXPORT_SYMBOL(DMA_MODE_WRITE);
  */
 notrace void __init machine_init(u64 dt_ptr)
 {
-	unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache);
-	unsigned int insn;
+	struct ppc_inst *addr = (struct ppc_inst *)patch_site_addr(&patch__memset_nocache);
+	struct ppc_inst insn;
 
 	/* Configure static keys first, now that we're relocated. */
 	setup_feature_keys();
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index cbb19af4a72a..3117ed675735 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -41,23 +41,23 @@
 #define	NUM_FTRACE_TRAMPS	8
 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
 
-static unsigned int
+static struct ppc_inst
 ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
 {
-	unsigned int op;
+	struct ppc_inst op;
 
 	addr = ppc_function_entry((void *)addr);
 
 	/* if (link) set op to 'bl' else 'b' */
-	create_branch(&op, (unsigned int *)ip, addr, link ? 1 : 0);
+	create_branch(&op, (struct ppc_inst *)ip, addr, link ? 1 : 0);
 
 	return op;
 }
 
 static int
-ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
+ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
 {
-	unsigned int replaced;
+	struct ppc_inst replaced;
 
 	/*
	 * Note:
@@ -79,7 +79,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
 	}
 
 	/* replace the text with the new text */
-	if (patch_instruction((unsigned int *)ip, new))
+	if (patch_instruction((struct ppc_inst *)ip, new))
 		return -EPERM;
 
 	return 0;
@@ -90,24 +90,24 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
  */
 static int test_24bit_addr(unsigned long ip, unsigned long addr)
 {
-	unsigned int op;
+	struct ppc_inst op;
 
 	addr = ppc_function_entry((void *)addr);
 
 	/* use the create_branch to verify that this offset can be branched */
-	return create_branch(&op, (unsigned int *)ip, addr, 0) == 0;
+	return create_branch(&op, (struct ppc_inst *)ip, addr, 0) == 0;
 }
 
-static int is_bl_op(unsigned int op)
+static int is_bl_op(struct ppc_inst op)
 {
 	return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
 }
 
-static int is_b_op(unsigned int op)
+static int is_b_op(struct ppc_inst op)
 {
 	return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
 }
 
-static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+static unsigned long find_bl_target(unsigned long ip, struct ppc_inst op)
 {
 	int offset;
 
@@ -127,7 +127,7 @@ __ftrace_make_nop(struct module *mod,
 {
 	unsigned long entry, ptr, tramp;
 	unsigned long ip = rec->ip;
-	unsigned int op, pop;
+	struct ppc_inst op, pop;
 
 	/* read where this goes */
 	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
@@ -208,7 +208,7 @@ __ftrace_make_nop(struct module *mod,
 	}
 #endif /* CONFIG_MPROFILE_KERNEL */
 
-	if (patch_instruction((unsigned int *)ip, pop)) {
+	if (patch_instruction((struct ppc_inst *)ip, pop)) {
 		pr_err("Patching NOP failed.\n");
 		return -EPERM;
 	}
@@ -221,7 +221,7 @@ static int
 __ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned int op;
+	struct ppc_inst op;
 	unsigned int jmp[4];
 	unsigned long ip = rec->ip;
 	unsigned long tramp;
@@ -280,7 +280,7 @@ __ftrace_make_nop(struct module *mod,
 
 	op = ppc_inst(PPC_INST_NOP);
 
-	if (patch_instruction((unsigned int *)ip, op))
+	if (patch_instruction((struct ppc_inst *)ip, op))
 		return -EPERM;
 
 	return 0;
@@ -291,7 +291,7 @@ __ftrace_make_nop(struct module *mod,
 static unsigned long find_ftrace_tramp(unsigned long ip)
 {
 	int i;
-	unsigned int instr;
+	struct ppc_inst instr;
 
 	/*
	 * We have the compiler generated long_branch tramps at the end
@@ -328,9 +328,10 @@ static int add_ftrace_tramp(unsigned long tramp)
  */
 static int setup_mcount_compiler_tramp(unsigned long tramp)
 {
-	int i, op;
+	int i;
+	struct ppc_inst op;
 	unsigned long ptr;
-	unsigned int instr;
+	struct ppc_inst instr;
 	static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];
 
 	/* Is this a known long jump tramp? */
@@ -379,7 +380,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
 		return -1;
 	}
 
-	if (patch_branch((unsigned int *)tramp, ptr, 0)) {
+	if (patch_branch((struct ppc_inst *)tramp, ptr, 0)) {
 		pr_debug("REL24 out of range!\n");
 		return -1;
 	}
@@ -395,7 +396,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
 static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned long tramp, ip = rec->ip;
-	unsigned int op;
+	struct ppc_inst op;
 
 	/* Read where this goes */
 	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
@@ -423,7 +424,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
 		}
 	}
 
-	if (patch_instruction((unsigned int *)ip, ppc_inst(PPC_INST_NOP))) {
+	if (patch_instruction((struct ppc_inst *)ip, ppc_inst(PPC_INST_NOP))) {
 		pr_err("Patching NOP failed.\n");
 		return -EPERM;
 	}
@@ -435,7 +436,7 @@ int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned long ip = rec->ip;
-	unsigned int old, new;
+	struct ppc_inst old, new;
 
 	/*
	 * If the calling address is more that 24 bits away,
@@ -488,7 +489,7 @@ int ftrace_make_nop(struct module *mod,
  */
 #ifndef CONFIG_MPROFILE_KERNEL
 static int
-expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
+expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
 {
 	/*
	 * We expect to see:
@@ -506,7 +507,7 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
 }
 #else
 static int
-expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
+expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
 {
 	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
 	if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP)))
@@ -518,8 +519,8 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
 static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned int op[2];
-	unsigned int instr;
+	struct ppc_inst op[2];
+	struct ppc_inst instr;
 	void *ip = (void *)rec->ip;
 	unsigned long entry, ptr, tramp;
 	struct module *mod = rec->arch.mod;
@@ -584,7 +585,7 @@ static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
 	int err;
-	unsigned int op;
+	struct ppc_inst op;
 	unsigned long ip = rec->ip;
 
 	/* read where this goes */
@@ -604,7 +605,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	}
 
 	/* create the branch to the trampoline */
-	err = create_branch(&op, (unsigned int *)ip,
+	err = create_branch(&op, (struct ppc_inst *)ip,
			    rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
 	if (err) {
 		pr_err("REL24 out of range!\n");
@@ -613,7 +614,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	pr_devel("write to %lx\n", rec->ip);
 
-	if (patch_instruction((unsigned int *)ip, op))
+	if (patch_instruction((struct ppc_inst *)ip, op))
 		return -EPERM;
 
 	return 0;
@@ -623,7 +624,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 static int
 __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned int op;
+	struct ppc_inst op;
 	void *ip = (void *)rec->ip;
 	unsigned long tramp, entry, ptr;
 
@@ -671,7 +672,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
 	unsigned long ip = rec->ip;
-	unsigned int old, new;
+	struct ppc_inst old, new;
 
 	/*
	 * If the calling address is more that 24 bits away,
@@ -710,7 +711,7 @@ static int
 __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
					unsigned long addr)
 {
-	unsigned int op;
+	struct ppc_inst op;
 	unsigned long ip = rec->ip;
 	unsigned long entry, ptr, tramp;
 	struct module *mod = rec->arch.mod;
@@ -758,7 +759,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 	/* The new target may be within range */
 	if (test_24bit_addr(ip, addr)) {
 		/* within range */
-		if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) {
+		if (patch_branch((struct ppc_inst *)ip, addr, BRANCH_SET_LINK)) {
 			pr_err("REL24 out of range!\n");
 			return -EINVAL;
 		}
@@ -786,12 +787,12 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 	}
 
 	/* Ensure branch is within 24 bits */
-	if (create_branch(&op, (unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
+	if (create_branch(&op, (struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
 		pr_err("Branch out of range\n");
 		return -EINVAL;
 	}
 
-	if (patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
+	if (patch_branch((struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
 		pr_err("REL24 out of range!\n");
 		return -EINVAL;
 	}
@@ -804,7 +805,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			unsigned long addr)
 {
 	unsigned long ip = rec->ip;
-	unsigned int old, new;
+	struct ppc_inst old, new;
 
 	/*
	 * If the calling address is more that 24 bits away,
@@ -844,10 +845,10 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
 	unsigned long ip = (unsigned long)(&ftrace_call);
-	unsigned int old, new;
+	struct ppc_inst old, new;
 	int ret;
 
-	old = *(unsigned int *)&ftrace_call;
+	old = *(struct ppc_inst *)&ftrace_call;
 	new = ftrace_call_replace(ip, (unsigned long)func, 1);
 	ret = ftrace_modify_code(ip, old, new);
 
@@ -855,7 +856,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	/* Also update the regs callback function */
 	if (!ret) {
 		ip = (unsigned long)(&ftrace_regs_call);
-		old = *(unsigned int *)&ftrace_regs_call;
+		old = *(struct ppc_inst *)&ftrace_regs_call;
 		new = ftrace_call_replace(ip, (unsigned long)func, 1);
 		ret = ftrace_modify_code(ip, old, new);
 	}
@@ -929,7 +930,7 @@ int ftrace_enable_ftrace_graph_caller(void)
 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
 	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
 	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
-	unsigned int old, new;
+	struct ppc_inst old, new;
 
 	old = ftrace_call_replace(ip, stub, 0);
 	new = ftrace_call_replace(ip, addr, 0);
@@ -942,7 +943,7 @@ int ftrace_disable_ftrace_graph_caller(void)
 	unsigned long ip = (unsigned long)(&ftrace_graph_call);
 	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
 	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
-	unsigned int old, new;
+	struct ppc_inst old, new;
 
 	old = ftrace_call_replace(ip, addr, 0);
 	new = ftrace_call_replace(ip, stub, 0);
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index a544590b90e5..3dd70eeb10c5 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -261,11 +261,12 @@ static unsigned int rfin(unsigned int x)
 
 int emulate_altivec(struct pt_regs *regs)
 {
-	unsigned int instr, i, word;
+	struct ppc_inst instr;
+	unsigned int i, word;
 	unsigned int va, vb, vc, vd;
 	vector128 *vrs;
 
-	if (get_user(instr, (unsigned int __user *) regs->nip))
+	if (get_user(instr.val, (unsigned int __user *)regs->nip))
 		return -EFAULT;
 
 	word = ppc_inst_val(instr);
-- 
cgit
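For completeness, the commit message notes that prefixed instructions are only
planned for powerpc64. Later patches in the series extend the same type to
carry the suffix word there; a rough sketch of that shape (an assumption based
on the commit message, not something contained in this patch) is:

	struct ppc_inst {
		u32 val;
	#ifdef CONFIG_PPC64
		u32 suffix;	/* only meaningful for prefixed instructions */
	#endif
	} __packed;

	#ifdef CONFIG_PPC64
	/* Hypothetical constructor for a prefix/suffix pair. */
	#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })

	static inline bool ppc_inst_prefixed(struct ppc_inst x)
	{
		/* ISA v3.1 reserves primary opcode 1 for prefixes. */
		return (ppc_inst_val(x) >> 26) == 1;
	}
	#endif

On powerpc32 the type stays a single word, which is what keeps the conversions
in this patch source-compatible there.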