author     Jordan Niethe <jniethe5@gmail.com>        2020-05-06 13:40:26 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>     2020-05-19 00:10:36 +1000
commit     753462512868674a788ecc77bb96752efb818785 (patch)
tree       02e2f30549082335849621bfdcce413e4166b252 /arch/powerpc/kernel
parent     7c95d8893fb55869882c9f68f4c94840dc43f18f (diff)
powerpc: Use a macro for creating instructions from u32s
In preparation for instructions having a more complex data type, start using a macro, ppc_inst(), for creating an instruction from a u32. A macro is used so that instructions can still be used as initializer elements. Currently this does nothing, but it will allow for creating a data type that can represent prefixed instructions.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
[mpe: Change include guard to _ASM_POWERPC_INST_H]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Reviewed-by: Alistair Popple <alistair@popple.id.au>
Link: https://lore.kernel.org/r/20200506034050.24806-7-jniethe5@gmail.com
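For context, the new header is introduced earlier in this series; at this stage ppc_inst() is expected to be an identity macro, and it is a macro rather than an inline function precisely so it can appear in static initializers. A minimal sketch of what asm/inst.h might look like at this point (the include-guard name follows the commit message; the body is an assumption, not the verbatim upstream file):

/* arch/powerpc/include/asm/inst.h -- sketch, not the verbatim upstream header */
#ifndef _ASM_POWERPC_INST_H
#define _ASM_POWERPC_INST_H

/*
 * Instruction data type for POWER.
 *
 * For now an instruction is just a u32, so ppc_inst() is the identity.
 * Keeping it a macro (rather than a static inline) lets it be used in
 * initializer elements, e.g. static tables of instructions.
 */
#define ppc_inst(x) (x)

#endif /* _ASM_POWERPC_INST_H */

Once an instruction grows into a struct that can also carry the suffix word of a prefixed instruction, ppc_inst() can become a compound-literal style initializer without having to touch any of the call sites converted below.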
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--arch/powerpc/kernel/align.c1
-rw-r--r--arch/powerpc/kernel/crash_dump.c3
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c3
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c3
-rw-r--r--arch/powerpc/kernel/jump_label.c3
-rw-r--r--arch/powerpc/kernel/kgdb.c5
-rw-r--r--arch/powerpc/kernel/kprobes.c5
-rw-r--r--arch/powerpc/kernel/module_64.c3
-rw-r--r--arch/powerpc/kernel/optprobes.c32
-rw-r--r--arch/powerpc/kernel/security.c12
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c25
-rw-r--r--arch/powerpc/kernel/uprobes.c1
13 files changed, 57 insertions(+), 41 deletions(-)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 92045ed64976..86e9bf62f18c 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -24,6 +24,7 @@
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
+#include <asm/inst.h>
struct aligninfo {
unsigned char len;
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 05745ddbd229..78e556b131db 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -18,6 +18,7 @@
#include <asm/firmware.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
+#include <asm/inst.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -44,7 +45,7 @@ static void __init create_trampoline(unsigned long addr)
* branch to "addr" we jump to ("addr" + 32 MB). Although it requires
* two instructions it doesn't require any registers.
*/
- patch_instruction(p, PPC_INST_NOP);
+ patch_instruction(p, ppc_inst(PPC_INST_NOP));
patch_branch(++p, addr + PHYSICAL_START, 0);
}
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 9d32158ce36f..e8eb72a65572 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -11,6 +11,7 @@
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/machdep.h>
+#include <asm/inst.h>
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
@@ -36,7 +37,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
return -1;
for (i = 0; i < (len / 4); i++) {
- u32 inst = be32_to_cpu(insts[i]);
+ u32 inst = ppc_inst(be32_to_cpu(insts[i]));
patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, inst);
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 72f461bd70fb..46e09ac8b84a 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -24,6 +24,7 @@
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/hvcall.h>
+#include <asm/inst.h>
#include <linux/uaccess.h>
/*
@@ -243,7 +244,7 @@ dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info)
static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp,
struct arch_hw_breakpoint *info)
{
- unsigned int instr = 0;
+ unsigned int instr = ppc_inst(0);
int ret, type, size;
struct instruction_op op;
unsigned long addr = info->address;
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index ca37702bde97..daa4afce7ec8 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/code-patching.h>
+#include <asm/inst.h>
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
@@ -15,5 +16,5 @@ void arch_jump_label_transform(struct jump_entry *entry,
if (type == JUMP_LABEL_JMP)
patch_branch(addr, entry->target, 0);
else
- patch_instruction(addr, PPC_INST_NOP);
+ patch_instruction(addr, ppc_inst(PPC_INST_NOP));
}
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7dd55eb1259d..a6b38a19133f 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -26,6 +26,7 @@
#include <asm/debug.h>
#include <asm/code-patching.h>
#include <linux/slab.h>
+#include <asm/inst.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
@@ -424,7 +425,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return err;
- err = patch_instruction(addr, BREAK_INSTR);
+ err = patch_instruction(addr, ppc_inst(BREAK_INSTR));
if (err)
return -EFAULT;
@@ -439,7 +440,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
unsigned int instr = *(unsigned int *)bpt->saved_instr;
unsigned int *addr = (unsigned int *)bpt->bpt_addr;
- err = patch_instruction(addr, instr);
+ err = patch_instruction(addr, ppc_inst(instr));
if (err)
return -EFAULT;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 81efb605113e..2378a7ed4438 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -23,6 +23,7 @@
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
+#include <asm/inst.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -138,13 +139,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
- patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
+ patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
- patch_instruction(p->addr, p->opcode);
+ patch_instruction(p->addr, ppc_inst(p->opcode));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index f808159f3dfd..f390451ad915 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -20,6 +20,7 @@
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>
+#include <asm/inst.h>
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
@@ -491,7 +492,7 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
* "link" branches and they don't return, so they don't need the r2
* restore afterwards.
*/
- if (!instr_is_relative_link_branch(*prev_insn))
+ if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
return 1;
if (*instruction != PPC_INST_NOP) {
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 445b3dad82dc..44006c4ca4f1 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -16,6 +16,7 @@
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
+#include <asm/inst.h>
#define TMPL_CALL_HDLR_IDX \
(optprobe_template_call_handler - optprobe_template_entry)
@@ -147,13 +148,13 @@ void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
/* addis r4,0,(insn)@h */
- patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
- ((val >> 16) & 0xffff));
+ patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(4) |
+ ((val >> 16) & 0xffff)));
addr++;
/* ori r4,r4,(insn)@l */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
- ___PPC_RS(4) | (val & 0xffff));
+ patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(4) |
+ ___PPC_RS(4) | (val & 0xffff)));
}
/*
@@ -163,28 +164,28 @@ void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
/* lis r3,(op)@highest */
- patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
- ((val >> 48) & 0xffff));
+ patch_instruction(addr, ppc_inst(PPC_INST_ADDIS | ___PPC_RT(3) |
+ ((val >> 48) & 0xffff)));
addr++;
/* ori r3,r3,(op)@higher */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
- ___PPC_RS(3) | ((val >> 32) & 0xffff));
+ patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) |
+ ___PPC_RS(3) | ((val >> 32) & 0xffff)));
addr++;
/* rldicr r3,r3,32,31 */
- patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
- ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
+ patch_instruction(addr, ppc_inst(PPC_INST_RLDICR | ___PPC_RA(3) |
+ ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31)));
addr++;
/* oris r3,r3,(op)@h */
- patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
- ___PPC_RS(3) | ((val >> 16) & 0xffff));
+ patch_instruction(addr, ppc_inst(PPC_INST_ORIS | ___PPC_RA(3) |
+ ___PPC_RS(3) | ((val >> 16) & 0xffff)));
addr++;
/* ori r3,r3,(op)@l */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
- ___PPC_RS(3) | (val & 0xffff));
+ patch_instruction(addr, ppc_inst(PPC_INST_ORI | ___PPC_RA(3) |
+ ___PPC_RS(3) | (val & 0xffff)));
}
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
@@ -230,7 +231,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
pr_devel("Copying template to %p, size %lu\n", buff, size);
for (i = 0; i < size; i++) {
- rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
+ rc = patch_instruction(buff + i,
+ ppc_inst(*(optprobe_template_entry + i)));
if (rc < 0)
goto error;
}
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 479325baf6a9..d86701ce116b 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -16,6 +16,7 @@
#include <asm/debugfs.h>
#include <asm/security_features.h>
#include <asm/setup.h>
+#include <asm/inst.h>
u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
@@ -439,9 +440,11 @@ static void toggle_count_cache_flush(bool enable)
enable = false;
if (!enable) {
- patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
+ patch_instruction_site(&patch__call_flush_count_cache,
+ ppc_inst(PPC_INST_NOP));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
+ patch_instruction_site(&patch__call_kvm_flush_link_stack,
+ ppc_inst(PPC_INST_NOP));
#endif
pr_info("link-stack-flush: software flush disabled.\n");
link_stack_flush_enabled = false;
@@ -464,7 +467,8 @@ static void toggle_count_cache_flush(bool enable)
// If we just need to flush the link stack, patch an early return
if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
- patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
+ patch_instruction_site(&patch__flush_link_stack_return,
+ ppc_inst(PPC_INST_BLR));
no_count_cache_flush();
return;
}
@@ -475,7 +479,7 @@ static void toggle_count_cache_flush(bool enable)
return;
}
- patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
+ patch_instruction_site(&patch__flush_count_cache_return, ppc_inst(PPC_INST_BLR));
count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
}
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 3a43e8e847c8..0536e4aed330 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -85,7 +85,7 @@ notrace void __init machine_init(u64 dt_ptr)
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP);
+ patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_INST_NOP));
create_cond_branch(&insn, addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 8799d891320c..00f69b7baa8a 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -27,6 +27,7 @@
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
+#include <asm/inst.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -161,7 +162,7 @@ __ftrace_make_nop(struct module *mod,
#ifdef CONFIG_MPROFILE_KERNEL
/* When using -mkernel_profile there is no load to jump over */
- pop = PPC_INST_NOP;
+ pop = ppc_inst(PPC_INST_NOP);
if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
pr_err("Fetching instruction at %lx failed.\n", ip - 4);
@@ -169,7 +170,7 @@ __ftrace_make_nop(struct module *mod,
}
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
+ if (op != ppc_inst(PPC_INST_MFLR) && op != ppc_inst(PPC_INST_STD_LR)) {
pr_err("Unexpected instruction %08x around bl _mcount\n", op);
return -EINVAL;
}
@@ -188,7 +189,7 @@ __ftrace_make_nop(struct module *mod,
* Use a b +8 to jump over the load.
*/
- pop = PPC_INST_BRANCH | 8; /* b +8 */
+ pop = ppc_inst(PPC_INST_BRANCH | 8); /* b +8 */
/*
* Check what is in the next instruction. We can see ld r2,40(r1), but
@@ -199,7 +200,7 @@ __ftrace_make_nop(struct module *mod,
return -EFAULT;
}
- if (op != PPC_INST_LD_TOC) {
+ if (op != ppc_inst(PPC_INST_LD_TOC)) {
pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
return -EINVAL;
}
@@ -275,7 +276,7 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
- op = PPC_INST_NOP;
+ op = ppc_inst(PPC_INST_NOP);
if (patch_instruction((unsigned int *)ip, op))
return -EPERM;
@@ -420,7 +421,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
}
- if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) {
+ if (patch_instruction((unsigned int *)ip, ppc_inst(PPC_INST_NOP))) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -442,7 +443,7 @@ int ftrace_make_nop(struct module *mod,
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
- new = PPC_INST_NOP;
+ new = ppc_inst(PPC_INST_NOP);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
return __ftrace_make_nop_kernel(rec, addr);
@@ -496,7 +497,7 @@ expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
* The load offset is different depending on the ABI. For simplicity
* just mask it out when doing the compare.
*/
- if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
+ if (op0 != ppc_inst(0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
return 0;
return 1;
}
@@ -505,7 +506,7 @@ static int
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
/* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (op0 != PPC_INST_NOP)
+ if (op0 != ppc_inst(PPC_INST_NOP))
return 0;
return 1;
}
@@ -588,7 +589,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return -EFAULT;
/* It should be pointing to a nop */
- if (op != PPC_INST_NOP) {
+ if (op != ppc_inst(PPC_INST_NOP)) {
pr_err("Expected NOP but have %x\n", op);
return -EINVAL;
}
@@ -645,7 +646,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
return -EFAULT;
}
- if (op != PPC_INST_NOP) {
+ if (op != ppc_inst(PPC_INST_NOP)) {
pr_err("Unexpected call sequence at %p: %x\n", ip, op);
return -EINVAL;
}
@@ -676,7 +677,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
- old = PPC_INST_NOP;
+ old = ppc_inst(PPC_INST_NOP);
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 1cfef0e5fec5..31c870287f2b 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -14,6 +14,7 @@
#include <linux/kdebug.h>
#include <asm/sstep.h>
+#include <asm/inst.h>
#define UPROBE_TRAP_NR UINT_MAX