Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/Makefile           |  3
-rw-r--r--  arch/powerpc/lib/code-patching.c    | 61
-rw-r--r--  arch/powerpc/lib/feature-fixups.c   |  2
-rw-r--r--  arch/powerpc/lib/sstep.c            | 52
4 files changed, 27 insertions(+), 91 deletions(-)
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 5d1881d2e39a..8560c912186d 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -13,6 +13,9 @@ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
KASAN_SANITIZE_code-patching.o := n
KASAN_SANITIZE_feature-fixups.o := n
+# restart_table.o contains functions called in the NMI interrupt path
+# which can be in real mode. Disable KASAN.
+KASAN_SANITIZE_restart_table.o := n
ifdef CONFIG_KASAN
CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
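
Note: the new Makefile rule disables KASAN for restart_table.o because its functions can run in real mode on the NMI path, where KASAN's shadow-memory accesses are unsafe. As a rough illustration only (not part of the patch), the same opt-out exists per function via the compiler attribute that the kernel wraps as __no_sanitize_address; the function name below is hypothetical.

    /* Illustrative per-function analogue of the file-wide KASAN_SANITIZE := n. */
    __attribute__((no_sanitize_address))
    void hypothetical_real_mode_helper(unsigned long *p)
    {
            /* May be reached in real mode from an NMI, where touching the
             * KASAN shadow would be unsafe, so instrumentation is disabled. */
            *p = 0;
    }
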
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 00c68e7fb11e..6edf0697a526 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
+#include <linux/jump_label.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
@@ -32,7 +33,7 @@ static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr
return 0;
failed:
- return -EFAULT;
+ return -EPERM;
}
int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
@@ -78,6 +79,8 @@ static int text_area_cpu_down(unsigned int cpu)
return 0;
}
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);
+
/*
* Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
* we judge it as being preferable to a kernel that will crash later when
@@ -88,6 +91,7 @@ void __init poking_init(void)
BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke:online", text_area_cpu_up,
text_area_cpu_down));
+ static_branch_enable(&poking_init_done);
}
/*
@@ -97,7 +101,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
unsigned long pfn;
- if (is_vmalloc_or_module_addr(addr))
+ if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
pfn = vmalloc_to_pfn(addr);
else
pfn = __pa_symbol(addr) >> PAGE_SHIFT;
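
Note: guarding the call with IS_ENABLED(CONFIG_MODULES) keeps the vmalloc_to_pfn() branch type-checked but lets the compiler delete it when module support is compiled out, since the condition becomes a compile-time constant. A standalone sketch of that idiom, with a made-up flag and a deliberately simplified stand-in for the real IS_ENABLED() macro:

    #include <stdio.h>

    #define CONFIG_EXAMPLE_FEATURE 1        /* would come from Kconfig */
    #define IS_ENABLED(option) (option)     /* simplified stand-in for the kernel macro */

    static long example_lookup(long addr)
    {
            /* When CONFIG_EXAMPLE_FEATURE is 0 this whole branch is dead code
             * and is dropped at compile time, yet it is still type-checked. */
            if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE) && addr < 0)
                    return -1;
            return addr >> 12;
    }

    int main(void)
    {
            printf("%ld\n", example_lookup(0x4000));
            return 0;
    }
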
@@ -170,7 +174,7 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
*/
- if (!this_cpu_read(text_poke_area))
+ if (!static_branch_likely(&poking_init_done))
return raw_patch_instruction(addr, instr);
local_irq_save(flags);
@@ -188,10 +192,12 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
#endif /* CONFIG_STRICT_KERNEL_RWX */
+__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);
+
int patch_instruction(u32 *addr, ppc_inst_t instr)
{
/* Make sure we aren't patching a freed init section */
- if (system_state >= SYSTEM_FREEING_INITMEM && init_section_contains(addr, 4))
+ if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
return 0;
return do_patch_instruction(addr, instr);
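
Note: both new keys replace a runtime test with a static branch: poking_init_done stands in for the per-cpu text_poke_area readiness check, and init_mem_is_free for the system_state comparison, so the hot patching path costs a patched nop/branch rather than a memory load. init_mem_is_free is presumably flipped where initmem is freed, which is outside this diff. A minimal kernel-style sketch of the pattern, with illustrative names (example_init_done, example_late_init, example_fast_path); the jump-label API itself is the real one:

    #include <linux/jump_label.h>
    #include <linux/init.h>
    #include <linux/errno.h>

    static __ro_after_init DEFINE_STATIC_KEY_FALSE(example_init_done);

    static int __init example_late_init(void)
    {
            static_branch_enable(&example_init_done);   /* rewrite the branch sites once */
            return 0;
    }
    late_initcall(example_late_init);

    int example_fast_path(void)
    {
            if (!static_branch_likely(&example_init_done))
                    return -EAGAIN;   /* early/slow path, unlikely after init */
            return 0;                 /* hot path: no flag load, just a patched branch */
    }
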
@@ -208,33 +214,6 @@ int patch_branch(u32 *addr, unsigned long target, int flags)
return patch_instruction(addr, instr);
}
-bool is_offset_in_branch_range(long offset)
-{
- /*
- * Powerpc branch instruction is :
- *
- * 0 6 30 31
- * +---------+----------------+---+---+
- * | opcode | LI |AA |LK |
- * +---------+----------------+---+---+
- * Where AA = 0 and LK = 0
- *
- * LI is a signed 24 bits integer. The real branch offset is computed
- * by: imm32 = SignExtend(LI:'0b00', 32);
- *
- * So the maximum forward branch should be:
- * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
- * The maximum backward branch should be:
- * (0xff800000 << 2) = 0xfe000000 = -0x2000000
- */
- return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
-}
-
-bool is_offset_in_cond_branch_range(long offset)
-{
- return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
-}
-
/*
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
@@ -257,26 +236,6 @@ bool is_conditional_branch(ppc_inst_t instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-int create_branch(ppc_inst_t *instr, const u32 *addr,
- unsigned long target, int flags)
-{
- long offset;
-
- *instr = ppc_inst(0);
- offset = target;
- if (! (flags & BRANCH_ABSOLUTE))
- offset = offset - (unsigned long)addr;
-
- /* Check we can represent the target in the instruction format */
- if (!is_offset_in_branch_range(offset))
- return 1;
-
- /* Mask out the flags and target, so they don't step on each other. */
- *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));
-
- return 0;
-}
-
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags)
{
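
Note: the two removed helpers implement the I-form branch range check and encoding that the deleted comment documents (a signed 24-bit LI field scaled by 4); the diff itself does not show where they now live. A standalone worked example of that encoding, not kernel code, using the same constants; it also reproduces the "b .+16" value used by the L1D fallback-flush fixup below:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool offset_in_branch_range(long offset)
    {
            /* byte offset must be 4-aligned and fit the sign-extended LI:'0b00' range */
            return offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3);
    }

    static uint32_t encode_b(long offset)
    {
            /* opcode 18 (0x48000000), word offset in the LI field, AA = LK = 0 */
            return 0x48000000u | ((uint32_t)offset & 0x03FFFFFCu);
    }

    int main(void)
    {
            long off = 16;   /* "b .+16" */
            if (offset_in_branch_range(off))
                    printf("b .+%ld encodes as 0x%08x\n", off, (unsigned int)encode_b(off));
            return 0;
    }
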
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 343a78826035..993d3f31832a 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -451,7 +451,7 @@ static int __do_rfi_flush_fixups(void *data)
if (types & L1D_FLUSH_FALLBACK)
/* b .+16 to fallback flush */
- instrs[0] = PPC_INST_BRANCH | 16;
+ instrs[0] = PPC_RAW_BRANCH(16);
i = 0;
if (types & L1D_FLUSH_ORI) {
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 6f79bde6d6c2..398b5694aeb7 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -15,9 +15,6 @@
#include <asm/cputable.h>
#include <asm/disassemble.h>
-extern char system_call_common[];
-extern char system_call_vectored_emulate[];
-
#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK 0xffffffff87c0ffffUL
@@ -1166,7 +1163,7 @@ static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
if (carry_in)
++val;
- op->type = COMPUTE + SETREG + SETXER;
+ op->type = COMPUTE | SETREG | SETXER;
op->reg = rd;
op->val = val;
val = truncate_if_32bit(regs->msr, val);
@@ -1187,7 +1184,7 @@ static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
{
unsigned int crval, shift;
- op->type = COMPUTE + SETCC;
+ op->type = COMPUTE | SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -1206,7 +1203,7 @@ static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
{
unsigned int crval, shift;
- op->type = COMPUTE + SETCC;
+ op->type = COMPUTE | SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
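
Note: these op->type fixups switch from arithmetic addition to bitwise OR when combining the COMPUTE base type with flag bits; OR is idempotent, while '+' would carry into unrelated bits if a flag were ever added twice or the values overlapped. A tiny standalone illustration with made-up flag values (these are not the kernel's op->type encoding):

    #include <stdio.h>

    #define COMPUTE 0x00000000u   /* hypothetical base type */
    #define SETREG  0x00010000u   /* hypothetical flag */
    #define SETCC   0x00020000u   /* hypothetical flag */

    int main(void)
    {
            unsigned int t = COMPUTE | SETCC;

            t |= SETCC;                          /* still COMPUTE | SETCC: OR is idempotent */
            printf("or : 0x%08x\n", t);

            t = (COMPUTE + SETCC) + SETCC;       /* '+' carries into the next bit... */
            printf("add: 0x%08x\n", t);          /* ...yielding a bogus flag combination */
            return 0;
    }
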
@@ -1376,7 +1373,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
if (branch_taken(word, regs, op))
op->type |= BRTAKEN;
return 1;
-#ifdef CONFIG_PPC64
case 17: /* sc */
if ((word & 0xfe2) == 2)
op->type = SYSCALL;
@@ -1388,7 +1384,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
} else
op->type = UNKNOWN;
return 0;
-#endif
case 18: /* b */
op->type = BRANCH | BRTAKEN;
imm = word & 0x03fffffc;
@@ -3643,43 +3638,22 @@ int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
goto instr_done;
-#ifdef CONFIG_PPC64
case SYSCALL: /* sc */
/*
- * N.B. this uses knowledge about how the syscall
- * entry code works. If that is changed, this will
- * need to be changed also.
+ * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
+ * single step a system call instruction:
+ *
+ * Successful completion for an instruction means that the
+ * instruction caused no other interrupt. Thus a Trace
+ * interrupt never occurs for a System Call or System Call
+ * Vectored instruction, or for a Trap instruction that
+ * traps.
*/
- if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
- cpu_has_feature(CPU_FTR_REAL_LE) &&
- regs->gpr[0] == 0x1ebe) {
- regs_set_return_msr(regs, regs->msr ^ MSR_LE);
- goto instr_done;
- }
- regs->gpr[9] = regs->gpr[13];
- regs->gpr[10] = MSR_KERNEL;
- regs->gpr[11] = regs->nip + 4;
- regs->gpr[12] = regs->msr & MSR_MASK;
- regs->gpr[13] = (unsigned long) get_paca();
- regs_set_return_ip(regs, (unsigned long) &system_call_common);
- regs_set_return_msr(regs, MSR_KERNEL);
- return 1;
-
-#ifdef CONFIG_PPC_BOOK3S_64
+ return -1;
case SYSCALL_VECTORED_0: /* scv 0 */
- regs->gpr[9] = regs->gpr[13];
- regs->gpr[10] = MSR_KERNEL;
- regs->gpr[11] = regs->nip + 4;
- regs->gpr[12] = regs->msr & MSR_MASK;
- regs->gpr[13] = (unsigned long) get_paca();
- regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
- regs_set_return_msr(regs, MSR_KERNEL);
- return 1;
-#endif
-
+ return -1;
case RFI:
return -1;
-#endif
}
return 0;
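
Note: with this hunk, sc and scv are handled like rfi: instead of hand-crafting syscall entry state, emulate_step() reports that the instruction cannot be emulated or single-stepped, and the caller lets it execute for real, which the quoted ISA wording makes safe since no trace interrupt follows a system call. A rough kernel-style sketch of a caller acting on the return convention suggested by this change (positive = emulated, zero = not handled, negative = must be executed, not emulated); the example_* helpers are hypothetical, not kernel APIs:

    #include <asm/sstep.h>    /* emulate_step() */
    #include <asm/inst.h>     /* ppc_inst_t */
    #include <asm/ptrace.h>   /* struct pt_regs */

    /* Hypothetical stand-ins for "run the instruction natively" and
     * "hardware single-step"; not real kernel interfaces. */
    static int example_execute_for_real(struct pt_regs *regs) { return 1; }
    static int example_hw_single_step(struct pt_regs *regs) { return 2; }

    static int example_try_emulate(struct pt_regs *regs, ppc_inst_t insn)
    {
            int ret = emulate_step(regs, insn);

            if (ret > 0)
                    return 0;                              /* regs already advanced past insn */
            if (ret < 0)
                    return example_execute_for_real(regs); /* sc, scv, rfi: run it for real */
            return example_hw_single_step(regs);           /* unrecognised: step it instead */
    }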