Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/Makefile                          26
-rw-r--r--  arch/powerpc/lib/alloc.c                           23
-rw-r--r--  arch/powerpc/lib/checksum_32.S                      5
-rw-r--r--  arch/powerpc/lib/checksum_64.S                      2
-rw-r--r--  arch/powerpc/lib/checksum_wrappers.c                2
-rw-r--r--  arch/powerpc/lib/code-patching.c                  990
-rw-r--r--  arch/powerpc/lib/copy_32.S                          5
-rw-r--r--  arch/powerpc/lib/copy_mc_64.S                       2
-rw-r--r--  arch/powerpc/lib/copypage_64.S                     19
-rw-r--r--  arch/powerpc/lib/copypage_power7.S                 16
-rw-r--r--  arch/powerpc/lib/copyuser_64.S                      2
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S                 20
-rw-r--r--  arch/powerpc/lib/crtsavres.S                        2
-rw-r--r--  arch/powerpc/lib/feature-fixups.c                 261
-rw-r--r--  arch/powerpc/lib/hweight_64.S                      10
-rw-r--r--  arch/powerpc/lib/mem_64.S                           2
-rw-r--r--  arch/powerpc/lib/memcmp_32.S                        2
-rw-r--r--  arch/powerpc/lib/memcmp_64.S                        6
-rw-r--r--  arch/powerpc/lib/memcpy_64.S                        2
-rw-r--r--  arch/powerpc/lib/memcpy_power7.S                   16
-rw-r--r--  arch/powerpc/lib/pmem.c                             7
-rw-r--r--  arch/powerpc/lib/qspinlock.c                      998
-rw-r--r--  arch/powerpc/lib/sstep.c                          357
-rw-r--r--  arch/powerpc/lib/string.S                           2
-rw-r--r--  arch/powerpc/lib/string_32.S                        2
-rw-r--r--  arch/powerpc/lib/string_64.S                        9
-rw-r--r--  arch/powerpc/lib/strlen_32.S                        2
-rw-r--r--  arch/powerpc/lib/test-code-patching.c             495
-rw-r--r--  arch/powerpc/lib/test_code-patching.S              20
-rw-r--r--  arch/powerpc/lib/test_emulate_step.c               27
-rw-r--r--  arch/powerpc/lib/test_emulate_step_exec_instr.S    10
-rw-r--r--  arch/powerpc/lib/vmx-helper.c                      13
-rw-r--r--  arch/powerpc/lib/xor_vmx.c                         28
-rw-r--r--  arch/powerpc/lib/xor_vmx.h                         27
-rw-r--r--  arch/powerpc/lib/xor_vmx_glue.c                    32
35 files changed, 2449 insertions, 993 deletions
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 99a7c9132422..f14ecab674a3 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -3,8 +3,6 @@
# Makefile for ppc-specific library files..
#
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
CFLAGS_code-patching.o += -fno-stack-protector
CFLAGS_feature-fixups.o += -fno-stack-protector
@@ -13,13 +11,23 @@ CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
KASAN_SANITIZE_code-patching.o := n
KASAN_SANITIZE_feature-fixups.o := n
+# restart_table.o contains functions called in the NMI interrupt path
+# which can be in real mode. Disable KASAN.
+KASAN_SANITIZE_restart_table.o := n
+KCSAN_SANITIZE_code-patching.o := n
+KCSAN_SANITIZE_feature-fixups.o := n
ifdef CONFIG_KASAN
CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
endif
-obj-y += alloc.o code-patching.o feature-fixups.o pmem.o test_code-patching.o
+CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
+obj-y += code-patching.o feature-fixups.o pmem.o
+
+obj-$(CONFIG_CODE_PATCHING_SELFTEST) += test-code-patching.o
ifndef CONFIG_KASAN
obj-y += string.o memcmp_$(BITS).o
@@ -34,8 +42,8 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
-ifeq ($(call ld-ifversion, -lt, 22500, y),y)
-extra-$(CONFIG_PPC64) += crtsavres.o
+ifndef CONFIG_LD_IS_BFD
+always-$(CONFIG_PPC64) += crtsavres.o
endif
obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
@@ -44,7 +52,9 @@ obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
memcpy_64.o copy_mc_64.o
-ifndef CONFIG_PPC_QUEUED_SPINLOCKS
+ifdef CONFIG_PPC_QUEUED_SPINLOCKS
+obj-$(CONFIG_SMP) += qspinlock.o
+else
obj64-$(CONFIG_SMP) += locks.o
endif
@@ -64,6 +74,8 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
-CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
+CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
+# Enable <altivec.h>
+CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)
obj-$(CONFIG_PPC64) += $(obj64-y)
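
Aside: the two xor_vmx.o flag changes above work together. <altivec.h> ships in the compiler's own include directory rather than the kernel tree, so the -isystem line puts it on the search path, and -mhard-float is added because AltiVec code generation is rejected when combined with the kernel's usual soft-float flags. A minimal sketch of the kind of code this enables — hypothetical, not the kernel's actual xor_vmx.c — built with -maltivec -mabi=altivec:

	#include <altivec.h>

	/* XOR one 16-byte block of src into dst using a single VMX op. */
	static void xor_one_vector(unsigned char *dst, const unsigned char *src)
	{
		vector unsigned char *d = (vector unsigned char *)dst;
		const vector unsigned char *s = (const vector unsigned char *)src;

		*d = vec_xor(*d, *s);
	}
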
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
deleted file mode 100644
index ce180870bd52..000000000000
--- a/arch/powerpc/lib/alloc.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/memblock.h>
-#include <linux/string.h>
-#include <asm/setup.h>
-
-
-void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
-{
- void *p;
-
- if (slab_is_available())
- p = kzalloc(size, mask);
- else {
- p = memblock_alloc(size, SMP_CACHE_BYTES);
- if (!p)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- size);
- }
- return p;
-}
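
Aside: the deleted zalloc_maybe_bootmem() chose an allocator by boot phase — kzalloc() once slab is available, memblock otherwise. With the helper gone, call sites make that choice directly; a minimal sketch of the two paths (hypothetical, not a specific converted caller):

	#include <linux/slab.h>
	#include <linux/memblock.h>

	static void *zalloc_by_phase(size_t size)
	{
		if (slab_is_available())
			return kzalloc(size, GFP_KERNEL);	/* zeroed; may return NULL */
		/* memblock memory is pre-zeroed; also returns NULL on failure */
		return memblock_alloc(size, SMP_CACHE_BYTES);
	}
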
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index 27d9070617df..cd00b9bdd772 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -8,12 +8,12 @@
* Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
*/
+#include <linux/export.h>
#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
.text
@@ -116,9 +116,6 @@ EXPORT_SYMBOL(__csum_partial)
EX_TABLE(8 ## n ## 7b, fault);
.text
- .stabs "arch/powerpc/lib/",N_SO,0,0,0f
- .stabs "checksum_32.S",N_SO,0,0,0f
-0:
CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 98ff51bd2f7d..d53d8f09a2c2 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -8,11 +8,11 @@
* Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
*/
+#include <linux/export.h>
#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
/*
* Computes the checksum of a memory block at buff, length len,
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index f3999cbb2fcc..1a14c8780278 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -24,7 +24,6 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
user_read_access_end();
return csum;
}
-EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
@@ -38,4 +37,3 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
user_write_access_end();
return csum;
}
-EXPORT_SYMBOL(csum_and_copy_to_user);
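
Aside: dropping the two EXPORT_SYMBOL() lines makes these helpers unavailable to modules; built-in callers keep using them unchanged. A usage sketch matching the signature visible above (hypothetical call site):

	#include <net/checksum.h>

	/* copy len bytes from userspace and fold a checksum in one pass */
	__wsum sum = csum_and_copy_from_user(user_src, kernel_dst, len);
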
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index f9a3019e37b4..f84e0337cc02 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -3,30 +3,31 @@
* Copyright 2008 Michael Ellerman, IBM Corporation.
*/
-#include <linux/kernel.h>
#include <linux/kprobes.h>
+#include <linux/mmu_context.h>
+#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
-#include <linux/mm.h>
#include <linux/cpuhotplug.h>
-#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/jump_label.h>
+#include <asm/debug.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
-#include <asm/code-patching.h>
-#include <asm/setup.h>
+#include <asm/text-patching.h>
#include <asm/inst.h>
-static int __patch_instruction(u32 *exec_addr, struct ppc_inst instr, u32 *patch_addr)
+static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
{
- if (!ppc_inst_prefixed(instr)) {
- u32 val = ppc_inst_val(instr);
+ if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) {
+ /* For big endian correctness: plain address would use the wrong half */
+ u32 val32 = val;
- __put_kernel_nofault(patch_addr, &val, u32, failed);
+ __put_kernel_nofault(patch_addr, &val32, u32, failed);
} else {
- u64 val = ppc_inst_as_ulong(instr);
-
__put_kernel_nofault(patch_addr, &val, u64, failed);
}
@@ -36,48 +37,200 @@ static int __patch_instruction(u32 *exec_addr, struct ppc_inst instr, u32 *patch
return 0;
failed:
- return -EFAULT;
+ mb(); /* sync */
+ return -EPERM;
+}
+
+int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+ if (ppc_inst_prefixed(instr))
+ return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true);
+ else
+ return __patch_mem(addr, ppc_inst_val(instr), addr, false);
}
-int raw_patch_instruction(u32 *addr, struct ppc_inst instr)
+struct patch_context {
+ union {
+ struct vm_struct *area;
+ struct mm_struct *mm;
+ };
+ unsigned long addr;
+ pte_t *pte;
+};
+
+static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);
+
+static int map_patch_area(void *addr, unsigned long text_poke_addr);
+static void unmap_patch_area(unsigned long addr);
+
+static bool mm_patch_enabled(void)
{
- return __patch_instruction(addr, instr, addr);
+ return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}
-#ifdef CONFIG_STRICT_KERNEL_RWX
-static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
+/*
+ * The following applies for Radix MMU. Hash MMU has different requirements,
+ * and so is not supported.
+ *
+ * Changing mm requires context synchronising instructions on both sides of
+ * the context switch, as well as a hwsync between the last instruction for
+ * which the address of an associated storage access was translated using
+ * the current context.
+ *
+ * switch_mm_irqs_off() performs an isync after the context switch. It is
+ * the responsibility of the caller to perform the CSI and hwsync before
+ * starting/stopping the temp mm.
+ */
+static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
+{
+ struct mm_struct *orig_mm = current->active_mm;
+
+ lockdep_assert_irqs_disabled();
+ switch_mm_irqs_off(orig_mm, temp_mm, current);
+
+ WARN_ON(!mm_is_thread_local(temp_mm));
+
+ suspend_breakpoints();
+ return orig_mm;
+}
+
+static void stop_using_temp_mm(struct mm_struct *temp_mm,
+ struct mm_struct *orig_mm)
+{
+ lockdep_assert_irqs_disabled();
+ switch_mm_irqs_off(temp_mm, orig_mm, current);
+ restore_breakpoints();
+}
static int text_area_cpu_up(unsigned int cpu)
{
struct vm_struct *area;
+ unsigned long addr;
+ int err;
- area = get_vm_area(PAGE_SIZE, VM_ALLOC);
+ area = get_vm_area(PAGE_SIZE, 0);
if (!area) {
WARN_ONCE(1, "Failed to create text area for cpu %d\n",
cpu);
return -1;
}
- this_cpu_write(text_poke_area, area);
+
+ // Map/unmap the area to ensure all page tables are pre-allocated
+ addr = (unsigned long)area->addr;
+ err = map_patch_area(empty_zero_page, addr);
+ if (err)
+ return err;
+
+ unmap_patch_area(addr);
+
+ this_cpu_write(cpu_patching_context.area, area);
+ this_cpu_write(cpu_patching_context.addr, addr);
+ this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));
return 0;
}
static int text_area_cpu_down(unsigned int cpu)
{
- free_vm_area(this_cpu_read(text_poke_area));
+ free_vm_area(this_cpu_read(cpu_patching_context.area));
+ this_cpu_write(cpu_patching_context.area, NULL);
+ this_cpu_write(cpu_patching_context.addr, 0);
+ this_cpu_write(cpu_patching_context.pte, NULL);
return 0;
}
-/*
- * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
- * we judge it as being preferable to a kernel that will crash later when
- * someone tries to use patch_instruction().
- */
+static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr)
+{
+ struct mmu_gather tlb;
+
+ tlb_gather_mmu(&tlb, mm);
+ free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
+ mmput(mm);
+}
+
+static int text_area_cpu_up_mm(unsigned int cpu)
+{
+ struct mm_struct *mm;
+ unsigned long addr;
+ pte_t *pte;
+ spinlock_t *ptl;
+
+ mm = mm_alloc();
+ if (WARN_ON(!mm))
+ goto fail_no_mm;
+
+ /*
+ * Choose a random page-aligned address from the interval
+ * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE].
+ * The lower address bound is PAGE_SIZE to avoid the zero-page.
+ */
+ addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;
+
+ /*
+ * PTE allocation uses GFP_KERNEL which means we need to
+ * pre-allocate the PTE here because we cannot do the
+ * allocation during patching when IRQs are disabled.
+ *
+ * Using get_locked_pte() to avoid open coding, the lock
+ * is unnecessary.
+ */
+ pte = get_locked_pte(mm, addr, &ptl);
+ if (!pte)
+ goto fail_no_pte;
+ pte_unmap_unlock(pte, ptl);
+
+ this_cpu_write(cpu_patching_context.mm, mm);
+ this_cpu_write(cpu_patching_context.addr, addr);
+
+ return 0;
+
+fail_no_pte:
+ put_patching_mm(mm, addr);
+fail_no_mm:
+ return -ENOMEM;
+}
+
+static int text_area_cpu_down_mm(unsigned int cpu)
+{
+ put_patching_mm(this_cpu_read(cpu_patching_context.mm),
+ this_cpu_read(cpu_patching_context.addr));
+
+ this_cpu_write(cpu_patching_context.mm, NULL);
+ this_cpu_write(cpu_patching_context.addr, 0);
+
+ return 0;
+}
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);
+
void __init poking_init(void)
{
- BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
- "powerpc/text_poke:online", text_area_cpu_up,
- text_area_cpu_down));
+ int ret;
+
+ if (mm_patch_enabled())
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "powerpc/text_poke_mm:online",
+ text_area_cpu_up_mm,
+ text_area_cpu_down_mm);
+ else
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "powerpc/text_poke:online",
+ text_area_cpu_up,
+ text_area_cpu_down);
+
+ /* cpuhp_setup_state returns >= 0 on success */
+ if (WARN_ON(ret < 0))
+ return;
+
+ static_branch_enable(&poking_init_done);
+}
+
+static unsigned long get_patch_pfn(void *addr)
+{
+ if (IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr))
+ return vmalloc_to_pfn(addr);
+ else
+ return __pa_symbol(addr) >> PAGE_SHIFT;
}
/*
@@ -85,24 +238,12 @@ void __init poking_init(void)
*/
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
- unsigned long pfn;
- int err;
-
- if (is_vmalloc_or_module_addr(addr))
- pfn = vmalloc_to_pfn(addr);
- else
- pfn = __pa_symbol(addr) >> PAGE_SHIFT;
-
- err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
+ unsigned long pfn = get_patch_pfn(addr);
- pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
- if (err)
- return -1;
-
- return 0;
+ return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}
-static inline int unmap_patch_area(unsigned long addr)
+static void unmap_patch_area(unsigned long addr)
{
pte_t *ptep;
pmd_t *pmdp;
@@ -111,128 +252,344 @@ static inline int unmap_patch_area(unsigned long addr)
pgd_t *pgdp;
pgdp = pgd_offset_k(addr);
- if (unlikely(!pgdp))
- return -EINVAL;
+ if (WARN_ON(pgd_none(*pgdp)))
+ return;
p4dp = p4d_offset(pgdp, addr);
- if (unlikely(!p4dp))
- return -EINVAL;
+ if (WARN_ON(p4d_none(*p4dp)))
+ return;
pudp = pud_offset(p4dp, addr);
- if (unlikely(!pudp))
- return -EINVAL;
+ if (WARN_ON(pud_none(*pudp)))
+ return;
pmdp = pmd_offset(pudp, addr);
- if (unlikely(!pmdp))
- return -EINVAL;
+ if (WARN_ON(pmd_none(*pmdp)))
+ return;
ptep = pte_offset_kernel(pmdp, addr);
- if (unlikely(!ptep))
- return -EINVAL;
-
- pr_devel("clearing mm %p, pte %p, addr %lx\n", &init_mm, ptep, addr);
+ if (WARN_ON(pte_none(*ptep)))
+ return;
/*
 * In hash, pte_clear flushes the tlb; in radix, we have to do it explicitly
*/
pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+}
- return 0;
+static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword)
+{
+ int err;
+ u32 *patch_addr;
+ unsigned long text_poke_addr;
+ pte_t *pte;
+ unsigned long pfn = get_patch_pfn(addr);
+ struct mm_struct *patching_mm;
+ struct mm_struct *orig_mm;
+ spinlock_t *ptl;
+
+ patching_mm = __this_cpu_read(cpu_patching_context.mm);
+ text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
+ if (!pte)
+ return -ENOMEM;
+
+ __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+
+ /* order PTE update before use, also serves as the hwsync */
+ asm volatile("ptesync": : :"memory");
+
+ /* order context switch after arbitrary prior code */
+ isync();
+
+ orig_mm = start_using_temp_mm(patching_mm);
+
+ err = __patch_mem(addr, val, patch_addr, is_dword);
+
+ /* context synchronisation performed by __patch_mem (isync or exception) */
+ stop_using_temp_mm(patching_mm, orig_mm);
+
+ pte_clear(patching_mm, text_poke_addr, pte);
+ /*
+ * ptesync to order PTE update before TLB invalidation done
+ * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
+ */
+ local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
+
+ pte_unmap_unlock(pte, ptl);
+
+ return err;
}
-static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
+static int __do_patch_mem(void *addr, unsigned long val, bool is_dword)
{
int err;
- u32 *patch_addr = NULL;
- unsigned long flags;
+ u32 *patch_addr;
unsigned long text_poke_addr;
- unsigned long kaddr = (unsigned long)addr;
+ pte_t *pte;
+ unsigned long pfn = get_patch_pfn(addr);
+
+ text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = __this_cpu_read(cpu_patching_context.pte);
+ __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync": : :"memory");
+
+ err = __patch_mem(addr, val, patch_addr, is_dword);
+
+ pte_clear(&init_mm, text_poke_addr, pte);
+ flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+
+ return err;
+}
+
+static int patch_mem(void *addr, unsigned long val, bool is_dword)
+{
+ int err;
+ unsigned long flags;
/*
 * During early boot patch_instruction is called
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
*/
- if (!this_cpu_read(text_poke_area))
- return raw_patch_instruction(addr, instr);
+ if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
+ !static_branch_likely(&poking_init_done))
+ return __patch_mem(addr, val, addr, is_dword);
local_irq_save(flags);
+ if (mm_patch_enabled())
+ err = __do_patch_mem_mm(addr, val, is_dword);
+ else
+ err = __do_patch_mem(addr, val, is_dword);
+ local_irq_restore(flags);
- text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
- if (map_patch_area(addr, text_poke_addr)) {
- err = -1;
- goto out;
- }
+ return err;
+}
+
+#ifdef CONFIG_PPC64
- patch_addr = (u32 *)(text_poke_addr + (kaddr & ~PAGE_MASK));
+int patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+ if (ppc_inst_prefixed(instr))
+ return patch_mem(addr, ppc_inst_as_ulong(instr), true);
+ else
+ return patch_mem(addr, ppc_inst_val(instr), false);
+}
+NOKPROBE_SYMBOL(patch_instruction);
- __patch_instruction(addr, instr, patch_addr);
+int patch_uint(void *addr, unsigned int val)
+{
+ if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
+ return -EINVAL;
- err = unmap_patch_area(text_poke_addr);
- if (err)
- pr_warn("failed to unmap %lx\n", text_poke_addr);
+ return patch_mem(addr, val, false);
+}
+NOKPROBE_SYMBOL(patch_uint);
-out:
- local_irq_restore(flags);
+int patch_ulong(void *addr, unsigned long val)
+{
+ if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
+ return -EINVAL;
- return err;
+ return patch_mem(addr, val, true);
}
-#else /* !CONFIG_STRICT_KERNEL_RWX */
+NOKPROBE_SYMBOL(patch_ulong);
+
+#else
-static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
+int patch_instruction(u32 *addr, ppc_inst_t instr)
{
- return raw_patch_instruction(addr, instr);
+ return patch_mem(addr, ppc_inst_val(instr), false);
}
+NOKPROBE_SYMBOL(patch_instruction);
-#endif /* CONFIG_STRICT_KERNEL_RWX */
+#endif
-int patch_instruction(u32 *addr, struct ppc_inst instr)
+static int patch_memset64(u64 *addr, u64 val, size_t count)
{
- /* Make sure we aren't patching a freed init section */
- if (init_mem_is_free && init_section_contains(addr, 4)) {
- pr_debug("Skipping init section patching addr: 0x%px\n", addr);
- return 0;
- }
- return do_patch_instruction(addr, instr);
+ for (u64 *end = addr + count; addr < end; addr++)
+ __put_kernel_nofault(addr, &val, u64, failed);
+
+ return 0;
+
+failed:
+ return -EPERM;
}
-NOKPROBE_SYMBOL(patch_instruction);
-int patch_branch(u32 *addr, unsigned long target, int flags)
+static int patch_memset32(u32 *addr, u32 val, size_t count)
{
- struct ppc_inst instr;
+ for (u32 *end = addr + count; addr < end; addr++)
+ __put_kernel_nofault(addr, &val, u32, failed);
- create_branch(&instr, addr, target, flags);
- return patch_instruction(addr, instr);
+ return 0;
+
+failed:
+ return -EPERM;
}
-bool is_offset_in_branch_range(long offset)
+static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
{
+ unsigned long start = (unsigned long)patch_addr;
+ int err;
+
+ /* Repeat instruction */
+ if (repeat_instr) {
+ ppc_inst_t instr = ppc_inst_read(code);
+
+ if (ppc_inst_prefixed(instr)) {
+ u64 val = ppc_inst_as_ulong(instr);
+
+ err = patch_memset64((u64 *)patch_addr, val, len / 8);
+ } else {
+ u32 val = ppc_inst_val(instr);
+
+ err = patch_memset32(patch_addr, val, len / 4);
+ }
+ } else {
+ err = copy_to_kernel_nofault(patch_addr, code, len);
+ }
+
+ smp_wmb(); /* smp write barrier */
+ flush_icache_range(start, start + len);
+ return err;
+}
+
+/*
+ * A page is mapped and instructions that fit the page are patched.
+ * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
+ */
+static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ struct mm_struct *patching_mm, *orig_mm;
+ unsigned long pfn = get_patch_pfn(addr);
+ unsigned long text_poke_addr;
+ spinlock_t *ptl;
+ u32 *patch_addr;
+ pte_t *pte;
+ int err;
+
+ patching_mm = __this_cpu_read(cpu_patching_context.mm);
+ text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
+ if (!pte)
+ return -ENOMEM;
+
+ __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+
+ /* order PTE update before use, also serves as the hwsync */
+ asm volatile("ptesync" ::: "memory");
+
+ /* order context switch after arbitrary prior code */
+ isync();
+
+ orig_mm = start_using_temp_mm(patching_mm);
+
+ kasan_disable_current();
+ err = __patch_instructions(patch_addr, code, len, repeat_instr);
+ kasan_enable_current();
+
+ /* context synchronisation performed by __patch_instructions */
+ stop_using_temp_mm(patching_mm, orig_mm);
+
+ pte_clear(patching_mm, text_poke_addr, pte);
/*
- * Powerpc branch instruction is :
- *
- * 0 6 30 31
- * +---------+----------------+---+---+
- * | opcode | LI |AA |LK |
- * +---------+----------------+---+---+
- * Where AA = 0 and LK = 0
- *
- * LI is a signed 24 bits integer. The real branch offset is computed
- * by: imm32 = SignExtend(LI:'0b00', 32);
- *
- * So the maximum forward branch should be:
- * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
- * The maximum backward branch should be:
- * (0xff800000 << 2) = 0xfe000000 = -0x2000000
+ * ptesync to order PTE update before TLB invalidation done
+ * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
*/
- return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
+ local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
+
+ pte_unmap_unlock(pte, ptl);
+
+ return err;
+}
+
+/*
+ * A page is mapped and instructions that fit the page are patched.
+ * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
+ */
+static int __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ unsigned long pfn = get_patch_pfn(addr);
+ unsigned long text_poke_addr;
+ u32 *patch_addr;
+ pte_t *pte;
+ int err;
+
+ text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = __this_cpu_read(cpu_patching_context.pte);
+ __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync" ::: "memory");
+
+ err = __patch_instructions(patch_addr, code, len, repeat_instr);
+
+ pte_clear(&init_mm, text_poke_addr, pte);
+ flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+
+ return err;
+}
+
+/*
+ * Patch 'addr' with 'len' bytes of instructions from 'code'.
+ *
+ * If repeat_instr is true, the same instruction is filled for
+ * 'len' bytes.
+ */
+int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
+{
+ while (len > 0) {
+ unsigned long flags;
+ size_t plen;
+ int err;
+
+ plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);
+
+ local_irq_save(flags);
+ if (mm_patch_enabled())
+ err = __do_patch_instructions_mm(addr, code, plen, repeat_instr);
+ else
+ err = __do_patch_instructions(addr, code, plen, repeat_instr);
+ local_irq_restore(flags);
+ if (err)
+ return err;
+
+ len -= plen;
+ addr = (u32 *)((unsigned long)addr + plen);
+ if (!repeat_instr)
+ code = (u32 *)((unsigned long)code + plen);
+ }
+
+ return 0;
+}
+NOKPROBE_SYMBOL(patch_instructions);
+
+int patch_branch(u32 *addr, unsigned long target, int flags)
+{
+ ppc_inst_t instr;
+
+ if (create_branch(&instr, addr, target, flags))
+ return -ERANGE;
+
+ return patch_instruction(addr, instr);
}
/*
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
*/
-bool is_conditional_branch(struct ppc_inst instr)
+bool is_conditional_branch(ppc_inst_t instr)
{
unsigned int opcode = ppc_inst_primary_opcode(instr);
@@ -250,27 +607,7 @@ bool is_conditional_branch(struct ppc_inst instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-int create_branch(struct ppc_inst *instr, const u32 *addr,
- unsigned long target, int flags)
-{
- long offset;
-
- *instr = ppc_inst(0);
- offset = target;
- if (! (flags & BRANCH_ABSOLUTE))
- offset = offset - (unsigned long)addr;
-
- /* Check we can represent the target in the instruction format */
- if (!is_offset_in_branch_range(offset))
- return 1;
-
- /* Mask out the flags and target, so they don't step on each other. */
- *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));
-
- return 0;
-}
-
-int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
+int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -280,7 +617,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
offset = offset - (unsigned long)addr;
/* Check we can represent the target in the instruction format */
- if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+ if (!is_offset_in_cond_branch_range(offset))
return 1;
/* Mask out the flags and target, so they don't step on each other. */
@@ -289,22 +626,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
return 0;
}
-static unsigned int branch_opcode(struct ppc_inst instr)
-{
- return ppc_inst_primary_opcode(instr) & 0x3F;
-}
-
-static int instr_is_branch_iform(struct ppc_inst instr)
-{
- return branch_opcode(instr) == 18;
-}
-
-static int instr_is_branch_bform(struct ppc_inst instr)
-{
- return branch_opcode(instr) == 16;
-}
-
-int instr_is_relative_branch(struct ppc_inst instr)
+int instr_is_relative_branch(ppc_inst_t instr)
{
if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
return 0;
@@ -312,7 +634,7 @@ int instr_is_relative_branch(struct ppc_inst instr)
return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}
-int instr_is_relative_link_branch(struct ppc_inst instr)
+int instr_is_relative_link_branch(ppc_inst_t instr)
{
return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}
@@ -359,7 +681,7 @@ unsigned long branch_target(const u32 *instr)
return 0;
}
-int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src)
+int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
unsigned long target;
target = branch_target(src);
@@ -373,375 +695,3 @@ int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src)
return 1;
}
-
-#ifdef CONFIG_PPC_BOOK3E_64
-void __patch_exception(int exc, unsigned long addr)
-{
- extern unsigned int interrupt_base_book3e;
- unsigned int *ibase = &interrupt_base_book3e;
-
- /* Our exceptions vectors start with a NOP and -then- a branch
- * to deal with single stepping from userspace which stops on
- * the second instruction. Thus we need to patch the second
- * instruction of the exception, not the first one
- */
-
- patch_branch(ibase + (exc / 4) + 1, addr, 0);
-}
-#endif
-
-#ifdef CONFIG_CODE_PATCHING_SELFTEST
-
-static int instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
-{
- if (instr_is_branch_iform(ppc_inst_read(instr)) ||
- instr_is_branch_bform(ppc_inst_read(instr)))
- return branch_target(instr) == addr;
-
- return 0;
-}
-
-static void __init test_trampoline(void)
-{
- asm ("nop;\n");
-}
-
-#define check(x) \
- if (!(x)) printk("code-patching: test failed at line %d\n", __LINE__);
-
-static void __init test_branch_iform(void)
-{
- int err;
- struct ppc_inst instr;
- u32 tmp[2];
- u32 *iptr = tmp;
- unsigned long addr = (unsigned long)tmp;
-
- /* The simplest case, branch to self, no flags */
- check(instr_is_branch_iform(ppc_inst(0x48000000)));
- /* All bits of target set, and flags */
- check(instr_is_branch_iform(ppc_inst(0x4bffffff)));
- /* High bit of opcode set, which is wrong */
- check(!instr_is_branch_iform(ppc_inst(0xcbffffff)));
- /* Middle bits of opcode set, which is wrong */
- check(!instr_is_branch_iform(ppc_inst(0x7bffffff)));
-
- /* Simplest case, branch to self with link */
- check(instr_is_branch_iform(ppc_inst(0x48000001)));
- /* All bits of targets set */
- check(instr_is_branch_iform(ppc_inst(0x4bfffffd)));
- /* Some bits of targets set */
- check(instr_is_branch_iform(ppc_inst(0x4bff00fd)));
- /* Must be a valid branch to start with */
- check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
-
- /* Absolute branch to 0x100 */
- patch_instruction(iptr, ppc_inst(0x48000103));
- check(instr_is_branch_to_addr(iptr, 0x100));
- /* Absolute branch to 0x420fc */
- patch_instruction(iptr, ppc_inst(0x480420ff));
- check(instr_is_branch_to_addr(iptr, 0x420fc));
- /* Maximum positive relative branch, + 20MB - 4B */
- patch_instruction(iptr, ppc_inst(0x49fffffc));
- check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
- /* Smallest negative relative branch, - 4B */
- patch_instruction(iptr, ppc_inst(0x4bfffffc));
- check(instr_is_branch_to_addr(iptr, addr - 4));
- /* Largest negative relative branch, - 32 MB */
- patch_instruction(iptr, ppc_inst(0x4a000000));
- check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
-
- /* Branch to self, with link */
- err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr));
-
- /* Branch to self - 0x100, with link */
- err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr - 0x100));
-
- /* Branch to self + 0x100, no link */
- err = create_branch(&instr, iptr, addr + 0x100, 0);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr + 0x100));
-
- /* Maximum relative negative offset, - 32 MB */
- err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
-
- /* Out of range relative negative offset, - 32 MB + 4*/
- err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
- check(err);
-
- /* Out of range relative positive offset, + 32 MB */
- err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
- check(err);
-
- /* Unaligned target */
- err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
- check(err);
-
- /* Check flags are masked correctly */
- err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr));
- check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
-}
-
-static void __init test_create_function_call(void)
-{
- u32 *iptr;
- unsigned long dest;
- struct ppc_inst instr;
-
- /* Check we can create a function call */
- iptr = (u32 *)ppc_function_entry(test_trampoline);
- dest = ppc_function_entry(test_create_function_call);
- create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, dest));
-}
-
-static void __init test_branch_bform(void)
-{
- int err;
- unsigned long addr;
- struct ppc_inst instr;
- u32 tmp[2];
- u32 *iptr = tmp;
- unsigned int flags;
-
- addr = (unsigned long)iptr;
-
- /* The simplest case, branch to self, no flags */
- check(instr_is_branch_bform(ppc_inst(0x40000000)));
- /* All bits of target set, and flags */
- check(instr_is_branch_bform(ppc_inst(0x43ffffff)));
- /* High bit of opcode set, which is wrong */
- check(!instr_is_branch_bform(ppc_inst(0xc3ffffff)));
- /* Middle bits of opcode set, which is wrong */
- check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
-
- /* Absolute conditional branch to 0x100 */
- patch_instruction(iptr, ppc_inst(0x43ff0103));
- check(instr_is_branch_to_addr(iptr, 0x100));
- /* Absolute conditional branch to 0x20fc */
- patch_instruction(iptr, ppc_inst(0x43ff20ff));
- check(instr_is_branch_to_addr(iptr, 0x20fc));
- /* Maximum positive relative conditional branch, + 32 KB - 4B */
- patch_instruction(iptr, ppc_inst(0x43ff7ffc));
- check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
- /* Smallest negative relative conditional branch, - 4B */
- patch_instruction(iptr, ppc_inst(0x43fffffc));
- check(instr_is_branch_to_addr(iptr, addr - 4));
- /* Largest negative relative conditional branch, - 32 KB */
- patch_instruction(iptr, ppc_inst(0x43ff8000));
- check(instr_is_branch_to_addr(iptr, addr - 0x8000));
-
- /* All condition code bits set & link */
- flags = 0x3ff000 | BRANCH_SET_LINK;
-
- /* Branch to self */
- err = create_cond_branch(&instr, iptr, addr, flags);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr));
-
- /* Branch to self - 0x100 */
- err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr - 0x100));
-
- /* Branch to self + 0x100 */
- err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr + 0x100));
-
- /* Maximum relative negative offset, - 32 KB */
- err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr - 0x8000));
-
- /* Out of range relative negative offset, - 32 KB + 4*/
- err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
- check(err);
-
- /* Out of range relative positive offset, + 32 KB */
- err = create_cond_branch(&instr, iptr, addr + 0x8000, flags);
- check(err);
-
- /* Unaligned target */
- err = create_cond_branch(&instr, iptr, addr + 3, flags);
- check(err);
-
- /* Check flags are masked correctly */
- err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr));
- check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
-}
-
-static void __init test_translate_branch(void)
-{
- unsigned long addr;
- void *p, *q;
- struct ppc_inst instr;
- void *buf;
-
- buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
- check(buf);
- if (!buf)
- return;
-
- /* Simple case, branch to self moved a little */
- p = buf;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- check(instr_is_branch_to_addr(p, addr));
- q = p + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(q, addr));
-
- /* Maximum negative case, move b . to addr + 32 MB */
- p = buf;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- q = buf + 0x2000000;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000)));
-
- /* Maximum positive case, move x to x - 32 MB + 4 */
- p = buf + 0x2000000;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc)));
-
- /* Jump to x + 16 MB moved to x + 20 MB */
- p = buf;
- addr = 0x1000000 + (unsigned long)buf;
- patch_branch(p, addr, BRANCH_SET_LINK);
- q = buf + 0x1400000;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Jump to x + 16 MB moved to x - 16 MB + 4 */
- p = buf + 0x1000000;
- addr = 0x2000000 + (unsigned long)buf;
- patch_branch(p, addr, 0);
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
-
- /* Conditional branch tests */
-
- /* Simple case, branch to self moved a little */
- p = buf;
- addr = (unsigned long)p;
- create_cond_branch(&instr, p, addr, 0);
- patch_instruction(p, instr);
- check(instr_is_branch_to_addr(p, addr));
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(q, addr));
-
- /* Maximum negative case, move b . to addr + 32 KB */
- p = buf;
- addr = (unsigned long)p;
- create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
- patch_instruction(p, instr);
- q = buf + 0x8000;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000)));
-
- /* Maximum positive case, move x to x - 32 KB + 4 */
- p = buf + 0x8000;
- addr = (unsigned long)p;
- create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
- patch_instruction(p, instr);
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc)));
-
- /* Jump to x + 12 KB moved to x + 20 KB */
- p = buf;
- addr = 0x3000 + (unsigned long)buf;
- create_cond_branch(&instr, p, addr, BRANCH_SET_LINK);
- patch_instruction(p, instr);
- q = buf + 0x5000;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Jump to x + 8 KB moved to x - 8 KB + 4 */
- p = buf + 0x2000;
- addr = 0x4000 + (unsigned long)buf;
- create_cond_branch(&instr, p, addr, 0);
- patch_instruction(p, instr);
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Free the buffer we were using */
- vfree(buf);
-}
-
-#ifdef CONFIG_PPC64
-static void __init test_prefixed_patching(void)
-{
- extern unsigned int code_patching_test1[];
- extern unsigned int code_patching_test1_expected[];
- extern unsigned int end_code_patching_test1[];
-
- __patch_instruction(code_patching_test1,
- ppc_inst_prefix(OP_PREFIX << 26, 0x00000000),
- code_patching_test1);
-
- check(!memcmp(code_patching_test1,
- code_patching_test1_expected,
- sizeof(unsigned int) *
- (end_code_patching_test1 - code_patching_test1)));
-}
-#else
-static inline void test_prefixed_patching(void) {}
-#endif
-
-static int __init test_code_patching(void)
-{
- printk(KERN_DEBUG "Running code patching self-tests ...\n");
-
- test_branch_iform();
- test_branch_bform();
- test_create_function_call();
- test_translate_branch();
- test_prefixed_patching();
-
- return 0;
-}
-late_initcall(test_code_patching);
-
-#endif /* CONFIG_CODE_PATCHING_SELFTEST */
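
Aside: the deletions above also drop the block comment (earlier in this file's diff) documenting the I-form branch encoding: LI is a signed 24-bit field appended with '0b00', i.e. imm32 = SignExtend(LI:'0b00', 32), which gives a word-aligned reach of -0x2000000 to +0x1fffffc bytes. Reconstructed from those removed lines, the range check that leaves this file (presumably becoming an inline helper elsewhere) is:

	#include <stdbool.h>

	/* I-form branch: +/- 32 MB reach, target must be word aligned. */
	static bool is_offset_in_branch_range(long offset)
	{
		return offset >= -0x2000000 && offset <= 0x1fffffc &&
		       !(offset & 0x3);
	}
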
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index a3bcf4786e4a..933b685e7ab6 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -4,11 +4,11 @@
*
* Copyright (C) 1996-2005 Paul Mackerras.
*/
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/code-patching-asm.h>
#include <asm/kasan.h>
@@ -57,9 +57,6 @@
EX_TABLE(8 ## n ## 7b,9 ## n ## 1b)
.text
- .stabs "arch/powerpc/lib/",N_SO,0,0,0f
- .stabs "copy_32.S",N_SO,0,0,0f
-0:
CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
diff --git a/arch/powerpc/lib/copy_mc_64.S b/arch/powerpc/lib/copy_mc_64.S
index 88d46c471493..bf1014b28fe8 100644
--- a/arch/powerpc/lib/copy_mc_64.S
+++ b/arch/powerpc/lib/copy_mc_64.S
@@ -4,9 +4,9 @@
* Derived from copyuser_power7.s by Anton Blanchard <anton@au.ibm.com>
* Author - Balbir Singh <bsingharora@gmail.com>
*/
+#include <linux/export.h>
#include <asm/ppc_asm.h>
#include <asm/errno.h>
-#include <asm/export.h>
.macro err1
100:
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index d1091b5ee5da..f33a2e6088e5 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -2,18 +2,13 @@
/*
* Copyright (C) 2008 Mark Nelson, IBM Corp.
*/
+#include <linux/export.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/feature-fixups.h>
- .section ".toc","aw"
-PPC64_CACHES:
- .tc ppc64_caches[TC],ppc64_caches
- .section ".text"
-
_GLOBAL_TOC(copy_page)
BEGIN_FTR_SECTION
lis r5,PAGE_SIZE@h
@@ -23,8 +18,18 @@ FTR_SECTION_ELSE
#endif
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
ori r5,r5,PAGE_SIZE@l
+#ifdef CONFIG_PPC_KERNEL_PCREL
+ /*
+ * Hack for toolchain - prefixed instructions cause label difference to
+ * be non-constant even if 8 byte alignment is known, so they can not
+ * be put in FTR sections.
+ */
+ LOAD_REG_ADDR(r10, ppc64_caches)
+BEGIN_FTR_SECTION
+#else
BEGIN_FTR_SECTION
- ld r10,PPC64_CACHES@toc(r2)
+ LOAD_REG_ADDR(r10, ppc64_caches)
+#endif
lwz r11,DCACHEL1LOGBLOCKSIZE(r10) /* log2 of cache block size */
lwz r12,DCACHEL1BLOCKSIZE(r10) /* get cache block size */
li r9,0
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index a9844c6353cf..07e7cec4d135 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -27,17 +27,7 @@ _GLOBAL(copypage_power7)
#endif
ori r10,r7,1 /* stream=1 */
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
- /* setup read stream 0 */
- dcbt 0,r4,0b01000 /* addr from */
- dcbt 0,r7,0b01010 /* length and depth from */
- /* setup write stream 1 */
- dcbtst 0,r9,0b01000 /* addr to */
- dcbtst 0,r10,0b01010 /* length and depth to */
- eieio
- dcbt 0,r8,0b01010 /* all streams GO */
+ DCBT_SETUP_STREAMS(r4, r7, r9, r10, r8)
#ifdef CONFIG_ALTIVEC
mflr r0
@@ -45,7 +35,7 @@ _GLOBAL(copypage_power7)
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_ops
+ bl CFUNC(enter_vmx_ops)
cmpwi r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
@@ -88,7 +78,7 @@ _GLOBAL(copypage_power7)
addi r3,r3,128
bdnz 1b
- b exit_vmx_ops /* tail call optimise */
+ b CFUNC(exit_vmx_ops) /* tail call optimise */
#else
li r0,(PAGE_SIZE/128)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index db8719a14846..9af969d2cc0c 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -2,9 +2,9 @@
/*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*/
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 28f0be523c06..8474c682a178 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -47,7 +47,7 @@
ld r15,STK_REG(R15)(r1)
ld r14,STK_REG(R14)(r1)
.Ldo_err3:
- bl exit_vmx_usercopy
+ bl CFUNC(exit_vmx_usercopy)
ld r0,STACKFRAMESIZE+16(r1)
mtlr r0
b .Lexit
@@ -272,7 +272,7 @@ err1; stb r0,0(r3)
mflr r0
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_usercopy
+ bl CFUNC(enter_vmx_usercopy)
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
@@ -298,17 +298,7 @@ err1; stb r0,0(r3)
or r7,r7,r0
ori r10,r7,1 /* stream=1 */
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
- /* setup read stream 0 */
- dcbt 0,r6,0b01000 /* addr from */
- dcbt 0,r7,0b01010 /* length and depth from */
- /* setup write stream 1 */
- dcbtst 0,r9,0b01000 /* addr to */
- dcbtst 0,r10,0b01010 /* length and depth to */
- eieio
- dcbt 0,r8,0b01010 /* all streams GO */
+ DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8)
beq cr1,.Lunwind_stack_nonvmx_copy
@@ -488,7 +478,7 @@ err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
- b exit_vmx_usercopy /* tail call optimise */
+ b CFUNC(exit_vmx_usercopy) /* tail call optimise */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
@@ -691,5 +681,5 @@ err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
- b exit_vmx_usercopy /* tail call optimise */
+ b CFUNC(exit_vmx_usercopy) /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 7e5e1c28e56a..8967903c15e9 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -46,7 +46,7 @@
.section ".text"
-#ifndef CONFIG_PPC64
+#ifndef __powerpc64__
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index cda17bee5afe..587c8cf1230f 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -16,7 +16,7 @@
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <asm/cputable.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/sections.h>
@@ -25,6 +25,13 @@
#include <asm/firmware.h>
#include <asm/inst.h>
+/*
+ * Used to generate warnings if mmu or cpu feature check functions that
+ * use static keys are called before the keys are initialized.
+ */
+bool static_key_feature_checks_initialized __read_mostly;
+EXPORT_SYMBOL_GPL(static_key_feature_checks_initialized);
+
struct fixup_entry {
unsigned long mask;
unsigned long value;
@@ -47,7 +54,7 @@ static u32 *calc_addr(struct fixup_entry *fcur, long offset)
static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
int err;
- struct ppc_inst instr;
+ ppc_inst_t instr;
instr = ppc_inst_read(src);
@@ -67,7 +74,8 @@ static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_e
return 0;
}
-static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+static int patch_feature_section_mask(unsigned long value, unsigned long mask,
+ struct fixup_entry *fcur)
{
u32 *start, *end, *alt_start, *alt_end, *src, *dest;
@@ -79,7 +87,7 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
if ((alt_end - alt_start) > (end - start))
return 1;
- if ((value & fcur->mask) == fcur->value)
+ if ((value & fcur->mask & mask) == (fcur->value & mask))
return 0;
src = alt_start;
@@ -97,7 +105,8 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
return 0;
}
-void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+static void do_feature_fixups_mask(unsigned long value, unsigned long mask,
+ void *fixup_start, void *fixup_end)
{
struct fixup_entry *fcur, *fend;
@@ -105,7 +114,7 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
fend = fixup_end;
for (; fcur < fend; fcur++) {
- if (patch_feature_section(value, fcur)) {
+ if (patch_feature_section_mask(value, mask, fcur)) {
WARN_ON(1);
printk("Unable to patch feature section at %p - %p" \
" with %p - %p\n",
@@ -117,10 +126,69 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+ do_feature_fixups_mask(value, ~0, fixup_start, fixup_end);
+}
+
+#ifdef CONFIG_PPC_BARRIER_NOSPEC
+static bool is_fixup_addr_valid(void *dest, size_t size)
+{
+ return system_state < SYSTEM_FREEING_INITMEM ||
+ !init_section_contains(dest, size);
+}
+
+static int do_patch_fixups(long *start, long *end, unsigned int *instrs, int num)
+{
+ int i;
+
+ for (i = 0; start < end; start++, i++) {
+ int j;
+ unsigned int *dest = (void *)start + *start;
+
+ if (!is_fixup_addr_valid(dest, sizeof(*instrs) * num))
+ continue;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ for (j = 0; j < num; j++)
+ patch_instruction(dest + j, ppc_inst(instrs[j]));
+ }
+ return i;
+}
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
+static int do_patch_entry_fixups(long *start, long *end, unsigned int *instrs,
+ bool do_fallback, void *fallback)
+{
+ int i;
+
+ for (i = 0; start < end; start++, i++) {
+ unsigned int *dest = (void *)start + *start;
+
+ if (!is_fixup_addr_valid(dest, sizeof(*instrs) * 3))
+ continue;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ // See comment in do_entry_flush_fixups() RE order of patching
+ if (do_fallback) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1, (unsigned long)fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
+ }
+ return i;
+}
+
static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
- unsigned int instrs[3], *dest;
+ unsigned int instrs[3];
long *start, *end;
int i;
@@ -144,23 +212,8 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
- for (i = 0; start < end; start++, i++) {
- dest = (void *)start + *start;
-
- pr_devel("patching dest %lx\n", (unsigned long)dest);
-
- // See comment in do_entry_flush_fixups() RE order of patching
- if (types & STF_BARRIER_FALLBACK) {
- patch_instruction(dest, ppc_inst(instrs[0]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- patch_branch(dest + 1,
- (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
- } else {
- patch_instruction(dest + 1, ppc_inst(instrs[1]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- patch_instruction(dest, ppc_inst(instrs[0]));
- }
- }
+ i = do_patch_entry_fixups(start, end, instrs, types & STF_BARRIER_FALLBACK,
+ &stf_barrier_fallback);
printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
(types == STF_BARRIER_NONE) ? "no" :
@@ -172,7 +225,7 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
- unsigned int instrs[6], *dest;
+ unsigned int instrs[6];
long *start, *end;
int i;
@@ -206,18 +259,8 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
}
- for (i = 0; start < end; start++, i++) {
- dest = (void *)start + *start;
+ i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs));
- pr_devel("patching dest %lx\n", (unsigned long)dest);
-
- patch_instruction(dest, ppc_inst(instrs[0]));
- patch_instruction(dest + 1, ppc_inst(instrs[1]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- patch_instruction(dest + 3, ppc_inst(instrs[3]));
- patch_instruction(dest + 4, ppc_inst(instrs[4]));
- patch_instruction(dest + 5, ppc_inst(instrs[5]));
- }
printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
(types == STF_BARRIER_NONE) ? "no" :
(types == STF_BARRIER_FALLBACK) ? "fallback" :
@@ -228,6 +271,7 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
static bool stf_exit_reentrant = false;
static bool rfi_exit_reentrant = false;
+static DEFINE_MUTEX(exit_flush_lock);
static int __do_stf_barrier_fixups(void *data)
{
@@ -253,6 +297,9 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
* low level interrupt exit code before patching. After the patching,
* if allowed, then flip the branch to allow fast exits.
*/
+
+ // Prevent static key update races with do_rfi_flush_fixups()
+ mutex_lock(&exit_flush_lock);
static_branch_enable(&interrupt_exit_not_reentrant);
stop_machine(__do_stf_barrier_fixups, &types, NULL);
@@ -264,11 +311,13 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
if (stf_exit_reentrant && rfi_exit_reentrant)
static_branch_disable(&interrupt_exit_not_reentrant);
+
+ mutex_unlock(&exit_flush_lock);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
{
- unsigned int instrs[4], *dest;
+ unsigned int instrs[4];
long *start, *end;
int i;
@@ -294,17 +343,7 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
if (types & L1D_FLUSH_MTTRIG)
instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
- for (i = 0; start < end; start++, i++) {
- dest = (void *)start + *start;
-
- pr_devel("patching dest %lx\n", (unsigned long)dest);
-
- patch_instruction(dest, ppc_inst(instrs[0]));
-
- patch_instruction(dest + 1, ppc_inst(instrs[1]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- patch_instruction(dest + 3, ppc_inst(instrs[3]));
- }
+ i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs));
printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
(types == L1D_FLUSH_NONE) ? "no" :
@@ -319,7 +358,7 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
static int __do_entry_flush_fixups(void *data)
{
enum l1d_flush_type types = *(enum l1d_flush_type *)data;
- unsigned int instrs[3], *dest;
+ unsigned int instrs[3];
long *start, *end;
int i;
@@ -369,42 +408,13 @@ static int __do_entry_flush_fixups(void *data)
start = PTRRELOC(&__start___entry_flush_fixup);
end = PTRRELOC(&__stop___entry_flush_fixup);
- for (i = 0; start < end; start++, i++) {
- dest = (void *)start + *start;
-
- pr_devel("patching dest %lx\n", (unsigned long)dest);
-
- if (types == L1D_FLUSH_FALLBACK) {
- patch_instruction(dest, ppc_inst(instrs[0]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- patch_branch(dest + 1,
- (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
- } else {
- patch_instruction(dest + 1, ppc_inst(instrs[1]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- patch_instruction(dest, ppc_inst(instrs[0]));
- }
- }
+ i = do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK,
+ &entry_flush_fallback);
start = PTRRELOC(&__start___scv_entry_flush_fixup);
end = PTRRELOC(&__stop___scv_entry_flush_fixup);
- for (; start < end; start++, i++) {
- dest = (void *)start + *start;
-
- pr_devel("patching dest %lx\n", (unsigned long)dest);
-
- if (types == L1D_FLUSH_FALLBACK) {
- patch_instruction(dest, ppc_inst(instrs[0]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- patch_branch(dest + 1,
- (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
- } else {
- patch_instruction(dest + 1, ppc_inst(instrs[1]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- patch_instruction(dest, ppc_inst(instrs[0]));
- }
- }
-
+ i += do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK,
+ &scv_entry_flush_fallback);
printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
(types == L1D_FLUSH_NONE) ? "no" :
@@ -432,7 +442,7 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
static int __do_rfi_flush_fixups(void *data)
{
enum l1d_flush_type types = *(enum l1d_flush_type *)data;
- unsigned int instrs[3], *dest;
+ unsigned int instrs[3];
long *start, *end;
int i;
@@ -445,7 +455,7 @@ static int __do_rfi_flush_fixups(void *data)
if (types & L1D_FLUSH_FALLBACK)
/* b .+16 to fallback flush */
- instrs[0] = PPC_INST_BRANCH | 16;
+ instrs[0] = PPC_RAW_BRANCH(16);
i = 0;
if (types & L1D_FLUSH_ORI) {
@@ -456,15 +466,7 @@ static int __do_rfi_flush_fixups(void *data)
if (types & L1D_FLUSH_MTTRIG)
instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
- for (i = 0; start < end; start++, i++) {
- dest = (void *)start + *start;
-
- pr_devel("patching dest %lx\n", (unsigned long)dest);
-
- patch_instruction(dest, ppc_inst(instrs[0]));
- patch_instruction(dest + 1, ppc_inst(instrs[1]));
- patch_instruction(dest + 2, ppc_inst(instrs[2]));
- }
+ i = do_patch_fixups(start, end, instrs, ARRAY_SIZE(instrs));
printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
(types == L1D_FLUSH_NONE) ? "no" :
@@ -486,6 +488,9 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
* without stop_machine, so this could be achieved with a broadcast
* IPI instead, but this matches the stf sequence.
*/
+
+ // Prevent static key update races with do_stf_barrier_fixups()
+ mutex_lock(&exit_flush_lock);
static_branch_enable(&interrupt_exit_not_reentrant);
stop_machine(__do_rfi_flush_fixups, &types, NULL);
@@ -497,11 +502,13 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
if (stf_exit_reentrant && rfi_exit_reentrant)
static_branch_disable(&interrupt_exit_not_reentrant);
+
+ mutex_unlock(&exit_flush_lock);
}
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
- unsigned int instr, *dest;
+ unsigned int instr;
long *start, *end;
int i;
@@ -515,12 +522,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
- for (i = 0; start < end; start++, i++) {
- dest = (void *)start + *start;
-
- pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, ppc_inst(instr));
- }
+ i = do_patch_fixups(start, end, &instr, 1);
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
@@ -539,10 +541,10 @@ void do_barrier_nospec_fixups(bool enable)
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
- unsigned int instr[2], *dest;
+ unsigned int instr[2];
long *start, *end;
int i;
@@ -558,18 +560,12 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
instr[1] = PPC_RAW_SYNC();
}
- for (i = 0; start < end; start++, i++) {
- dest = (void *)start + *start;
-
- pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, ppc_inst(instr[0]));
- patch_instruction(dest + 1, ppc_inst(instr[1]));
- }
+ i = do_patch_fixups(start, end, instr, ARRAY_SIZE(instr));
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
-static void patch_btb_flush_section(long *curr)
+static void __init patch_btb_flush_section(long *curr)
{
unsigned int *start, *end;
@@ -581,7 +577,7 @@ static void patch_btb_flush_section(long *curr)
}
}
-void do_btb_flush_fixups(void)
+void __init do_btb_flush_fixups(void)
{
long *start, *end;
@@ -591,7 +587,7 @@ void do_btb_flush_fixups(void)
for (; start < end; start += 2)
patch_btb_flush_section(start);
}
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
@@ -610,10 +606,10 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
-static void do_final_fixups(void)
+static void __init do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
- struct ppc_inst inst;
+ ppc_inst_t inst;
u32 *src, *dest, *end;
if (PHYSICAL_START == 0)
@@ -669,6 +665,17 @@ void __init apply_feature_fixups(void)
do_final_fixups();
}
+void __init update_mmu_feature_fixups(unsigned long mask)
+{
+ saved_mmu_features &= ~mask;
+ saved_mmu_features |= cur_cpu_spec->mmu_features & mask;
+
+ do_feature_fixups_mask(cur_cpu_spec->mmu_features, mask,
+ PTRRELOC(&__start___mmu_ftr_fixup),
+ PTRRELOC(&__stop___mmu_ftr_fixup));
+ mmu_feature_keys_init();
+}
+
void __init setup_feature_keys(void)
{
/*
@@ -679,6 +686,7 @@ void __init setup_feature_keys(void)
jump_label_init();
cpu_feature_keys_init();
mmu_feature_keys_init();
+ static_key_feature_checks_initialized = true;
}
static int __init check_features(void)
@@ -701,15 +709,20 @@ late_initcall(check_features);
#define check(x) \
if (!(x)) printk("feature-fixups: test failed at line %d\n", __LINE__);
+static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+{
+ return patch_feature_section_mask(value, ~0, fcur);
+}
+
/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;
-static long calc_offset(struct fixup_entry *entry, unsigned int *p)
+static long __init calc_offset(struct fixup_entry *entry, unsigned int *p)
{
return (unsigned long)p - (unsigned long)entry;
}
-static void test_basic_patching(void)
+static void __init test_basic_patching(void)
{
extern unsigned int ftr_fixup_test1[];
extern unsigned int end_ftr_fixup_test1[];
@@ -740,7 +753,7 @@ static void test_basic_patching(void)
check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}
-static void test_alternative_patching(void)
+static void __init test_alternative_patching(void)
{
extern unsigned int ftr_fixup_test2[];
extern unsigned int end_ftr_fixup_test2[];
@@ -773,7 +786,7 @@ static void test_alternative_patching(void)
check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
-static void test_alternative_case_too_big(void)
+static void __init test_alternative_case_too_big(void)
{
extern unsigned int ftr_fixup_test3[];
extern unsigned int end_ftr_fixup_test3[];
@@ -799,7 +812,7 @@ static void test_alternative_case_too_big(void)
check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
-static void test_alternative_case_too_small(void)
+static void __init test_alternative_case_too_small(void)
{
extern unsigned int ftr_fixup_test4[];
extern unsigned int end_ftr_fixup_test4[];
@@ -845,7 +858,7 @@ static void test_alternative_case_with_branch(void)
check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}
-static void test_alternative_case_with_external_branch(void)
+static void __init test_alternative_case_with_external_branch(void)
{
extern unsigned int ftr_fixup_test6[];
extern unsigned int end_ftr_fixup_test6[];
@@ -855,7 +868,7 @@ static void test_alternative_case_with_external_branch(void)
check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}
-static void test_alternative_case_with_branch_to_end(void)
+static void __init test_alternative_case_with_branch_to_end(void)
{
extern unsigned int ftr_fixup_test7[];
extern unsigned int end_ftr_fixup_test7[];
@@ -865,7 +878,7 @@ static void test_alternative_case_with_branch_to_end(void)
check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}
-static void test_cpu_macros(void)
+static void __init test_cpu_macros(void)
{
extern u8 ftr_fixup_test_FTR_macros[];
extern u8 ftr_fixup_test_FTR_macros_expected[];
@@ -877,7 +890,7 @@ static void test_cpu_macros(void)
ftr_fixup_test_FTR_macros_expected, size) == 0);
}
-static void test_fw_macros(void)
+static void __init test_fw_macros(void)
{
#ifdef CONFIG_PPC64
extern u8 ftr_fixup_test_FW_FTR_macros[];
@@ -891,7 +904,7 @@ static void test_fw_macros(void)
#endif
}
-static void test_lwsync_macros(void)
+static void __init test_lwsync_macros(void)
{
extern u8 lwsync_fixup_test[];
extern u8 end_lwsync_fixup_test[];
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index 6effad901ef7..151875050da9 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -5,16 +5,16 @@
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/feature-fixups.h>
/* Note: This code relies on -mminimal-toc */
_GLOBAL(__arch_hweight8)
BEGIN_FTR_SECTION
- b __sw_hweight8
+ b CFUNC(__sw_hweight8)
nop
nop
FTR_SECTION_ELSE
@@ -26,7 +26,7 @@ EXPORT_SYMBOL(__arch_hweight8)
_GLOBAL(__arch_hweight16)
BEGIN_FTR_SECTION
- b __sw_hweight16
+ b CFUNC(__sw_hweight16)
nop
nop
nop
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(__arch_hweight16)
_GLOBAL(__arch_hweight32)
BEGIN_FTR_SECTION
- b __sw_hweight32
+ b CFUNC(__sw_hweight32)
nop
nop
nop
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(__arch_hweight32)
_GLOBAL(__arch_hweight64)
BEGIN_FTR_SECTION
- b __sw_hweight64
+ b CFUNC(__sw_hweight64)
nop
nop
nop
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 9351ffab409c..6fd06cd20faa 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -4,10 +4,10 @@
*
* Copyright (C) 1996 Paul Mackerras.
*/
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/kasan.h>
#ifndef CONFIG_KASAN
diff --git a/arch/powerpc/lib/memcmp_32.S b/arch/powerpc/lib/memcmp_32.S
index 5010e376f7b8..f6fca5664e91 100644
--- a/arch/powerpc/lib/memcmp_32.S
+++ b/arch/powerpc/lib/memcmp_32.S
@@ -7,8 +7,8 @@
*
*/
+#include <linux/export.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
.text
diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
index 384218df71ba..142c666d3897 100644
--- a/arch/powerpc/lib/memcmp_64.S
+++ b/arch/powerpc/lib/memcmp_64.S
@@ -3,8 +3,8 @@
* Author: Anton Blanchard <anton@au.ibm.com>
* Copyright 2015 IBM Corporation.
*/
+#include <linux/export.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/ppc-opcode.h>
#define off8 r6
@@ -44,7 +44,7 @@
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
std r0,16(r1); \
stdu r1,-STACKFRAMESIZE(r1); \
- bl enter_vmx_ops; \
+ bl CFUNC(enter_vmx_ops); \
cmpwi cr1,r3,0; \
ld r0,STACKFRAMESIZE+16(r1); \
ld r3,STK_REG(R31)(r1); \
@@ -60,7 +60,7 @@
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1); \
std r0,16(r1); \
stdu r1,-STACKFRAMESIZE(r1); \
- bl exit_vmx_ops; \
+ bl CFUNC(exit_vmx_ops); \
ld r0,STACKFRAMESIZE+16(r1); \
ld r3,STK_REG(R31)(r1); \
ld r4,STK_REG(R30)(r1); \
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 016c91e958d8..b5a67e20143f 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -2,9 +2,9 @@
/*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*/
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/kasan.h>
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 54f226333c94..b7c5e7fca8b9 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -218,7 +218,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
std r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl enter_vmx_ops
+ bl CFUNC(enter_vmx_ops)
cmpwi cr1,r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STK_REG(R31)(r1)
@@ -244,15 +244,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
or r7,r7,r0
ori r10,r7,1 /* stream=1 */
- lis r8,0x8000 /* GO=1 */
- clrldi r8,r8,32
-
- dcbt 0,r6,0b01000
- dcbt 0,r7,0b01010
- dcbtst 0,r9,0b01000
- dcbtst 0,r10,0b01010
- eieio
- dcbt 0,r8,0b01010 /* GO */
+ DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8)
beq cr1,.Lunwind_stack_nonvmx_copy
@@ -433,7 +425,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- b exit_vmx_ops /* tail call optimise */
+ b CFUNC(exit_vmx_ops) /* tail call optimise */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
@@ -637,5 +629,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
15: addi r1,r1,STACKFRAMESIZE
ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
- b exit_vmx_ops /* tail call optimise */
+ b CFUNC(exit_vmx_ops) /* tail call optimise */
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
index eb2919ddf9b9..4e724c4c01ad 100644
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -85,10 +85,3 @@ void memcpy_flushcache(void *dest, const void *src, size_t size)
clean_pmem_range(start, start + size);
}
EXPORT_SYMBOL(memcpy_flushcache);
-
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
- size_t len)
-{
- memcpy_flushcache(to, page_to_virt(page) + offset, len);
-}
-EXPORT_SYMBOL(memcpy_page_flushcache);
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
new file mode 100644
index 000000000000..95ab4cdf582e
--- /dev/null
+++ b/arch/powerpc/lib/qspinlock.c
@@ -0,0 +1,998 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/percpu.h>
+#include <linux/processor.h>
+#include <linux/smp.h>
+#include <linux/topology.h>
+#include <linux/sched/clock.h>
+#include <asm/qspinlock.h>
+#include <asm/paravirt.h>
+#include <trace/events/lock.h>
+
+#define MAX_NODES 4
+
+struct qnode {
+ struct qnode *next;
+ struct qspinlock *lock;
+ int cpu;
+ u8 sleepy; /* 1 if the previous vCPU was preempted or
+ * if the previous node was sleepy */
+ u8 locked; /* 1 if lock acquired */
+};
+
+struct qnodes {
+ int count;
+ struct qnode nodes[MAX_NODES];
+};
+
+/* Tuning parameters */
+static int steal_spins __read_mostly = (1 << 5);
+static int remote_steal_spins __read_mostly = (1 << 2);
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+static const bool maybe_stealers = true;
+#else
+static bool maybe_stealers __read_mostly = true;
+#endif
+static int head_spins __read_mostly = (1 << 8);
+
+static bool pv_yield_owner __read_mostly = true;
+static bool pv_yield_allow_steal __read_mostly = false;
+static bool pv_spin_on_preempted_owner __read_mostly = false;
+static bool pv_sleepy_lock __read_mostly = true;
+static bool pv_sleepy_lock_sticky __read_mostly = false;
+static u64 pv_sleepy_lock_interval_ns __read_mostly = 0;
+static int pv_sleepy_lock_factor __read_mostly = 256;
+static bool pv_yield_prev __read_mostly = true;
+static bool pv_yield_sleepy_owner __read_mostly = true;
+static bool pv_prod_head __read_mostly = false;
+
+static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
+static DEFINE_PER_CPU_ALIGNED(u64, sleepy_lock_seen_clock);
+
+#if _Q_SPIN_SPEC_BARRIER == 1
+#define spec_barrier() do { asm volatile("ori 31,31,0" ::: "memory"); } while (0)
+#else
+#define spec_barrier() do { } while (0)
+#endif
+
+static __always_inline bool recently_sleepy(void)
+{
+ /* pv_sleepy_lock is true when this is called */
+ if (pv_sleepy_lock_interval_ns) {
+ u64 seen = this_cpu_read(sleepy_lock_seen_clock);
+
+ if (seen) {
+ u64 delta = sched_clock() - seen;
+ if (delta < pv_sleepy_lock_interval_ns)
+ return true;
+ this_cpu_write(sleepy_lock_seen_clock, 0);
+ }
+ }
+
+ return false;
+}
+
+static __always_inline int get_steal_spins(bool paravirt, bool sleepy)
+{
+ if (paravirt && sleepy)
+ return steal_spins * pv_sleepy_lock_factor;
+ else
+ return steal_spins;
+}
+
+static __always_inline int get_remote_steal_spins(bool paravirt, bool sleepy)
+{
+ if (paravirt && sleepy)
+ return remote_steal_spins * pv_sleepy_lock_factor;
+ else
+ return remote_steal_spins;
+}
+
+static __always_inline int get_head_spins(bool paravirt, bool sleepy)
+{
+ if (paravirt && sleepy)
+ return head_spins * pv_sleepy_lock_factor;
+ else
+ return head_spins;
+}
+
+static inline u32 encode_tail_cpu(int cpu)
+{
+ return (cpu + 1) << _Q_TAIL_CPU_OFFSET;
+}
+
+static inline int decode_tail_cpu(u32 val)
+{
+ return (val >> _Q_TAIL_CPU_OFFSET) - 1;
+}
+
+static inline int get_owner_cpu(u32 val)
+{
+ return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
+}
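
The tail encodes cpu + 1 rather than cpu so that an all-zero tail field unambiguously means "no queue". A minimal round-trip sketch follows; the real shift comes from _Q_TAIL_CPU_OFFSET in qspinlock_types.h, and the value used below is assumed purely for illustration:

/* EX_TAIL_SHIFT is a stand-in for _Q_TAIL_CPU_OFFSET; value assumed. */
#define EX_TAIL_SHIFT	16

static inline unsigned int ex_encode_tail(int cpu)
{
	return (cpu + 1) << EX_TAIL_SHIFT;	/* cpu 0 encodes non-zero */
}

static inline int ex_decode_tail(unsigned int val)
{
	return (val >> EX_TAIL_SHIFT) - 1;	/* zero tail decodes to -1 */
}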
+
+/*
+ * Try to acquire the lock if it was not already locked. If the tail matches
+ * mytail then clear it, otherwise leave it unchanged. Return previous value.
+ *
+ * This is used by the head of the queue to acquire the lock and clean up
+ * its tail if it was the last one queued.
+ */
+static __always_inline u32 trylock_clean_tail(struct qspinlock *lock, u32 tail)
+{
+ u32 newval = queued_spin_encode_locked_val();
+ u32 prev, tmp;
+
+ asm volatile(
+"1: lwarx %0,0,%2,%7 # trylock_clean_tail \n"
+ /* This test is necessary if there could be stealers */
+" andi. %1,%0,%5 \n"
+" bne 3f \n"
+ /* Test whether the lock tail == mytail */
+" and %1,%0,%6 \n"
+" cmpw 0,%1,%3 \n"
+ /* Merge the new locked value */
+" or %1,%1,%4 \n"
+" bne 2f \n"
+ /* If the lock tail matched, then clear it, otherwise leave it. */
+" andc %1,%1,%6 \n"
+"2: stwcx. %1,0,%2 \n"
+" bne- 1b \n"
+"\t" PPC_ACQUIRE_BARRIER " \n"
+"3: \n"
+ : "=&r" (prev), "=&r" (tmp)
+ : "r" (&lock->val), "r"(tail), "r" (newval),
+ "i" (_Q_LOCKED_VAL),
+ "r" (_Q_TAIL_CPU_MASK),
+ "i" (_Q_SPIN_EH_HINT)
+ : "cr0", "memory");
+
+ return prev;
+}
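
For readability, the larx/stcx. sequence above amounts to the following C-level loop. This is a sketch only: the real code must keep the whole read-modify-write inside a single reservation, which plain C with cmpxchg cannot strictly express.

/* Illustrative equivalent of trylock_clean_tail(); not for real use. */
static u32 trylock_clean_tail_sketch(struct qspinlock *lock, u32 tail)
{
	u32 old, new;

	do {
		old = READ_ONCE(lock->val);
		if (old & _Q_LOCKED_VAL)
			return old;	/* locked: fail, caller re-queues */
		new = (old & _Q_TAIL_CPU_MASK) | queued_spin_encode_locked_val();
		if ((old & _Q_TAIL_CPU_MASK) == tail)
			new &= ~_Q_TAIL_CPU_MASK;	/* we were last queued */
	} while (cmpxchg_acquire(&lock->val, old, new) != old);

	return old;
}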
+
+/*
+ * Publish our tail, replacing previous tail. Return previous value.
+ *
+ * This provides a release barrier for publishing node, this pairs with the
+ * acquire barrier in get_tail_qnode() when the next CPU finds this tail
+ * value.
+ */
+static __always_inline u32 publish_tail_cpu(struct qspinlock *lock, u32 tail)
+{
+ u32 prev, tmp;
+
+ kcsan_release();
+
+ asm volatile(
+"\t" PPC_RELEASE_BARRIER " \n"
+"1: lwarx %0,0,%2 # publish_tail_cpu \n"
+" andc %1,%0,%4 \n"
+" or %1,%1,%3 \n"
+" stwcx. %1,0,%2 \n"
+" bne- 1b \n"
+ : "=&r" (prev), "=&r"(tmp)
+ : "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK)
+ : "cr0", "memory");
+
+ return prev;
+}
+
+static __always_inline u32 set_mustq(struct qspinlock *lock)
+{
+ u32 prev;
+
+ asm volatile(
+"1: lwarx %0,0,%1 # set_mustq \n"
+" or %0,%0,%2 \n"
+" stwcx. %0,0,%1 \n"
+" bne- 1b \n"
+ : "=&r" (prev)
+ : "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
+ : "cr0", "memory");
+
+ return prev;
+}
+
+static __always_inline u32 clear_mustq(struct qspinlock *lock)
+{
+ u32 prev;
+
+ asm volatile(
+"1: lwarx %0,0,%1 # clear_mustq \n"
+" andc %0,%0,%2 \n"
+" stwcx. %0,0,%1 \n"
+" bne- 1b \n"
+ : "=&r" (prev)
+ : "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
+ : "cr0", "memory");
+
+ return prev;
+}
+
+static __always_inline bool try_set_sleepy(struct qspinlock *lock, u32 old)
+{
+ u32 prev;
+ u32 new = old | _Q_SLEEPY_VAL;
+
+ BUG_ON(!(old & _Q_LOCKED_VAL));
+ BUG_ON(old & _Q_SLEEPY_VAL);
+
+ asm volatile(
+"1: lwarx %0,0,%1 # try_set_sleepy \n"
+" cmpw 0,%0,%2 \n"
+" bne- 2f \n"
+" stwcx. %3,0,%1 \n"
+" bne- 1b \n"
+"2: \n"
+ : "=&r" (prev)
+ : "r" (&lock->val), "r"(old), "r" (new)
+ : "cr0", "memory");
+
+ return likely(prev == old);
+}
+
+static __always_inline void seen_sleepy_owner(struct qspinlock *lock, u32 val)
+{
+ if (pv_sleepy_lock) {
+ if (pv_sleepy_lock_interval_ns)
+ this_cpu_write(sleepy_lock_seen_clock, sched_clock());
+ if (!(val & _Q_SLEEPY_VAL))
+ try_set_sleepy(lock, val);
+ }
+}
+
+static __always_inline void seen_sleepy_lock(void)
+{
+ if (pv_sleepy_lock && pv_sleepy_lock_interval_ns)
+ this_cpu_write(sleepy_lock_seen_clock, sched_clock());
+}
+
+static __always_inline void seen_sleepy_node(void)
+{
+ if (pv_sleepy_lock) {
+ if (pv_sleepy_lock_interval_ns)
+ this_cpu_write(sleepy_lock_seen_clock, sched_clock());
+ /* Don't set sleepy because we likely have a stale val */
+ }
+}
+
+static struct qnode *get_tail_qnode(struct qspinlock *lock, int prev_cpu)
+{
+ struct qnodes *qnodesp = per_cpu_ptr(&qnodes, prev_cpu);
+ int idx;
+
+ /*
+ * After publishing the new tail and finding a previous tail in the
+ * previous val (which is the control dependency), this barrier
+ * orders the release barrier in publish_tail_cpu performed by the
+ * last CPU, with subsequently looking at its qnode structures
+ * after the barrier.
+ */
+ smp_acquire__after_ctrl_dep();
+
+ for (idx = 0; idx < MAX_NODES; idx++) {
+ struct qnode *qnode = &qnodesp->nodes[idx];
+ if (qnode->lock == lock)
+ return qnode;
+ }
+
+ BUG();
+}
+
+/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
+static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
+{
+ int owner;
+ u32 yield_count;
+ bool preempted = false;
+
+ BUG_ON(!(val & _Q_LOCKED_VAL));
+
+ if (!paravirt)
+ goto relax;
+
+ if (!pv_yield_owner)
+ goto relax;
+
+ owner = get_owner_cpu(val);
+ yield_count = yield_count_of(owner);
+
+ if ((yield_count & 1) == 0)
+ goto relax; /* owner vcpu is running */
+
+ spin_end();
+
+ seen_sleepy_owner(lock, val);
+ preempted = true;
+
+ /*
+	 * Read the lock word after sampling the yield count. On the other side
+	 * there may be a wmb, because the yield count update is done by the
+	 * hypervisor on preemption and the lock word update by the OS; this
+	 * ordering might reduce the chance of out-of-order accesses and
+	 * improve the heuristic.
+ */
+ smp_rmb();
+
+ if (READ_ONCE(lock->val) == val) {
+ if (mustq)
+ clear_mustq(lock);
+ yield_to_preempted(owner, yield_count);
+ if (mustq)
+ set_mustq(lock);
+ spin_begin();
+
+ /* Don't relax if we yielded. Maybe we should? */
+ return preempted;
+ }
+ spin_begin();
+relax:
+ spin_cpu_relax();
+
+ return preempted;
+}
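
The parity test above relies on the convention, stated by the in-line comment, that the hypervisor bumps the yield count on each dispatch transition, so an even count means the owner vCPU is currently running. A hedged helper spelling the heuristic out (semantics taken from the comment in the code, not re-verified against the PAPR spec):

/* Sketch only: mirrors the (yield_count & 1) check above. */
static inline bool owner_looks_preempted(int owner)
{
	return (yield_count_of(owner) & 1) != 0;	/* odd: preempted */
}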
+
+/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
+static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+{
+ return __yield_to_locked_owner(lock, val, paravirt, false);
+}
+
+/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
+static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+{
+ bool mustq = false;
+
+ if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal)
+ mustq = true;
+
+ return __yield_to_locked_owner(lock, val, paravirt, mustq);
+}
+
+static __always_inline void propagate_sleepy(struct qnode *node, u32 val, bool paravirt)
+{
+ struct qnode *next;
+ int owner;
+
+ if (!paravirt)
+ return;
+ if (!pv_yield_sleepy_owner)
+ return;
+
+ next = READ_ONCE(node->next);
+ if (!next)
+ return;
+
+ if (next->sleepy)
+ return;
+
+ owner = get_owner_cpu(val);
+ if (vcpu_is_preempted(owner))
+ next->sleepy = 1;
+}
+
+/* Called inside spin_begin() */
+static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, int prev_cpu, bool paravirt)
+{
+ u32 yield_count;
+ bool preempted = false;
+
+ if (!paravirt)
+ goto relax;
+
+ if (!pv_yield_sleepy_owner)
+ goto yield_prev;
+
+ /*
+ * If the previous waiter was preempted it might not be able to
+ * propagate sleepy to us, so check the lock in that case too.
+ */
+ if (node->sleepy || vcpu_is_preempted(prev_cpu)) {
+ u32 val = READ_ONCE(lock->val);
+
+ if (val & _Q_LOCKED_VAL) {
+ if (node->next && !node->next->sleepy) {
+ /*
+ * Propagate sleepy to next waiter. Only if
+ * owner is preempted, which allows the queue
+ * to become "non-sleepy" if vCPU preemption
+ * ceases to occur, even if the lock remains
+ * highly contended.
+ */
+ if (vcpu_is_preempted(get_owner_cpu(val)))
+ node->next->sleepy = 1;
+ }
+
+ preempted = yield_to_locked_owner(lock, val, paravirt);
+ if (preempted)
+ return preempted;
+ }
+ node->sleepy = false;
+ }
+
+yield_prev:
+ if (!pv_yield_prev)
+ goto relax;
+
+ yield_count = yield_count_of(prev_cpu);
+ if ((yield_count & 1) == 0)
+ goto relax; /* owner vcpu is running */
+
+ spin_end();
+
+ preempted = true;
+ seen_sleepy_node();
+
+ smp_rmb(); /* See __yield_to_locked_owner comment */
+
+ if (!READ_ONCE(node->locked)) {
+ yield_to_preempted(prev_cpu, yield_count);
+ spin_begin();
+ return preempted;
+ }
+ spin_begin();
+
+relax:
+ spin_cpu_relax();
+
+ return preempted;
+}
+
+static __always_inline bool steal_break(u32 val, int iters, bool paravirt, bool sleepy)
+{
+ if (iters >= get_steal_spins(paravirt, sleepy))
+ return true;
+
+ if (IS_ENABLED(CONFIG_NUMA) &&
+ (iters >= get_remote_steal_spins(paravirt, sleepy))) {
+ int cpu = get_owner_cpu(val);
+ if (numa_node_id() != cpu_to_node(cpu))
+ return true;
+ }
+ return false;
+}
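
With the defaults declared at the top of the file, steal_break() keeps stealing for up to 32 iterations against a local owner but gives up after 4 against an owner on a remote NUMA node; in the sleepy paravirt case both budgets are multiplied by pv_sleepy_lock_factor. A worked sketch, assuming the default non-sleepy tunables:

/* Hypothetical: steal_break() specialised to the default tunables. */
static bool steal_break_defaults(u32 val, int iters)
{
	if (iters >= 32)	/* steal_spins default, 1 << 5 */
		return true;
	if (IS_ENABLED(CONFIG_NUMA) && iters >= 4 &&	/* remote default, 1 << 2 */
	    numa_node_id() != cpu_to_node(get_owner_cpu(val)))
		return true;
	return false;
}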
+
+static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
+{
+ bool seen_preempted = false;
+ bool sleepy = false;
+ int iters = 0;
+ u32 val;
+
+ if (!steal_spins) {
+ /* XXX: should spin_on_preempted_owner do anything here? */
+ return false;
+ }
+
+ /* Attempt to steal the lock */
+ spin_begin();
+ do {
+ bool preempted = false;
+
+ val = READ_ONCE(lock->val);
+ if (val & _Q_MUST_Q_VAL)
+ break;
+ spec_barrier();
+
+ if (unlikely(!(val & _Q_LOCKED_VAL))) {
+ spin_end();
+ if (__queued_spin_trylock_steal(lock))
+ return true;
+ spin_begin();
+ } else {
+ preempted = yield_to_locked_owner(lock, val, paravirt);
+ }
+
+ if (paravirt && pv_sleepy_lock) {
+ if (!sleepy) {
+ if (val & _Q_SLEEPY_VAL) {
+ seen_sleepy_lock();
+ sleepy = true;
+ } else if (recently_sleepy()) {
+ sleepy = true;
+ }
+ }
+ if (pv_sleepy_lock_sticky && seen_preempted &&
+ !(val & _Q_SLEEPY_VAL)) {
+ if (try_set_sleepy(lock, val))
+ val |= _Q_SLEEPY_VAL;
+ }
+ }
+
+ if (preempted) {
+ seen_preempted = true;
+ sleepy = true;
+ if (!pv_spin_on_preempted_owner)
+ iters++;
+ /*
+			 * With pv_spin_on_preempted_owner, don't increase iters
+			 * while the owner is preempted -- we won't interfere
+			 * with it by definition. This could introduce some
+			 * latency if we continually observe preempted
+ * owners, but hopefully that's a rare corner case of
+ * a badly oversubscribed system.
+ */
+ } else {
+ iters++;
+ }
+ } while (!steal_break(val, iters, paravirt, sleepy));
+
+ spin_end();
+
+ return false;
+}
+
+static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
+{
+ struct qnodes *qnodesp;
+ struct qnode *next, *node;
+ u32 val, old, tail;
+ bool seen_preempted = false;
+ bool sleepy = false;
+ bool mustq = false;
+ int idx;
+ int iters = 0;
+
+ BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+
+ qnodesp = this_cpu_ptr(&qnodes);
+ if (unlikely(qnodesp->count >= MAX_NODES)) {
+ spec_barrier();
+ while (!queued_spin_trylock(lock))
+ cpu_relax();
+ return;
+ }
+
+ idx = qnodesp->count++;
+ /*
+ * Ensure that we increment the head node->count before initialising
+ * the actual node. If the compiler is kind enough to reorder these
+ * stores, then an IRQ could overwrite our assignments.
+ */
+ barrier();
+ node = &qnodesp->nodes[idx];
+ node->next = NULL;
+ node->lock = lock;
+ node->cpu = smp_processor_id();
+ node->sleepy = 0;
+ node->locked = 0;
+
+ tail = encode_tail_cpu(node->cpu);
+
+ /*
+ * Assign all attributes of a node before it can be published.
+ * Issues an lwsync, serving as a release barrier, as well as a
+ * compiler barrier.
+ */
+ old = publish_tail_cpu(lock, tail);
+
+ /*
+ * If there was a previous node; link it and wait until reaching the
+ * head of the waitqueue.
+ */
+ if (old & _Q_TAIL_CPU_MASK) {
+ int prev_cpu = decode_tail_cpu(old);
+ struct qnode *prev = get_tail_qnode(lock, prev_cpu);
+
+ /* Link @node into the waitqueue. */
+ WRITE_ONCE(prev->next, node);
+
+ /* Wait for mcs node lock to be released */
+ spin_begin();
+ while (!READ_ONCE(node->locked)) {
+ spec_barrier();
+
+ if (yield_to_prev(lock, node, prev_cpu, paravirt))
+ seen_preempted = true;
+ }
+ spec_barrier();
+ spin_end();
+
+ smp_rmb(); /* acquire barrier for the mcs lock */
+
+ /*
+ * Generic qspinlocks have this prefetch here, but it seems
+ * like it could cause additional line transitions because
+ * the waiter will keep loading from it.
+ */
+ if (_Q_SPIN_PREFETCH_NEXT) {
+ next = READ_ONCE(node->next);
+ if (next)
+ prefetchw(next);
+ }
+ }
+
+ /* We're at the head of the waitqueue, wait for the lock. */
+again:
+ spin_begin();
+ for (;;) {
+ bool preempted;
+
+ val = READ_ONCE(lock->val);
+ if (!(val & _Q_LOCKED_VAL))
+ break;
+ spec_barrier();
+
+ if (paravirt && pv_sleepy_lock && maybe_stealers) {
+ if (!sleepy) {
+ if (val & _Q_SLEEPY_VAL) {
+ seen_sleepy_lock();
+ sleepy = true;
+ } else if (recently_sleepy()) {
+ sleepy = true;
+ }
+ }
+ if (pv_sleepy_lock_sticky && seen_preempted &&
+ !(val & _Q_SLEEPY_VAL)) {
+ if (try_set_sleepy(lock, val))
+ val |= _Q_SLEEPY_VAL;
+ }
+ }
+
+ propagate_sleepy(node, val, paravirt);
+ preempted = yield_head_to_locked_owner(lock, val, paravirt);
+ if (!maybe_stealers)
+ continue;
+
+ if (preempted)
+ seen_preempted = true;
+
+ if (paravirt && preempted) {
+ sleepy = true;
+
+ if (!pv_spin_on_preempted_owner)
+ iters++;
+ } else {
+ iters++;
+ }
+
+ if (!mustq && iters >= get_head_spins(paravirt, sleepy)) {
+ mustq = true;
+ set_mustq(lock);
+ val |= _Q_MUST_Q_VAL;
+ }
+ }
+ spec_barrier();
+ spin_end();
+
+ /* If we're the last queued, must clean up the tail. */
+ old = trylock_clean_tail(lock, tail);
+ if (unlikely(old & _Q_LOCKED_VAL)) {
+ BUG_ON(!maybe_stealers);
+ goto again; /* Can only be true if maybe_stealers. */
+ }
+
+ if ((old & _Q_TAIL_CPU_MASK) == tail)
+ goto release; /* We were the tail, no next. */
+
+ /* There is a next, must wait for node->next != NULL (MCS protocol) */
+ next = READ_ONCE(node->next);
+ if (!next) {
+ spin_begin();
+ while (!(next = READ_ONCE(node->next)))
+ cpu_relax();
+ spin_end();
+ }
+ spec_barrier();
+
+ /*
+ * Unlock the next mcs waiter node. Release barrier is not required
+ * here because the acquirer is only accessing the lock word, and
+ * the acquire barrier we took the lock with orders that update vs
+ * this store to locked. The corresponding barrier is the smp_rmb()
+ * acquire barrier for mcs lock, above.
+ */
+ if (paravirt && pv_prod_head) {
+ int next_cpu = next->cpu;
+ WRITE_ONCE(next->locked, 1);
+ if (_Q_SPIN_MISO)
+ asm volatile("miso" ::: "memory");
+ if (vcpu_is_preempted(next_cpu))
+ prod_cpu(next_cpu);
+ } else {
+ WRITE_ONCE(next->locked, 1);
+ if (_Q_SPIN_MISO)
+ asm volatile("miso" ::: "memory");
+ }
+
+release:
+ /*
+ * Clear the lock before releasing the node, as another CPU might see stale
+ * values if an interrupt occurs after we increment qnodesp->count
+ * but before node->lock is initialized. The barrier ensures that
+ * there are no further stores to the node after it has been released.
+ */
+ node->lock = NULL;
+ barrier();
+ qnodesp->count--;
+}
+
+void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock)
+{
+ trace_contention_begin(lock, LCB_F_SPIN);
+ /*
+ * This looks funny, but it induces the compiler to inline both
+ * sides of the branch rather than share code as when the condition
+ * is passed as the paravirt argument to the functions.
+ */
+ if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
+ if (try_to_steal_lock(lock, true))
+ spec_barrier();
+ else
+ queued_spin_lock_mcs_queue(lock, true);
+ } else {
+ if (try_to_steal_lock(lock, false))
+ spec_barrier();
+ else
+ queued_spin_lock_mcs_queue(lock, false);
+ }
+ trace_contention_end(lock, 0);
+}
+EXPORT_SYMBOL(queued_spin_lock_slowpath);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void pv_spinlocks_init(void)
+{
+}
+#endif
+
+#include <linux/debugfs.h>
+static int steal_spins_set(void *data, u64 val)
+{
+#if _Q_SPIN_TRY_LOCK_STEAL == 1
+ /* MAYBE_STEAL remains true */
+ steal_spins = val;
+#else
+ static DEFINE_MUTEX(lock);
+
+ /*
+ * The lock slow path has a !maybe_stealers case that can assume
+ * the head of queue will not see concurrent waiters. That waiter
+	 * is unsafe in the presence of stealers, so we must keep them away
+ * from one another.
+ */
+
+ mutex_lock(&lock);
+ if (val && !steal_spins) {
+ maybe_stealers = true;
+ /* wait for queue head waiter to go away */
+ synchronize_rcu();
+ steal_spins = val;
+ } else if (!val && steal_spins) {
+ steal_spins = val;
+ /* wait for all possible stealers to go away */
+ synchronize_rcu();
+ maybe_stealers = false;
+ } else {
+ steal_spins = val;
+ }
+ mutex_unlock(&lock);
+#endif
+
+ return 0;
+}
+
+static int steal_spins_get(void *data, u64 *val)
+{
+ *val = steal_spins;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n");
+
+static int remote_steal_spins_set(void *data, u64 val)
+{
+ remote_steal_spins = val;
+
+ return 0;
+}
+
+static int remote_steal_spins_get(void *data, u64 *val)
+{
+ *val = remote_steal_spins;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_remote_steal_spins, remote_steal_spins_get, remote_steal_spins_set, "%llu\n");
+
+static int head_spins_set(void *data, u64 val)
+{
+ head_spins = val;
+
+ return 0;
+}
+
+static int head_spins_get(void *data, u64 *val)
+{
+ *val = head_spins;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_head_spins, head_spins_get, head_spins_set, "%llu\n");
+
+static int pv_yield_owner_set(void *data, u64 val)
+{
+ pv_yield_owner = !!val;
+
+ return 0;
+}
+
+static int pv_yield_owner_get(void *data, u64 *val)
+{
+ *val = pv_yield_owner;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n");
+
+static int pv_yield_allow_steal_set(void *data, u64 val)
+{
+ pv_yield_allow_steal = !!val;
+
+ return 0;
+}
+
+static int pv_yield_allow_steal_get(void *data, u64 *val)
+{
+ *val = pv_yield_allow_steal;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n");
+
+static int pv_spin_on_preempted_owner_set(void *data, u64 val)
+{
+ pv_spin_on_preempted_owner = !!val;
+
+ return 0;
+}
+
+static int pv_spin_on_preempted_owner_get(void *data, u64 *val)
+{
+ *val = pv_spin_on_preempted_owner;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_spin_on_preempted_owner, pv_spin_on_preempted_owner_get, pv_spin_on_preempted_owner_set, "%llu\n");
+
+static int pv_sleepy_lock_set(void *data, u64 val)
+{
+ pv_sleepy_lock = !!val;
+
+ return 0;
+}
+
+static int pv_sleepy_lock_get(void *data, u64 *val)
+{
+ *val = pv_sleepy_lock;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock, pv_sleepy_lock_get, pv_sleepy_lock_set, "%llu\n");
+
+static int pv_sleepy_lock_sticky_set(void *data, u64 val)
+{
+ pv_sleepy_lock_sticky = !!val;
+
+ return 0;
+}
+
+static int pv_sleepy_lock_sticky_get(void *data, u64 *val)
+{
+ *val = pv_sleepy_lock_sticky;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_sticky, pv_sleepy_lock_sticky_get, pv_sleepy_lock_sticky_set, "%llu\n");
+
+static int pv_sleepy_lock_interval_ns_set(void *data, u64 val)
+{
+ pv_sleepy_lock_interval_ns = val;
+
+ return 0;
+}
+
+static int pv_sleepy_lock_interval_ns_get(void *data, u64 *val)
+{
+ *val = pv_sleepy_lock_interval_ns;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_interval_ns, pv_sleepy_lock_interval_ns_get, pv_sleepy_lock_interval_ns_set, "%llu\n");
+
+static int pv_sleepy_lock_factor_set(void *data, u64 val)
+{
+ pv_sleepy_lock_factor = val;
+
+ return 0;
+}
+
+static int pv_sleepy_lock_factor_get(void *data, u64 *val)
+{
+ *val = pv_sleepy_lock_factor;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_factor, pv_sleepy_lock_factor_get, pv_sleepy_lock_factor_set, "%llu\n");
+
+static int pv_yield_prev_set(void *data, u64 val)
+{
+ pv_yield_prev = !!val;
+
+ return 0;
+}
+
+static int pv_yield_prev_get(void *data, u64 *val)
+{
+ *val = pv_yield_prev;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, pv_yield_prev_set, "%llu\n");
+
+static int pv_yield_sleepy_owner_set(void *data, u64 val)
+{
+ pv_yield_sleepy_owner = !!val;
+
+ return 0;
+}
+
+static int pv_yield_sleepy_owner_get(void *data, u64 *val)
+{
+ *val = pv_yield_sleepy_owner;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_sleepy_owner, pv_yield_sleepy_owner_get, pv_yield_sleepy_owner_set, "%llu\n");
+
+static int pv_prod_head_set(void *data, u64 val)
+{
+ pv_prod_head = !!val;
+
+ return 0;
+}
+
+static int pv_prod_head_get(void *data, u64 *val)
+{
+ *val = pv_prod_head;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_prod_head, pv_prod_head_get, pv_prod_head_set, "%llu\n");
+
+static __init int spinlock_debugfs_init(void)
+{
+ debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
+ debugfs_create_file("qspl_remote_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_remote_steal_spins);
+ debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
+ if (is_shared_processor()) {
+ debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
+ debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal);
+ debugfs_create_file("qspl_pv_spin_on_preempted_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_spin_on_preempted_owner);
+ debugfs_create_file("qspl_pv_sleepy_lock", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock);
+ debugfs_create_file("qspl_pv_sleepy_lock_sticky", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_sticky);
+ debugfs_create_file("qspl_pv_sleepy_lock_interval_ns", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_interval_ns);
+ debugfs_create_file("qspl_pv_sleepy_lock_factor", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_factor);
+ debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
+ debugfs_create_file("qspl_pv_yield_sleepy_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_sleepy_owner);
+ debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head);
+ }
+
+ return 0;
+}
+device_initcall(spinlock_debugfs_init);
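
arch_debugfs_dir normally corresponds to /sys/kernel/debug/powerpc, so the files registered above should appear there once debugfs is mounted; that path is an assumption about the usual mount point, not something this patch states. A hedged userspace sketch for poking one of the tunables:

/* Hypothetical userspace helper; requires root and a mounted debugfs. */
#include <stdio.h>

static int set_steal_spins(unsigned long long spins)
{
	FILE *f = fopen("/sys/kernel/debug/powerpc/qspl_steal_spins", "w");

	if (!f)
		return -1;
	fprintf(f, "%llu\n", spins);
	return fclose(f);
}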
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index d8d5f901cee1..ac3ee19531d8 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -15,9 +15,6 @@
#include <asm/cputable.h>
#include <asm/disassemble.h>
-extern char system_call_common[];
-extern char system_call_vectored_emulate[];
-
#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK 0xffffffff87c0ffffUL
@@ -75,10 +72,8 @@ extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
unsigned long val)
{
-#ifdef __powerpc64__
if ((msr & MSR_64BIT) == 0)
val &= 0xffffffffUL;
-#endif
return val;
}
@@ -112,9 +107,9 @@ static nokprobe_inline long address_ok(struct pt_regs *regs,
{
if (!user_mode(regs))
return 1;
- if (__access_ok(ea, nb))
+ if (access_ok((void __user *)ea, nb))
return 1;
- if (__access_ok(ea, 1))
+ if (access_ok((void __user *)ea, 1))
/* Access overlaps the end of the user region */
regs->dar = TASK_SIZE_MAX - 1;
else
@@ -302,33 +297,51 @@ static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
}
}
-static nokprobe_inline int read_mem_aligned(unsigned long *dest,
- unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int
+__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
unsigned long x = 0;
switch (nb) {
case 1:
- err = __get_user(x, (unsigned char __user *) ea);
+ unsafe_get_user(x, (unsigned char __user *)ea, Efault);
break;
case 2:
- err = __get_user(x, (unsigned short __user *) ea);
+ unsafe_get_user(x, (unsigned short __user *)ea, Efault);
break;
case 4:
- err = __get_user(x, (unsigned int __user *) ea);
+ unsafe_get_user(x, (unsigned int __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __get_user(x, (unsigned long __user *) ea);
+ unsafe_get_user(x, (unsigned long __user *)ea, Efault);
break;
#endif
}
- if (!err)
- *dest = x;
- else
+ *dest = x;
+ return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int
+read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __read_mem_aligned(dest, ea, nb, regs);
+
+ if (user_read_access_begin((void __user *)ea, nb)) {
+ err = __read_mem_aligned(dest, ea, nb, regs);
+ user_read_access_end();
+ } else {
+ err = -EFAULT;
regs->dar = ea;
+ }
+
return err;
}
@@ -336,10 +349,8 @@ static nokprobe_inline int read_mem_aligned(unsigned long *dest,
* Copy from userspace to a buffer, using the largest possible
* aligned accesses, up to sizeof(long).
*/
-static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
int c;
for (; nb > 0; nb -= c) {
@@ -348,31 +359,46 @@ static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
c = max_align(nb);
switch (c) {
case 1:
- err = __get_user(*dest, (unsigned char __user *) ea);
+ unsafe_get_user(*dest, (u8 __user *)ea, Efault);
break;
case 2:
- err = __get_user(*(u16 *)dest,
- (unsigned short __user *) ea);
+ unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
break;
case 4:
- err = __get_user(*(u32 *)dest,
- (unsigned int __user *) ea);
+ unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __get_user(*(unsigned long *)dest,
- (unsigned long __user *) ea);
+ unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
break;
#endif
}
- if (err) {
- regs->dar = ea;
- return err;
- }
dest += c;
ea += c;
}
return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __copy_mem_in(dest, ea, nb, regs);
+
+ if (user_read_access_begin((void __user *)ea, nb)) {
+ err = __copy_mem_in(dest, ea, nb, regs);
+ user_read_access_end();
+ } else {
+ err = -EFAULT;
+ regs->dar = ea;
+ }
+
+ return err;
}
static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
@@ -410,30 +436,48 @@ static int read_mem(unsigned long *dest, unsigned long ea, int nb,
}
NOKPROBE_SYMBOL(read_mem);
-static nokprobe_inline int write_mem_aligned(unsigned long val,
- unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int
+__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
-
switch (nb) {
case 1:
- err = __put_user(val, (unsigned char __user *) ea);
+ unsafe_put_user(val, (unsigned char __user *)ea, Efault);
break;
case 2:
- err = __put_user(val, (unsigned short __user *) ea);
+ unsafe_put_user(val, (unsigned short __user *)ea, Efault);
break;
case 4:
- err = __put_user(val, (unsigned int __user *) ea);
+ unsafe_put_user(val, (unsigned int __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __put_user(val, (unsigned long __user *) ea);
+ unsafe_put_user(val, (unsigned long __user *)ea, Efault);
break;
#endif
}
- if (err)
+ return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int
+write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __write_mem_aligned(val, ea, nb, regs);
+
+ if (user_write_access_begin((void __user *)ea, nb)) {
+ err = __write_mem_aligned(val, ea, nb, regs);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
regs->dar = ea;
+ }
+
return err;
}
@@ -441,10 +485,8 @@ static nokprobe_inline int write_mem_aligned(unsigned long val,
* Copy from a buffer to userspace, using the largest possible
* aligned accesses, up to sizeof(long).
*/
-static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
int c;
for (; nb > 0; nb -= c) {
@@ -453,31 +495,46 @@ static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
c = max_align(nb);
switch (c) {
case 1:
- err = __put_user(*dest, (unsigned char __user *) ea);
+ unsafe_put_user(*dest, (u8 __user *)ea, Efault);
break;
case 2:
- err = __put_user(*(u16 *)dest,
- (unsigned short __user *) ea);
+ unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
break;
case 4:
- err = __put_user(*(u32 *)dest,
- (unsigned int __user *) ea);
+ unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __put_user(*(unsigned long *)dest,
- (unsigned long __user *) ea);
+ unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
break;
#endif
}
- if (err) {
- regs->dar = ea;
- return err;
- }
dest += c;
ea += c;
}
return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __copy_mem_out(dest, ea, nb, regs);
+
+ if (user_write_access_begin((void __user *)ea, nb)) {
+ err = __copy_mem_out(dest, ea, nb, regs);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
+ regs->dar = ea;
+ }
+
+ return err;
}
static nokprobe_inline int write_mem_unaligned(unsigned long val,
@@ -529,6 +586,8 @@ static int do_fp_load(struct instruction_op *op, unsigned long ea,
} u;
nb = GETSIZE(op->type);
+ if (nb > sizeof(u))
+ return -EINVAL;
if (!address_ok(regs, ea, nb))
return -EFAULT;
rn = op->reg;
@@ -579,6 +638,8 @@ static int do_fp_store(struct instruction_op *op, unsigned long ea,
} u;
nb = GETSIZE(op->type);
+ if (nb > sizeof(u))
+ return -EINVAL;
if (!address_ok(regs, ea, nb))
return -EFAULT;
rn = op->reg;
@@ -623,6 +684,9 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
u8 b[sizeof(__vector128)];
} u = {};
+ if (size > sizeof(u))
+ return -EINVAL;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
@@ -631,7 +695,7 @@ static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
if (err)
return err;
if (unlikely(cross_endian))
- do_byte_reverse(&u.b[ea & 0xf], size);
+ do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
preempt_disable();
if (regs->msr & MSR_VEC)
put_vr(rn, &u.v);
@@ -650,6 +714,9 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
u8 b[sizeof(__vector128)];
} u;
+ if (size > sizeof(u))
+ return -EINVAL;
+
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
@@ -662,7 +729,7 @@ static nokprobe_inline int do_vec_store(int rn, unsigned long ea,
u.v = current->thread.vr_state.vr[rn];
preempt_enable();
if (unlikely(cross_endian))
- do_byte_reverse(&u.b[ea & 0xf], size);
+ do_byte_reverse(&u.b[ea & 0xf], min_t(size_t, size, sizeof(u)));
return copy_mem_out(&u.b[ea & 0xf], ea, size, regs);
}
#endif /* CONFIG_ALTIVEC */
@@ -713,8 +780,8 @@ static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
#endif /* __powerpc64 */
#ifdef CONFIG_VSX
-void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
- const void *mem, bool rev)
+static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
+ const void *mem, bool rev)
{
int size, read_size;
int i, j;
@@ -796,11 +863,9 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
break;
}
}
-EXPORT_SYMBOL_GPL(emulate_vsx_load);
-NOKPROBE_SYMBOL(emulate_vsx_load);
-void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
- void *mem, bool rev)
+static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
+ void *mem, bool rev)
{
int size, write_size;
int i, j;
@@ -888,8 +953,6 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
break;
}
}
-EXPORT_SYMBOL_GPL(emulate_vsx_store);
-NOKPROBE_SYMBOL(emulate_vsx_store);
static nokprobe_inline int do_vsx_load(struct instruction_op *op,
unsigned long ea, struct pt_regs *regs,
@@ -986,35 +1049,53 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op,
}
#endif /* CONFIG_VSX */
+static __always_inline int __emulate_dcbz(unsigned long ea)
+{
+ unsigned long i;
+ unsigned long size = l1_dcache_bytes();
+
+ for (i = 0; i < size; i += sizeof(long))
+ unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);
+
+ return 0;
+
+Efault:
+ return -EFAULT;
+}
+
int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
int err;
- unsigned long i, size;
+ unsigned long size = l1_dcache_bytes();
-#ifdef __powerpc64__
- size = ppc64_caches.l1d.block_size;
- if (!(regs->msr & MSR_64BIT))
- ea &= 0xffffffffUL;
-#else
- size = L1_CACHE_BYTES;
-#endif
+ ea = truncate_if_32bit(regs->msr, ea);
ea &= ~(size - 1);
if (!address_ok(regs, ea, size))
return -EFAULT;
- for (i = 0; i < size; i += sizeof(long)) {
- err = __put_user(0, (unsigned long __user *) (ea + i));
- if (err) {
- regs->dar = ea;
- return err;
- }
+
+ if (is_kernel_addr(ea)) {
+ err = __emulate_dcbz(ea);
+ } else if (user_write_access_begin((void __user *)ea, size)) {
+ err = __emulate_dcbz(ea);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
}
- return 0;
+
+ if (err)
+ regs->dar = ea;
+
+ return err;
}
NOKPROBE_SYMBOL(emulate_dcbz);
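
The block alignment above is plain power-of-two rounding: after the 32-bit truncation, ea &= ~(size - 1) drops the offset within the cache block. For a 128-byte block, ea = 0x1234 rounds down to 0x1200, and on 64-bit the loop then issues sixteen 8-byte stores. A self-contained sketch of the arithmetic:

/* Illustration only; the real block size comes from l1_dcache_bytes(). */
static inline unsigned long dcbz_block_start(unsigned long ea, unsigned long block)
{
	return ea & ~(block - 1);	/* block must be a power of two */
}
/* dcbz_block_start(0x1234, 128) == 0x1200 */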
#define __put_user_asmx(x, addr, err, op, cr) \
__asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine power8\n" \
"1: " op " %2,0,%3\n" \
+ ".machine pop\n" \
" mfcr %1\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
@@ -1027,7 +1108,10 @@ NOKPROBE_SYMBOL(emulate_dcbz);
#define __get_user_asmx(x, addr, err, op) \
__asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine power8\n" \
"1: "op" %1,0,%2\n" \
+ ".machine pop\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %0,%3\n" \
@@ -1056,10 +1140,8 @@ static nokprobe_inline void set_cr0(const struct pt_regs *regs,
op->type |= SETCC;
op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
-#ifdef __powerpc64__
if (!(regs->msr & MSR_64BIT))
val = (int) val;
-#endif
if (val < 0)
op->ccval |= 0x80000000;
else if (val > 0)
@@ -1087,15 +1169,11 @@ static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
if (carry_in)
++val;
- op->type = COMPUTE + SETREG + SETXER;
+ op->type = COMPUTE | SETREG | SETXER;
op->reg = rd;
op->val = val;
-#ifdef __powerpc64__
- if (!(regs->msr & MSR_64BIT)) {
- val = (unsigned int) val;
- val1 = (unsigned int) val1;
- }
-#endif
+ val = truncate_if_32bit(regs->msr, val);
+ val1 = truncate_if_32bit(regs->msr, val1);
op->xerval = regs->xer;
if (val < val1 || (carry_in && val == val1))
op->xerval |= XER_CA;
@@ -1112,7 +1190,7 @@ static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
{
unsigned int crval, shift;
- op->type = COMPUTE + SETCC;
+ op->type = COMPUTE | SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -1131,7 +1209,7 @@ static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
{
unsigned int crval, shift;
- op->type = COMPUTE + SETCC;
+ op->type = COMPUTE | SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -1271,7 +1349,7 @@ static nokprobe_inline int trap_compare(long v1, long v2)
* otherwise.
*/
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
- struct ppc_inst instr)
+ ppc_inst_t instr)
{
#ifdef CONFIG_PPC64
unsigned int suffixopcode, prefixtype, prefix_r;
@@ -1301,7 +1379,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
if (branch_taken(word, regs, op))
op->type |= BRTAKEN;
return 1;
-#ifdef CONFIG_PPC64
case 17: /* sc */
if ((word & 0xfe2) == 2)
op->type = SYSCALL;
@@ -1313,7 +1390,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
} else
op->type = UNKNOWN;
return 0;
-#endif
case 18: /* b */
op->type = BRANCH | BRTAKEN;
imm = word & 0x03fffffc;
@@ -1349,7 +1425,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
case 18: /* rfid, scary */
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
goto priv;
op->type = RFI;
return 0;
@@ -1662,13 +1738,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
#endif
case 83: /* mfmsr */
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
goto priv;
op->type = MFMSR;
op->reg = rd;
return 0;
case 146: /* mtmsr */
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
goto priv;
op->type = MTMSR;
op->reg = rd;
@@ -1676,7 +1752,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
#ifdef CONFIG_PPC64
case 178: /* mtmsrd */
- if (regs->msr & MSR_PR)
+ if (user_mode(regs))
goto priv;
op->type = MTMSR;
op->reg = rd;
@@ -2214,15 +2290,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->type = MKOP(STCX, 0, 4);
break;
-#ifdef __powerpc64__
- case 84: /* ldarx */
- op->type = MKOP(LARX, 0, 8);
- break;
-
- case 214: /* stdcx. */
- op->type = MKOP(STCX, 0, 8);
- break;
-
+#ifdef CONFIG_PPC_HAS_LBARX_LHARX
case 52: /* lbarx */
op->type = MKOP(LARX, 0, 1);
break;
@@ -2238,6 +2306,15 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 726: /* sthcx. */
op->type = MKOP(STCX, 0, 2);
break;
+#endif
+#ifdef __powerpc64__
+ case 84: /* ldarx */
+ op->type = MKOP(LARX, 0, 8);
+ break;
+
+ case 214: /* stdcx. */
+ op->type = MKOP(STCX, 0, 8);
+ break;
case 276: /* lqarx */
if (!((rd & 1) || rd == ra || rd == rb))
@@ -3181,12 +3258,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
case BARRIER_EIEIO:
eieio();
break;
+#ifdef CONFIG_PPC64
case BARRIER_LWSYNC:
asm volatile("lwsync" : : : "memory");
break;
case BARRIER_PTESYNC:
asm volatile("ptesync" : : : "memory");
break;
+#endif
}
break;
@@ -3262,7 +3341,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
err = 0;
val = 0;
switch (size) {
-#ifdef __powerpc64__
+#ifdef CONFIG_PPC_HAS_LBARX_LHARX
case 1:
__get_user_asmx(val, ea, err, "lbarx");
break;
@@ -3304,7 +3383,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
__put_user_asmx(op->val, ea, err, "stbcx.", cr);
break;
case 2:
- __put_user_asmx(op->val, ea, err, "stbcx.", cr);
+ __put_user_asmx(op->val, ea, err, "sthcx.", cr);
break;
#endif
case 4:
@@ -3354,14 +3433,14 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
* stored in the thread_struct. If the instruction is in
* the kernel, we must not touch the state in the thread_struct.
*/
- if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
+ if (!user_mode(regs) && !(regs->msr & MSR_FP))
return 0;
err = do_fp_load(op, ea, regs, cross_endian);
break;
#endif
#ifdef CONFIG_ALTIVEC
case LOAD_VMX:
- if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
+ if (!user_mode(regs) && !(regs->msr & MSR_VEC))
return 0;
err = do_vec_load(op->reg, ea, size, regs, cross_endian);
break;
@@ -3376,7 +3455,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
*/
if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
msrbit = MSR_VEC;
- if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
+ if (!user_mode(regs) && !(regs->msr & msrbit))
return 0;
err = do_vsx_load(op, ea, regs, cross_endian);
break;
@@ -3412,8 +3491,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
}
#endif
if ((op->type & UPDATE) && size == sizeof(long) &&
- op->reg == 1 && op->update_reg == 1 &&
- !(regs->msr & MSR_PR) &&
+ op->reg == 1 && op->update_reg == 1 && !user_mode(regs) &&
ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
err = handle_stack_update(ea, regs);
break;
@@ -3425,14 +3503,14 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
#ifdef CONFIG_PPC_FPU
case STORE_FP:
- if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
+ if (!user_mode(regs) && !(regs->msr & MSR_FP))
return 0;
err = do_fp_store(op, ea, regs, cross_endian);
break;
#endif
#ifdef CONFIG_ALTIVEC
case STORE_VMX:
- if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
+ if (!user_mode(regs) && !(regs->msr & MSR_VEC))
return 0;
err = do_vec_store(op->reg, ea, size, regs, cross_endian);
break;
@@ -3447,7 +3525,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
*/
if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
msrbit = MSR_VEC;
- if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
+ if (!user_mode(regs) && !(regs->msr & msrbit))
return 0;
err = do_vsx_store(op, ea, regs, cross_endian);
break;
@@ -3495,7 +3573,7 @@ NOKPROBE_SYMBOL(emulate_loadstore);
* or -1 if the instruction is one that should not be stepped,
* such as an rfid, or a mtmsrd that would clear MSR_RI.
*/
-int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
+int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
{
struct instruction_op op;
int r, err, type;
@@ -3566,43 +3644,22 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
goto instr_done;
-#ifdef CONFIG_PPC64
case SYSCALL: /* sc */
/*
- * N.B. this uses knowledge about how the syscall
- * entry code works. If that is changed, this will
- * need to be changed also.
+ * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
+ * single step a system call instruction:
+ *
+ * Successful completion for an instruction means that the
+ * instruction caused no other interrupt. Thus a Trace
+ * interrupt never occurs for a System Call or System Call
+ * Vectored instruction, or for a Trap instruction that
+ * traps.
*/
- if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
- cpu_has_feature(CPU_FTR_REAL_LE) &&
- regs->gpr[0] == 0x1ebe) {
- regs_set_return_msr(regs, regs->msr ^ MSR_LE);
- goto instr_done;
- }
- regs->gpr[9] = regs->gpr[13];
- regs->gpr[10] = MSR_KERNEL;
- regs->gpr[11] = regs->nip + 4;
- regs->gpr[12] = regs->msr & MSR_MASK;
- regs->gpr[13] = (unsigned long) get_paca();
- regs_set_return_ip(regs, (unsigned long) &system_call_common);
- regs_set_return_msr(regs, MSR_KERNEL);
- return 1;
-
-#ifdef CONFIG_PPC_BOOK3S_64
+ return -1;
case SYSCALL_VECTORED_0: /* scv 0 */
- regs->gpr[9] = regs->gpr[13];
- regs->gpr[10] = MSR_KERNEL;
- regs->gpr[11] = regs->nip + 4;
- regs->gpr[12] = regs->msr & MSR_MASK;
- regs->gpr[13] = (unsigned long) get_paca();
- regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
- regs_set_return_msr(regs, MSR_KERNEL);
- return 1;
-#endif
-
+ return -1;
case RFI:
return -1;
-#endif
}
return 0;
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 2752b1cc1d45..daa72061dc0c 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -4,8 +4,8 @@
*
* Copyright (C) 1996 Paul Mackerras.
*/
+#include <linux/export.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/cache.h>
.text
diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S
index 1ddb26394e8a..3ee45619a3f8 100644
--- a/arch/powerpc/lib/string_32.S
+++ b/arch/powerpc/lib/string_32.S
@@ -7,8 +7,8 @@
*
*/
+#include <linux/export.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/cache.h>
.text
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 169872bc0892..a25eb8588434 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -6,15 +6,10 @@
* Author: Anton Blanchard <anton@au.ibm.com>
*/
+#include <linux/export.h>
#include <asm/ppc_asm.h>
#include <asm/linkage.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
-
- .section ".toc","aw"
-PPC64_CACHES:
- .tc ppc64_caches[TC],ppc64_caches
- .section ".text"
/**
* __arch_clear_user: - Zero a block of memory in user space, with less checking.
@@ -133,7 +128,7 @@ err1; stb r0,0(r3)
blr
.Llong_clear:
- ld r5,PPC64_CACHES@toc(r2)
+ LOAD_REG_ADDR(r5, ppc64_caches)
bf cr7*4+0,11f
err2; std r0,0(r3)
diff --git a/arch/powerpc/lib/strlen_32.S b/arch/powerpc/lib/strlen_32.S
index 0a8d3f64d493..bbd24feb233f 100644
--- a/arch/powerpc/lib/strlen_32.S
+++ b/arch/powerpc/lib/strlen_32.S
@@ -6,8 +6,8 @@
*
* Inspired from glibc implementation
*/
+#include <linux/export.h>
#include <asm/ppc_asm.h>
-#include <asm/export.h>
#include <asm/cache.h>
.text
diff --git a/arch/powerpc/lib/test-code-patching.c b/arch/powerpc/lib/test-code-patching.c
new file mode 100644
index 000000000000..1440d99630b3
--- /dev/null
+++ b/arch/powerpc/lib/test-code-patching.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2008 Michael Ellerman, IBM Corporation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+#include <asm/text-patching.h>
+
+static int __init instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
+{
+ if (instr_is_branch_iform(ppc_inst_read(instr)) ||
+ instr_is_branch_bform(ppc_inst_read(instr)))
+ return branch_target(instr) == addr;
+
+ return 0;
+}
+
+static void __init test_trampoline(void)
+{
+ asm ("nop;nop;\n");
+}
+
+#define check(x) do { \
+ if (!(x)) \
+ pr_err("code-patching: test failed at line %d\n", __LINE__); \
+} while (0)
+
+static void __init test_branch_iform(void)
+{
+ int err;
+ ppc_inst_t instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned long addr = (unsigned long)tmp;
+
+ /* The simplest case, branch to self, no flags */
+ check(instr_is_branch_iform(ppc_inst(0x48000000)));
+ /* All bits of target set, and flags */
+ check(instr_is_branch_iform(ppc_inst(0x4bffffff)));
+ /* High bit of opcode set, which is wrong */
+ check(!instr_is_branch_iform(ppc_inst(0xcbffffff)));
+ /* Middle bits of opcode set, which is wrong */
+ check(!instr_is_branch_iform(ppc_inst(0x7bffffff)));
+
+ /* Simplest case, branch to self with link */
+ check(instr_is_branch_iform(ppc_inst(0x48000001)));
+ /* All bits of target set */
+ check(instr_is_branch_iform(ppc_inst(0x4bfffffd)));
+ /* Some bits of target set */
+ check(instr_is_branch_iform(ppc_inst(0x4bff00fd)));
+ /* Must be a valid branch to start with */
+ check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
+
+ /* Absolute branch to 0x100 */
+ ppc_inst_write(iptr, ppc_inst(0x48000103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
+ /* Absolute branch to 0x420fc */
+ ppc_inst_write(iptr, ppc_inst(0x480420ff));
+ check(instr_is_branch_to_addr(iptr, 0x420fc));
+ /* Maximum positive relative branch, + 32 MB - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x49fffffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
+ /* Smallest negative relative branch, - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x4bfffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
+ /* Largest negative relative branch, - 32 MB */
+ ppc_inst_write(iptr, ppc_inst(0x4a000000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
+
+ /* Branch to self, with link */
+ err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+
+ /* Branch to self - 0x100, with link */
+ err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
+
+ /* Branch to self + 0x100, no link */
+ err = create_branch(&instr, iptr, addr + 0x100, 0);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
+
+ /* Maximum relative negative offset, - 32 MB */
+ err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
+
+ /* Out of range relative negative offset, - 32 MB - 4 */
+ err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
+ check(err);
+
+ /* Out of range relative positive offset, + 32 MB */
+ err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
+ check(err);
+
+ /* Unaligned target */
+ err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
+ check(err);
+
+ /* Check flags are masked correctly */
+ err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+ check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
+}
+
+static void __init test_create_function_call(void)
+{
+ u32 *iptr;
+ unsigned long dest;
+ ppc_inst_t instr;
+
+ /* Check we can create a function call */
+ iptr = (u32 *)ppc_function_entry(test_trampoline);
+ dest = ppc_function_entry(test_create_function_call);
+ create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, dest));
+}
+
+static void __init test_branch_bform(void)
+{
+ int err;
+ unsigned long addr;
+ ppc_inst_t instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned int flags;
+
+ addr = (unsigned long)iptr;
+
+ /* The simplest case, branch to self, no flags */
+ check(instr_is_branch_bform(ppc_inst(0x40000000)));
+ /* All bits of target set, and flags */
+ check(instr_is_branch_bform(ppc_inst(0x43ffffff)));
+ /* High bit of opcode set, which is wrong */
+ check(!instr_is_branch_bform(ppc_inst(0xc3ffffff)));
+ /* Middle bits of opcode set, which is wrong */
+ check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
+
+ /* Absolute conditional branch to 0x100 */
+ ppc_inst_write(iptr, ppc_inst(0x43ff0103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
+ /* Absolute conditional branch to 0x20fc */
+ ppc_inst_write(iptr, ppc_inst(0x43ff20ff));
+ check(instr_is_branch_to_addr(iptr, 0x20fc));
+ /* Maximum positive relative conditional branch, + 32 KB - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x43ff7ffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
+ /* Smallest negative relative conditional branch, - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x43fffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
+ /* Largest negative relative conditional branch, - 32 KB */
+ ppc_inst_write(iptr, ppc_inst(0x43ff8000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
+
+ /* All condition code bits set & link */
+ flags = 0x3ff000 | BRANCH_SET_LINK;
+
+ /* Branch to self */
+ err = create_cond_branch(&instr, iptr, addr, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+
+ /* Branch to self - 0x100 */
+ err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
+
+ /* Branch to self + 0x100 */
+ err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
+
+ /* Maximum relative negative offset, - 32 KB */
+ err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
+
+ /* Out of range relative negative offset, - 32 KB - 4 */
+ err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
+ check(err);
+
+ /* Out of range relative positive offset, + 32 KB */
+ err = create_cond_branch(&instr, iptr, addr + 0x8000, flags);
+ check(err);
+
+ /* Unaligned target */
+ err = create_cond_branch(&instr, iptr, addr + 3, flags);
+ check(err);
+
+ /* Check flags are masked correctly */
+ err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+ check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
+}
+
+static void __init test_translate_branch(void)
+{
+ unsigned long addr;
+ void *p, *q;
+ ppc_inst_t instr;
+ void *buf;
+
+ buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
+ check(buf);
+ if (!buf)
+ return;
+
+ /* Simple case, branch to self moved a little */
+ p = buf;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ q = p + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Maximum negative case, move b . to addr + 32 MB */
+ p = buf;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 0x2000000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000)));
+
+ /* Maximum positive case, move x to x - 32 MB + 4 */
+ p = buf + 0x2000000;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc)));
+
+ /* Jump to x + 16 MB moved to x + 20 MB */
+ p = buf;
+ addr = 0x1000000 + (unsigned long)buf;
+ create_branch(&instr, p, addr, BRANCH_SET_LINK);
+ ppc_inst_write(p, instr);
+ q = buf + 0x1400000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Jump to x + 16 MB moved to x - 16 MB + 4 */
+ p = buf + 0x1000000;
+ addr = 0x2000000 + (unsigned long)buf;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+
+ /* Conditional branch tests */
+
+ /* Simple case, branch to self moved a little */
+ p = buf;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Maximum negative case, move b . to addr + 32 KB */
+ p = buf;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
+ ppc_inst_write(p, instr);
+ q = buf + 0x8000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000)));
+
+ /* Maximum positive case, move x to x - 32 KB + 4 */
+ p = buf + 0x8000;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc)));
+
+ /* Jump to x + 12 KB moved to x + 20 KB */
+ p = buf;
+ addr = 0x3000 + (unsigned long)buf;
+ create_cond_branch(&instr, p, addr, BRANCH_SET_LINK);
+ ppc_inst_write(p, instr);
+ q = buf + 0x5000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Jump to x + 8 KB moved to x - 8 KB + 4 */
+ p = buf + 0x2000;
+ addr = 0x4000 + (unsigned long)buf;
+ create_cond_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Free the buffer we were using */
+ vfree(buf);
+}
+
+static void __init test_prefixed_patching(void)
+{
+ u32 *iptr = (u32 *)ppc_function_entry(test_trampoline);
+ u32 expected[2] = {OP_PREFIX << 26, 0};
+ ppc_inst_t inst = ppc_inst_prefix(OP_PREFIX << 26, 0);
+
+ if (!IS_ENABLED(CONFIG_PPC64))
+ return;
+
+ patch_instruction(iptr, inst);
+
+ check(!memcmp(iptr, expected, sizeof(expected)));
+}
+
+static void __init test_multi_instruction_patching(void)
+{
+ u32 code[32];
+ void *buf;
+ u32 *addr32;
+ u64 *addr64;
+ ppc_inst_t inst64 = ppc_inst_prefix(OP_PREFIX << 26 | 3UL << 24, PPC_RAW_TRAP());
+ u32 inst32 = PPC_RAW_NOP();
+
+ buf = vzalloc(PAGE_SIZE * 8);
+ check(buf);
+ if (!buf)
+ return;
+
+ /* Test single page 32-bit repeated instruction */
+ addr32 = buf + PAGE_SIZE;
+ check(!patch_instructions(addr32 + 1, &inst32, 12, true));
+
+ check(addr32[0] == 0);
+ check(addr32[1] == inst32);
+ check(addr32[2] == inst32);
+ check(addr32[3] == inst32);
+ check(addr32[4] == 0);
+
+ /* Test single page 64-bit repeated instruction */
+ if (IS_ENABLED(CONFIG_PPC64)) {
+ check(ppc_inst_prefixed(inst64));
+
+ addr64 = buf + PAGE_SIZE * 2;
+ ppc_inst_write(code, inst64);
+ check(!patch_instructions((u32 *)(addr64 + 1), code, 24, true));
+
+ check(addr64[0] == 0);
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[1]), inst64));
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[2]), inst64));
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[3]), inst64));
+ check(addr64[4] == 0);
+ }
+
+ /* Test single page memcpy */
+ addr32 = buf + PAGE_SIZE * 3;
+
+ for (int i = 0; i < ARRAY_SIZE(code); i++)
+ code[i] = i + 1;
+
+ check(!patch_instructions(addr32 + 1, code, sizeof(code), false));
+
+ check(addr32[0] == 0);
+ check(!memcmp(&addr32[1], code, sizeof(code)));
+ check(addr32[ARRAY_SIZE(code) + 1] == 0);
+
+ /* Test multipage 32-bit repeated instruction */
+ addr32 = buf + PAGE_SIZE * 4 - 8;
+ check(!patch_instructions(addr32 + 1, &inst32, 12, true));
+
+ check(addr32[0] == 0);
+ check(addr32[1] == inst32);
+ check(addr32[2] == inst32);
+ check(addr32[3] == inst32);
+ check(addr32[4] == 0);
+
+ /* Test multipage 64-bit repeated instruction */
+ if (IS_ENABLED(CONFIG_PPC64)) {
+ check(ppc_inst_prefixed(inst64));
+
+ addr64 = buf + PAGE_SIZE * 5 - 8;
+ ppc_inst_write(code, inst64);
+ check(!patch_instructions((u32 *)(addr64 + 1), code, 24, true));
+
+ check(addr64[0] == 0);
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[1]), inst64));
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[2]), inst64));
+ check(ppc_inst_equal(ppc_inst_read((u32 *)&addr64[3]), inst64));
+ check(addr64[4] == 0);
+ }
+
+ /* Test multipage memcpy */
+ addr32 = buf + PAGE_SIZE * 6 - 12;
+
+ for (int i = 0; i < ARRAY_SIZE(code); i++)
+ code[i] = i + 1;
+
+ check(!patch_instructions(addr32 + 1, code, sizeof(code), false));
+
+ check(addr32[0] == 0);
+ check(!memcmp(&addr32[1], code, sizeof(code)));
+ check(addr32[ARRAY_SIZE(code) + 1] == 0);
+
+ vfree(buf);
+}
+
+static void __init test_data_patching(void)
+{
+ void *buf;
+ u32 *addr32;
+
+ buf = vzalloc(PAGE_SIZE);
+ check(buf);
+ if (!buf)
+ return;
+
+ addr32 = buf + 128;
+
+ addr32[1] = 0xA0A1A2A3;
+ addr32[2] = 0xB0B1B2B3;
+
+ check(!patch_uint(&addr32[1], 0xC0C1C2C3));
+
+ check(addr32[0] == 0);
+ check(addr32[1] == 0xC0C1C2C3);
+ check(addr32[2] == 0xB0B1B2B3);
+ check(addr32[3] == 0);
+
+ /* Unaligned patch_ulong() should fail */
+ if (IS_ENABLED(CONFIG_PPC64))
+ check(patch_ulong(&addr32[1], 0xD0D1D2D3) == -EINVAL);
+
+ check(!patch_ulong(&addr32[2], 0xD0D1D2D3));
+
+ check(addr32[0] == 0);
+ check(addr32[1] == 0xC0C1C2C3);
+ check(*(unsigned long *)(&addr32[2]) == 0xD0D1D2D3);
+
+ if (!IS_ENABLED(CONFIG_PPC64))
+ check(addr32[3] == 0);
+
+ check(addr32[4] == 0);
+
+ vfree(buf);
+}
+
+static int __init test_code_patching(void)
+{
+ pr_info("Running code patching self-tests ...\n");
+
+ test_branch_iform();
+ test_branch_bform();
+ test_create_function_call();
+ test_translate_branch();
+ test_prefixed_patching();
+ test_multi_instruction_patching();
+ test_data_patching();
+
+ return 0;
+}
+late_initcall(test_code_patching);
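
The raw words checked in test_branch_iform() and test_branch_bform() come straight from the Power ISA branch encodings: an I-form branch is opcode 18 in the top six bits and a 26-bit sign-extended byte offset whose low two bits are reused as AA (absolute) and LK (link). A small illustrative encoder, assuming only the ISA layout (this is not a kernel helper):

#include <stdint.h>

/* Illustrative I-form encoder: opcode 18, signed word-aligned offset
 * (passed in bytes), AA = absolute address, LK = set link register. */
static uint32_t branch_iform(int32_t offset, int aa, int lk)
{
	return (18u << 26) | ((uint32_t)offset & 0x03fffffc) |
	       ((aa ? 1u : 0) << 1) | (lk ? 1u : 0);
}

/* branch_iform(0x100, 1, 1) == 0x48000103 - the "absolute branch to
 * 0x100" with link that the test above writes and checks. */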
diff --git a/arch/powerpc/lib/test_code-patching.S b/arch/powerpc/lib/test_code-patching.S
deleted file mode 100644
index a9be6107844e..000000000000
--- a/arch/powerpc/lib/test_code-patching.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020 IBM Corporation
- */
-#include <asm/ppc-opcode.h>
-
- .text
-
-#define globl(x) \
- .globl x; \
-x:
-
-globl(code_patching_test1)
- nop
- nop
-globl(end_code_patching_test1)
-
-globl(code_patching_test1_expected)
- .long OP_PREFIX << 26
- .long 0x0000000
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 8b4f6b3e96c4..66b5b4fa1686 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -11,7 +11,7 @@
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
#include <asm/inst.h>
#define MAX_SUBTESTS 16
@@ -53,9 +53,6 @@
ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
PPC_RAW_ADDI(t, a, i))
-#define TEST_SETB(t, bfa) ppc_inst(PPC_INST_SETB | ___PPC_RT(t) | ___PPC_RA((bfa & 0x7) << 2))
-
-
static void __init init_pt_regs(struct pt_regs *regs)
{
static unsigned long msr;
@@ -792,7 +789,7 @@ static void __init test_lxvpx_stxvpx(void)
#ifdef CONFIG_VSX
static void __init test_plxvp_pstxvp(void)
{
- struct ppc_inst instr;
+ ppc_inst_t instr;
struct pt_regs regs;
union {
vector128 a;
@@ -906,7 +903,7 @@ struct compute_test {
struct {
char *descr;
unsigned long flags;
- struct ppc_inst instr;
+ ppc_inst_t instr;
struct pt_regs regs;
} subtests[MAX_SUBTESTS + 1];
};
@@ -935,21 +932,21 @@ static struct compute_test compute_tests[] = {
.subtests = {
{
.descr = "BFA = 1, CR = GT",
- .instr = TEST_SETB(20, 1),
+ .instr = ppc_inst(PPC_RAW_SETB(20, 1)),
.regs = {
.ccr = 0x4000000,
}
},
{
.descr = "BFA = 4, CR = LT",
- .instr = TEST_SETB(20, 4),
+ .instr = ppc_inst(PPC_RAW_SETB(20, 4)),
.regs = {
.ccr = 0x8000,
}
},
{
.descr = "BFA = 5, CR = EQ",
- .instr = TEST_SETB(20, 5),
+ .instr = ppc_inst(PPC_RAW_SETB(20, 5)),
.regs = {
.ccr = 0x200,
}
@@ -1600,7 +1597,7 @@ static struct compute_test compute_tests[] = {
};
static int __init emulate_compute_instr(struct pt_regs *regs,
- struct ppc_inst instr,
+ ppc_inst_t instr,
bool negative)
{
int analysed;
@@ -1616,18 +1613,18 @@ static int __init emulate_compute_instr(struct pt_regs *regs,
if (analysed != 1 || GETTYPE(op.type) != COMPUTE) {
if (negative)
return -EFAULT;
- pr_info("emulation failed, instruction = %s\n", ppc_inst_as_str(instr));
+ pr_info("emulation failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
return -EFAULT;
}
if (analysed == 1 && negative)
- pr_info("negative test failed, instruction = %s\n", ppc_inst_as_str(instr));
+ pr_info("negative test failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
if (!negative)
emulate_update_regs(regs, &op);
return 0;
}
static int __init execute_compute_instr(struct pt_regs *regs,
- struct ppc_inst instr)
+ ppc_inst_t instr)
{
extern int exec_instr(struct pt_regs *regs);
@@ -1637,7 +1634,7 @@ static int __init execute_compute_instr(struct pt_regs *regs,
/* Patch the NOP with the actual instruction */
patch_instruction_site(&patch__exec_instr, instr);
if (exec_instr(regs)) {
- pr_info("execution failed, instruction = %s\n", ppc_inst_as_str(instr));
+ pr_info("execution failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
return -EFAULT;
}
@@ -1658,7 +1655,7 @@ static void __init run_tests_compute(void)
struct compute_test *test;
struct pt_regs *regs, exp, got;
unsigned int i, j, k;
- struct ppc_inst instr;
+ ppc_inst_t instr;
bool ignore_gpr, ignore_xer, ignore_ccr, passed, rc, negative;
for (i = 0; i < ARRAY_SIZE(compute_tests); i++) {
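
The SETB subtests above drop the local TEST_SETB macro for the shared PPC_RAW_SETB(), and their ccr values encode which CR field setb inspects: the result is -1 if that field's LT bit is set, 1 for GT, otherwise 0. A hedged sketch of that mapping (illustrative only, not kernel code):

/* CR field BFA occupies bits 4*BFA .. 4*BFA+3, counting from the MSB
 * of the 32-bit CR image; LT/GT/EQ/SO are the field's bits 0..3. */
static long setb_result(unsigned int ccr, unsigned int bfa)
{
	unsigned int field = (ccr >> (28 - 4 * bfa)) & 0xf;

	if (field & 0x8)	/* LT */
		return -1;
	if (field & 0x4)	/* GT */
		return 1;
	return 0;
}

/* setb_result(0x4000000, 1) == 1, matching the "BFA = 1, CR = GT"
 * subtest; setb_result(0x8000, 4) == -1, matching "BFA = 4, CR = LT". */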
diff --git a/arch/powerpc/lib/test_emulate_step_exec_instr.S b/arch/powerpc/lib/test_emulate_step_exec_instr.S
index 9ef941d958d8..e2b646a4f7fa 100644
--- a/arch/powerpc/lib/test_emulate_step_exec_instr.S
+++ b/arch/powerpc/lib/test_emulate_step_exec_instr.S
@@ -16,7 +16,7 @@ _GLOBAL(exec_instr)
/*
* Stack frame layout (INT_FRAME_SIZE bytes)
- * In-memory pt_regs (SP + STACK_FRAME_OVERHEAD)
+ * In-memory pt_regs (SP + STACK_INT_FRAME_REGS)
* Scratch space (SP + 8)
* Back chain (SP + 0)
*/
@@ -37,7 +37,7 @@ _GLOBAL(exec_instr)
* The stack pointer (GPR1) and the thread pointer (GPR13) are not
* saved as these should not be modified anyway.
*/
- SAVE_2GPRS(2, r1)
+ SAVE_GPRS(2, 3, r1)
SAVE_NVGPRS(r1)
/*
@@ -75,8 +75,7 @@ _GLOBAL(exec_instr)
/* Load GPRs from pt_regs */
REST_GPR(0, r31)
- REST_10GPRS(2, r31)
- REST_GPR(12, r31)
+ REST_GPRS(2, 12, r31)
REST_NVGPRS(r31)
/* Placeholder for the test instruction */
@@ -99,8 +98,7 @@ _GLOBAL(exec_instr)
subi r3, r3, GPR0
SAVE_GPR(0, r3)
SAVE_GPR(2, r3)
- SAVE_8GPRS(4, r3)
- SAVE_GPR(12, r3)
+ SAVE_GPRS(4, 12, r3)
SAVE_NVGPRS(r3)
/* Save resulting LR to pt_regs */
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 62e6c3045252..54340912398f 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -9,7 +9,6 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
-#include <asm/asm-prototypes.h>
int enter_vmx_usercopy(void)
{
@@ -37,7 +36,17 @@ int exit_vmx_usercopy(void)
{
disable_kernel_altivec();
pagefault_enable();
- preempt_enable();
+ preempt_enable_no_resched();
+ /*
+ * Must never explicitly call schedule (including preempt_enable())
+ * while in a kuap-unlocked user copy, because the AMR register will
+ * not be saved and restored across context switch. However preempt
+ * kernels need to be preempted as soon as possible if need_resched is
+ * set and we are preemptible. The hack here is to schedule a
+ * decrementer to fire here and reschedule for us if necessary.
+ */
+ if (need_irq_preemption() && need_resched())
+ set_dec(1);
return 0;
}
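
The exit_vmx_usercopy() change above swaps preempt_enable() for preempt_enable_no_resched() plus an armed decrementer, so any pending reschedule happens through the interrupt return path, which does save and restore the AMR (KUAP) state. For context, a hedged sketch of how the enter/exit pair brackets a VMX copy; copy_chunk_vmx() and fallback_copy() are made-up stand-ins for the real assembly routines:

static int vmx_user_copy(void *to, const void *from, size_t n)
{
	if (!enter_vmx_usercopy())	/* returns 0 if VMX can't be used */
		return fallback_copy(to, from, n);	/* hypothetical */

	copy_chunk_vmx(to, from, n);	/* runs with AltiVec enabled */

	return exit_vmx_usercopy();	/* may arm the decrementer */
}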
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
index 54e61979e80e..aab49d056d18 100644
--- a/arch/powerpc/lib/xor_vmx.c
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -49,8 +49,9 @@ typedef vector signed char unative_t;
V1##_3 = vec_xor(V1##_3, V2##_3); \
} while (0)
-void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in)
+void __xor_altivec_2(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -67,8 +68,10 @@ void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in)
+void __xor_altivec_3(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -89,9 +92,11 @@ void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in)
+void __xor_altivec_4(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in,
+ const unsigned long * __restrict v4_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -116,9 +121,12 @@ void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in)
+void __xor_altivec_5(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in,
+ const unsigned long * __restrict v4_in,
+ const unsigned long * __restrict v5_in)
{
DEFINE(v1);
DEFINE(v2);
diff --git a/arch/powerpc/lib/xor_vmx.h b/arch/powerpc/lib/xor_vmx.h
index 5c2b0839b179..573c41d90dac 100644
--- a/arch/powerpc/lib/xor_vmx.h
+++ b/arch/powerpc/lib/xor_vmx.h
@@ -6,16 +6,17 @@
* outside of the enable/disable altivec block.
*/
-void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in);
-
-void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in);
-
-void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in);
-
-void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in);
+void __xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void __xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void __xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
diff --git a/arch/powerpc/lib/xor_vmx_glue.c b/arch/powerpc/lib/xor_vmx_glue.c
index 80dba916c367..35d917ece4d1 100644
--- a/arch/powerpc/lib/xor_vmx_glue.c
+++ b/arch/powerpc/lib/xor_vmx_glue.c
@@ -12,47 +12,51 @@
#include <asm/xor_altivec.h>
#include "xor_vmx.h"
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in)
+void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_2(bytes, v1_in, v2_in);
+ __xor_altivec_2(bytes, p1, p2);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_2);
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in)
+void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_3(bytes, v1_in, v2_in, v3_in);
+ __xor_altivec_3(bytes, p1, p2, p3);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_3);
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in)
+void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in);
+ __xor_altivec_4(bytes, p1, p2, p3, p4);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_4);
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in)
+void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in);
+ __xor_altivec_5(bytes, p1, p2, p3, p4, p5);
disable_kernel_altivec();
preempt_enable();
}
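
With the const/__restrict qualifiers the prototypes now state that the destination never aliases the sources, letting the compiler keep the vector temporaries live across the unrolled loop. Usage is unchanged: xor_altivec_2(bytes, p1, p2) computes p1 ^= p2 blockwise. A short illustrative caller, with names assumed rather than taken from this patch:

/* XOR two source blocks into a parity block, RAID-style. */
static void make_parity(unsigned long *parity, const unsigned long *d0,
			const unsigned long *d1, unsigned long bytes)
{
	memcpy(parity, d0, bytes);		/* parity = d0  */
	xor_altivec_2(bytes, parity, d1);	/* parity ^= d1 */
}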