Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile                    3
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c     6
-rw-r--r--  arch/arm64/kernel/alternative.c               8
-rw-r--r--  arch/arm64/kernel/cpu_errata.c               21
-rw-r--r--  arch/arm64/kernel/cpu_ops.c                   4
-rw-r--r--  arch/arm64/kernel/cpufeature.c               25
-rw-r--r--  arch/arm64/kernel/cpuinfo.c                   2
-rw-r--r--  arch/arm64/kernel/debug-monitors.c           14
-rw-r--r--  arch/arm64/kernel/fpsimd.c                    2
-rw-r--r--  arch/arm64/kernel/ftrace-mod.S               18
-rw-r--r--  arch/arm64/kernel/ftrace.c                  103
-rw-r--r--  arch/arm64/kernel/head.S                      1
-rw-r--r--  arch/arm64/kernel/insn.c                      7
-rw-r--r--  arch/arm64/kernel/kaslr.c                    22
-rw-r--r--  arch/arm64/kernel/module.c                   20
-rw-r--r--  arch/arm64/kernel/pci.c                      13
-rw-r--r--  arch/arm64/kernel/perf_event.c                2
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c            4
-rw-r--r--  arch/arm64/kernel/process.c                  11
-rw-r--r--  arch/arm64/kernel/ptrace.c                   39
-rw-r--r--  arch/arm64/kernel/setup.c                     3
-rw-r--r--  arch/arm64/kernel/signal.c                  413
-rw-r--r--  arch/arm64/kernel/smp.c                      12
-rw-r--r--  arch/arm64/kernel/stacktrace.c                1
-rw-r--r--  arch/arm64/kernel/topology.c                248
-rw-r--r--  arch/arm64/kernel/traps.c                    28
-rw-r--r--  arch/arm64/kernel/vdso.c                     10
27 files changed, 681 insertions, 359 deletions
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 1dcb69d3d0e5..f2b4e816b6de 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -62,3 +62,6 @@ extra-y += $(head-y) vmlinux.lds
ifeq ($(CONFIG_DEBUG_EFI),y)
AFLAGS_head.o += -DVMLINUX_PATH="\"$(realpath $(objtree)/vmlinux)\""
endif
+
+# will be included by each individual module but not by the core kernel itself
+extra-$(CONFIG_DYNAMIC_FTRACE) += ftrace-mod.o
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index 1f5655cd9cc9..98a20e58758b 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -71,7 +71,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
{
struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
struct parking_protocol_mailbox __iomem *mailbox;
- __le32 cpu_id;
+ u32 cpu_id;
/*
* Map mailbox memory with attribute device nGnRE (ie ioremap -
@@ -123,9 +123,9 @@ static void acpi_parking_protocol_cpu_postboot(void)
int cpu = smp_processor_id();
struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
struct parking_protocol_mailbox __iomem *mailbox = cpu_entry->mailbox;
- __le64 entry_point;
+ u64 entry_point;
- entry_point = readl_relaxed(&mailbox->entry_point);
+ entry_point = readq_relaxed(&mailbox->entry_point);
/*
* Check if firmware has cleared the entry_point as expected
* by the protocol specification.
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 8840c109c5d6..6dd0a3a3e5c9 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -28,7 +28,7 @@
#include <asm/sections.h>
#include <linux/stop_machine.h>
-#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
+#define __ALT_PTR(a,f) ((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
@@ -60,7 +60,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
-static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
+static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
u32 insn;
@@ -109,7 +109,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
- u32 *origptr, *replptr, *updptr;
+ __le32 *origptr, *replptr, *updptr;
for (alt = region->begin; alt < region->end; alt++) {
u32 insn;
@@ -124,7 +124,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
origptr = ALT_ORIG_PTR(alt);
replptr = ALT_REPL_PTR(alt);
- updptr = use_linear_alias ? (u32 *)lm_alias(origptr) : origptr;
+ updptr = use_linear_alias ? lm_alias(origptr) : origptr;
nr_inst = alt->alt_len / sizeof(insn);
for (i = 0; i < nr_inst; i++) {
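Dropping the u32 cast in __ALT_PTR lets each caller pick the pointed-to type (now __le32 *), but the addressing trick is unchanged: every alt_instr field stores a signed 32-bit offset from the field's own address to its target, so the record stays valid wherever the image is mapped. A minimal userspace sketch of that self-relative scheme, assuming GNU void-pointer arithmetic (the kernel macro relies on it too); the names and the NOP payload are illustrative:

#include <stdint.h>
#include <stdio.h>

struct alt_instr {
	int32_t orig_offset;	/* signed distance from this field to the target */
	int32_t alt_offset;
};

/* same shape as __ALT_PTR(): resolve the offset relative to the field itself */
#define ALT_PTR(rec, field) ((void *)&(rec)->field + (rec)->field)

static uint32_t orig_insn = 0xd503201f;		/* AArch64 NOP, as a stand-in */
static struct alt_instr rec;

int main(void)
{
	/* record how far the target is from the offset field itself */
	rec.orig_offset = (int32_t)((char *)&orig_insn -
				    (char *)&rec.orig_offset);

	uint32_t *p = ALT_PTR(&rec, orig_offset);
	printf("resolved %p -> %#x\n", (void *)p, *p);
	return 0;
}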
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 2ed2a7657711..0e27f86ee709 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -133,6 +133,27 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_30115
+ {
+ /* Cavium ThunderX, T88 pass 1.x - 2.2 */
+ .desc = "Cavium erratum 30115",
+ .capability = ARM64_WORKAROUND_CAVIUM_30115,
+ MIDR_RANGE(MIDR_THUNDERX, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 2),
+ },
+ {
+ /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
+ .desc = "Cavium erratum 30115",
+ .capability = ARM64_WORKAROUND_CAVIUM_30115,
+ MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
+ },
+ {
+ /* Cavium ThunderX, T83 pass 1.0 */
+ .desc = "Cavium erratum 30115",
+ .capability = ARM64_WORKAROUND_CAVIUM_30115,
+ MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
+ },
+#endif
{
.desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
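The min/max arguments to MIDR_RANGE() encode the "pass" numbers quoted in the comments: for these Cavium parts, silicon pass a.b corresponds to MIDR variant (a - 1) and revision b, so "pass 1.x - 2.2" becomes the range 0x00 .. (1 << MIDR_VARIANT_SHIFT) | 2. A hedged helper showing that mapping (MIDR_VARIANT_SHIFT is 20, the Variant field at MIDR[23:20]; the pass-to-variant convention is the one these entries use, not a general rule):

#include <stdint.h>

#define MIDR_VARIANT_SHIFT	20	/* MIDR_EL1 Variant field, bits [23:20] */

/* "pass major.minor" -> the variant/revision value MIDR_RANGE() compares */
uint32_t pass_to_midr_rev(unsigned int major, unsigned int minor)
{
	return ((major - 1) << MIDR_VARIANT_SHIFT) | minor;
}

/* pass_to_midr_rev(2, 2) == (1 << MIDR_VARIANT_SHIFT) | 2, the T88 upper bound */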
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index e137ceaf5016..d16978213c5b 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -82,8 +82,8 @@ static const char *__init cpu_read_enable_method(int cpu)
* Don't warn spuriously.
*/
if (cpu != 0)
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
+ pr_err("%pOF: missing enable-method property\n",
+ dn);
}
} else {
enable_method = acpi_get_enable_method(cpu);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 817ce3365e20..9f9e0064c8c1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -51,6 +51,25 @@ unsigned int compat_elf_hwcap2 __read_mostly;
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
+static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
+{
+ /* file-wide pr_fmt adds "CPU features: " prefix */
+ pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
+ return 0;
+}
+
+static struct notifier_block cpu_hwcaps_notifier = {
+ .notifier_call = dump_cpu_hwcaps
+};
+
+static int __init register_cpu_hwcaps_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &cpu_hwcaps_notifier);
+ return 0;
+}
+__initcall(register_cpu_hwcaps_dumper);
+
DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);
@@ -639,8 +658,10 @@ void update_cpu_features(int cpu,
* Mismatched CPU features are a recipe for disaster. Don't even
* pretend to support them.
*/
- WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
- "Unsupported CPU feature variation.\n");
+ if (taint) {
+ pr_warn_once("Unsupported CPU feature variation detected.\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ }
}
u64 read_sanitised_ftr_reg(u32 id)
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 68b1f364c515..f495ee5049fd 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -227,7 +227,7 @@ static struct attribute *cpuregs_id_attrs[] = {
NULL
};
-static struct attribute_group cpuregs_attr_group = {
+static const struct attribute_group cpuregs_attr_group = {
.attrs = cpuregs_id_attrs,
.name = "identification"
};
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index d618e25c3de1..c7ef99904934 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -341,20 +341,22 @@ int aarch32_break_handler(struct pt_regs *regs)
if (compat_thumb_mode(regs)) {
/* get 16-bit Thumb instruction */
- get_user(thumb_instr, (u16 __user *)pc);
- thumb_instr = le16_to_cpu(thumb_instr);
+ __le16 instr;
+ get_user(instr, (__le16 __user *)pc);
+ thumb_instr = le16_to_cpu(instr);
if (thumb_instr == AARCH32_BREAK_THUMB2_LO) {
/* get second half of 32-bit Thumb-2 instruction */
- get_user(thumb_instr, (u16 __user *)(pc + 2));
- thumb_instr = le16_to_cpu(thumb_instr);
+ get_user(instr, (__le16 __user *)(pc + 2));
+ thumb_instr = le16_to_cpu(instr);
bp = thumb_instr == AARCH32_BREAK_THUMB2_HI;
} else {
bp = thumb_instr == AARCH32_BREAK_THUMB;
}
} else {
/* 32-bit ARM instruction */
- get_user(arm_instr, (u32 __user *)pc);
- arm_instr = le32_to_cpu(arm_instr);
+ __le32 instr;
+ get_user(instr, (__le32 __user *)pc);
+ arm_instr = le32_to_cpu(instr);
bp = (arm_instr & ~0xf0000000) == AARCH32_BREAK_ARM;
}
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 06da8ea16bbe..c7b4995868e1 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
{
if (!system_supports_fpsimd())
return;
+ preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
+ preempt_enable();
}
/*
diff --git a/arch/arm64/kernel/ftrace-mod.S b/arch/arm64/kernel/ftrace-mod.S
new file mode 100644
index 000000000000..00c4025be4ff
--- /dev/null
+++ b/arch/arm64/kernel/ftrace-mod.S
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .section ".text.ftrace_trampoline", "ax"
+ .align 3
+0: .quad 0
+__ftrace_trampoline:
+ ldr x16, 0b
+ br x16
+ENDPROC(__ftrace_trampoline)
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 40ad08ac569a..c13b1fca0e5b 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -10,10 +10,12 @@
*/
#include <linux/ftrace.h>
+#include <linux/module.h>
#include <linux/swab.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
@@ -70,6 +72,58 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long pc = rec->ip;
u32 old, new;
+ long offset = (long)pc - (long)addr;
+
+ if (offset < -SZ_128M || offset >= SZ_128M) {
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ unsigned long *trampoline;
+ struct module *mod;
+
+ /*
+ * On kernels that support module PLTs, the offset between the
+ * branch instruction and its target may legally exceed the
+ * range of an ordinary relative 'bl' opcode. In this case, we
+ * need to branch via a trampoline in the module.
+ *
+ * NOTE: __module_text_address() must be called with preemption
+ * disabled, but we can rely on ftrace_lock to ensure that 'mod'
+ * retains its validity throughout the remainder of this code.
+ */
+ preempt_disable();
+ mod = __module_text_address(pc);
+ preempt_enable();
+
+ if (WARN_ON(!mod))
+ return -EINVAL;
+
+ /*
+ * There is only one ftrace trampoline per module. For now,
+ * this is not a problem since on arm64, all dynamic ftrace
+ * invocations are routed via ftrace_caller(). This will need
+ * to be revisited if support for multiple ftrace entry points
+ * is added in the future, but for now, the pr_err() below
+ * deals with a theoretical issue only.
+ */
+ trampoline = (unsigned long *)mod->arch.ftrace_trampoline;
+ if (trampoline[0] != addr) {
+ if (trampoline[0] != 0) {
+ pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
+ return -EINVAL;
+ }
+
+ /* point the trampoline to our ftrace entry point */
+ module_disable_ro(mod);
+ trampoline[0] = addr;
+ module_enable_ro(mod, true);
+
+ /* update trampoline before patching in the branch */
+ smp_wmb();
+ }
+ addr = (unsigned long)&trampoline[1];
+#else /* CONFIG_ARM64_MODULE_PLTS */
+ return -EINVAL;
+#endif /* CONFIG_ARM64_MODULE_PLTS */
+ }
old = aarch64_insn_gen_nop();
new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
@@ -84,12 +138,55 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
unsigned long pc = rec->ip;
- u32 old, new;
+ bool validate = true;
+ u32 old = 0, new;
+ long offset = (long)pc - (long)addr;
+
+ if (offset < -SZ_128M || offset >= SZ_128M) {
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ u32 replaced;
+
+ /*
+ * 'mod' is only set at module load time, but if we end up
+ * dealing with an out-of-range condition, we can assume it
+ * is due to a module being loaded far away from the kernel.
+ */
+ if (!mod) {
+ preempt_disable();
+ mod = __module_text_address(pc);
+ preempt_enable();
+
+ if (WARN_ON(!mod))
+ return -EINVAL;
+ }
+
+ /*
+ * The instruction we are about to patch may be a branch and
+ * link instruction that was redirected via a PLT entry. In
+ * this case, the normal validation will fail, but we can at
+ * least check that we are dealing with a branch and link
+ * instruction that points into the right module.
+ */
+ if (aarch64_insn_read((void *)pc, &replaced))
+ return -EFAULT;
+
+ if (!aarch64_insn_is_bl(replaced) ||
+ !within_module(pc + aarch64_get_branch_offset(replaced),
+ mod))
+ return -EINVAL;
+
+ validate = false;
+#else /* CONFIG_ARM64_MODULE_PLTS */
+ return -EINVAL;
+#endif /* CONFIG_ARM64_MODULE_PLTS */
+ } else {
+ old = aarch64_insn_gen_branch_imm(pc, addr,
+ AARCH64_INSN_BRANCH_LINK);
+ }
- old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
new = aarch64_insn_gen_nop();
- return ftrace_modify_code(pc, old, new, true);
+ return ftrace_modify_code(pc, old, new, validate);
}
void arch_ftrace_update_code(int command)
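The offset test above is what decides between a direct branch and the trampoline: an AArch64 bl encodes a signed 26-bit word offset, roughly +/-128 MiB, and the trampoline added in ftrace-mod.S is a literal slot followed by ldr x16 / br x16, so trampoline[0] holds the real destination and &trampoline[1] is what the patched bl targets. A standalone sketch mirroring the reachability check used here:

#include <stdbool.h>
#include <stdio.h>

#define SZ_128M	(128UL * 1024 * 1024)

/* same comparison as ftrace_make_call()/ftrace_make_nop() */
static bool bl_can_reach(unsigned long pc, unsigned long target)
{
	long offset = (long)pc - (long)target;

	return offset >= -(long)SZ_128M && offset < (long)SZ_128M;
}

int main(void)
{
	/* e.g. module text placed 2 GiB away from the kernel's ftrace_caller */
	unsigned long pc = 0xffff000008081000UL;

	printf("direct bl: %s\n",
	       bl_can_reach(pc, pc + 0x80000000UL) ? "yes" : "no");
	return 0;
}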
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 973df7de7bf8..adb0910b88f5 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -354,7 +354,6 @@ __primary_switched:
tst x23, ~(MIN_KIMG_ALIGN - 1) // already running randomized?
b.ne 0f
mov x0, x21 // pass FDT address in x0
- mov x1, x23 // pass modulo offset in x1
bl kaslr_early_init // parse FDT for KASLR options
cbz x0, 0f // KASLR disabled? just proceed
orr x23, x23, x0 // record KASLR offset
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index cd872133e88e..2718a77da165 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -117,7 +117,7 @@ static void __kprobes patch_unmap(int fixmap)
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
int ret;
- u32 val;
+ __le32 val;
ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
if (!ret)
@@ -126,7 +126,7 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
return ret;
}
-static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
void *waddr = addr;
unsigned long flags = 0;
@@ -145,8 +145,7 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
- insn = cpu_to_le32(insn);
- return __aarch64_insn_write(addr, insn);
+ return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
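The __le32 annotations formalise the convention the code already followed: A64 instructions are stored little-endian in memory regardless of the kernel's data endianness, so every read goes through le32_to_cpu() and every write through cpu_to_le32(). A self-contained sketch of that convention (the swap macros here are stand-ins for the kernel helpers):

#include <stdint.h>

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define le32_to_cpu(x)	__builtin_bswap32(x)
#define cpu_to_le32(x)	__builtin_bswap32(x)
#else
#define le32_to_cpu(x)	(x)
#define cpu_to_le32(x)	(x)
#endif

/* 'addr' holds a little-endian instruction word (__le32 in kernel terms) */
uint32_t insn_read(const uint32_t *addr)
{
	return le32_to_cpu(*addr);
}

void insn_write(uint32_t *addr, uint32_t insn)
{
	*addr = cpu_to_le32(insn);
}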
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index d7e90d97f5c4..47080c49cc7e 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -27,7 +27,7 @@ u16 __initdata memstart_offset_seed;
static __init u64 get_kaslr_seed(void *fdt)
{
int node, len;
- u64 *prop;
+ fdt64_t *prop;
u64 ret;
node = fdt_path_offset(fdt, "/chosen");
@@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
* containing function pointers) to be reinitialized, and zero-initialized
* .bss variables will be reset to 0.
*/
-u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+u64 __init kaslr_early_init(u64 dt_phys)
{
void *fdt;
u64 seed, offset, mask, module_range;
@@ -131,15 +131,17 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
/*
* The kernel Image should not extend across a 1GB/32MB/512MB alignment
* boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
- * happens, increase the KASLR offset by the size of the kernel image
- * rounded up by SWAPPER_BLOCK_SIZE.
+ * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
+ *
+ * NOTE: The references to _text and _end below will already take the
+ * modulo offset (the physical displacement modulo 2 MB) into
+ * account, given that the physical placement is controlled by
+ * the loader, and will not change as a result of the virtual
+ * mapping we choose.
*/
- if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
- (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
- u64 kimg_sz = _end - _text;
- offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
- & mask;
- }
+ if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+ (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+ offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);
if (IS_ENABLED(CONFIG_KASAN))
/*
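The replacement logic is simpler than the old round-up: if adding the candidate offset would put _text and _end on opposite sides of a (1 << SWAPPER_TABLE_SHIFT) boundary, the offset is rounded down to that boundary so the whole image stays within one block. A small illustrative sketch (SWAPPER_TABLE_SHIFT is taken as 30 here, matching the 1 GB figure the comment gives for 4 KB pages):

#include <stdint.h>

#define SWAPPER_TABLE_SHIFT	30	/* 1 GiB blocks, 4 KB granule (example) */

uint64_t fixup_kaslr_offset(uint64_t text, uint64_t end, uint64_t offset)
{
	/* would the randomized image straddle a table boundary? */
	if (((text + offset) >> SWAPPER_TABLE_SHIFT) !=
	    ((end  + offset) >> SWAPPER_TABLE_SHIFT))
		offset &= ~((1ULL << SWAPPER_TABLE_SHIFT) - 1);	/* round_down */

	return offset;
}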
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f035ff6fb223..f469e0435903 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -74,7 +74,7 @@ enum aarch64_reloc_op {
RELOC_OP_PAGE,
};
-static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
+static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
switch (reloc_op) {
case RELOC_OP_ABS:
@@ -121,12 +121,12 @@ enum aarch64_insn_movw_imm_type {
AARCH64_INSN_IMM_MOVKZ,
};
-static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
+static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
u64 imm;
s64 sval;
- u32 insn = le32_to_cpu(*(u32 *)place);
+ u32 insn = le32_to_cpu(*place);
sval = do_reloc(op, place, val);
imm = sval >> lsb;
@@ -154,7 +154,7 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
/* Update the instruction with the new encoding. */
insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
- *(u32 *)place = cpu_to_le32(insn);
+ *place = cpu_to_le32(insn);
if (imm > U16_MAX)
return -ERANGE;
@@ -162,12 +162,12 @@ static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
return 0;
}
-static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
+static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
u64 imm, imm_mask;
s64 sval;
- u32 insn = le32_to_cpu(*(u32 *)place);
+ u32 insn = le32_to_cpu(*place);
/* Calculate the relocation value. */
sval = do_reloc(op, place, val);
@@ -179,7 +179,7 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
/* Update the instruction's immediate field. */
insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
- *(u32 *)place = cpu_to_le32(insn);
+ *place = cpu_to_le32(insn);
/*
* Extract the upper value bits (including the sign bit) and
@@ -420,8 +420,12 @@ int module_finalize(const Elf_Ehdr *hdr,
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
apply_alternatives((void *)s->sh_addr, s->sh_size);
- return 0;
}
+#ifdef CONFIG_ARM64_MODULE_PLTS
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
+ !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
+ me->arch.ftrace_trampoline = (void *)s->sh_addr;
+#endif
}
return 0;
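module_finalize() walks the section headers the module loader hands it and matches names through the section-header string table, just as in the .altinstructions case above; the ftrace hunk merely records where .text.ftrace_trampoline ended up. A hedged userspace equivalent of that name lookup, operating on a .ko file via mmap and <elf.h> (error handling kept minimal):

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <module.ko> <section>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDONLY);
	struct stat st;
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	char *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	Elf64_Ehdr *hdr = (Elf64_Ehdr *)map;
	Elf64_Shdr *sechdrs = (Elf64_Shdr *)(map + hdr->e_shoff);
	const char *secstrs = map + sechdrs[hdr->e_shstrndx].sh_offset;

	/* same scan as module_finalize(): compare each section's name */
	for (int i = 0; i < hdr->e_shnum; i++)
		if (!strcmp(argv[2], secstrs + sechdrs[i].sh_name))
			printf("%s: file offset %#llx, %llu bytes\n", argv[2],
			       (unsigned long long)sechdrs[i].sh_offset,
			       (unsigned long long)sechdrs[i].sh_size);

	munmap(map, st.st_size);
	close(fd);
	return 0;
}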
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index c7e3e6387a49..e2b7e4f9cc31 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -39,20 +39,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return res->start;
}
+#ifdef CONFIG_ACPI
/*
* Try to assign the IRQ number when probing a new device
*/
int pcibios_alloc_irq(struct pci_dev *dev)
{
- if (acpi_disabled)
- dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
-#ifdef CONFIG_ACPI
- else
- return acpi_pci_irq_enable(dev);
-#endif
+ if (!acpi_disabled)
+ acpi_pci_irq_enable(dev);
return 0;
}
+#endif
/*
* raw_pci_read/write - Platform-specific PCI config space access.
@@ -108,7 +106,10 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
if (!acpi_disabled) {
struct pci_config_window *cfg = bridge->bus->sysdata;
struct acpi_device *adev = to_acpi_device(cfg->parent);
+ struct device *bus_dev = &bridge->bus->dev;
+
ACPI_COMPANION_SET(&bridge->dev, adev);
+ set_dev_node(bus_dev, acpi_get_node(acpi_device_handle(adev)));
}
return 0;
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 83a1b1ad189f..b5798ba21189 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -529,7 +529,7 @@ static struct attribute_group armv8_pmuv3_events_attr_group = {
.is_visible = armv8pmu_event_attr_is_visible,
};
-PMU_FORMAT_ATTR(event, "config:0-9");
+PMU_FORMAT_ATTR(event, "config:0-15");
static struct attribute *armv8_pmuv3_format_attrs[] = {
&format_attr_event.attr,
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index c5c45942fb6e..d849d9804011 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -522,9 +522,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
pr_err("current sp %lx does not match saved sp %lx\n",
orig_sp, stack_addr);
pr_err("Saved registers for jprobe %p\n", jp);
- show_regs(saved_regs);
+ __show_regs(saved_regs);
pr_err("Current registers\n");
- show_regs(regs);
+ __show_regs(regs);
BUG();
}
unpause_graph_tracing();
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index ae2a835898d7..c8f7d98d8cb9 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -210,6 +210,7 @@ void __show_regs(struct pt_regs *regs)
void show_regs(struct pt_regs * regs)
{
__show_regs(regs);
+ dump_backtrace(regs, NULL);
}
static void tls_thread_flush(void)
@@ -297,12 +298,16 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
return 0;
}
+void tls_preserve_current_state(void)
+{
+ *task_user_tls(current) = read_sysreg(tpidr_el0);
+}
+
static void tls_thread_switch(struct task_struct *next)
{
unsigned long tpidr, tpidrro;
- tpidr = read_sysreg(tpidr_el0);
- *task_user_tls(current) = tpidr;
+ tls_preserve_current_state();
tpidr = *task_user_tls(next);
tpidrro = is_compat_thread(task_thread_info(next)) ?
@@ -355,6 +360,8 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
/*
* Complete any pending TLB or cache maintenance on this CPU in case
* the thread migrates to a different CPU.
+ * This full barrier is also required by the membarrier system
+ * call.
*/
dsb(ish);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index c142459a88f3..1b38c0150aec 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -623,6 +623,10 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
{
struct user_fpsimd_state *uregs;
uregs = &target->thread.fpsimd_state.user_fpsimd;
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
}
@@ -648,6 +652,10 @@ static int tls_get(struct task_struct *target, const struct user_regset *regset,
void *kbuf, void __user *ubuf)
{
unsigned long *tls = &target->thread.tp_value;
+
+ if (target == current)
+ tls_preserve_current_state();
+
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
}
@@ -894,21 +902,27 @@ static int compat_vfp_get(struct task_struct *target,
{
struct user_fpsimd_state *uregs;
compat_ulong_t fpscr;
- int ret;
+ int ret, vregs_end_pos;
uregs = &target->thread.fpsimd_state.user_fpsimd;
+ if (target == current)
+ fpsimd_preserve_current_state();
+
/*
* The VFP registers are packed into the fpsimd_state, so they all sit
* nicely together for us. We just need to create the fpscr separately.
*/
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
- VFP_STATE_SIZE - sizeof(compat_ulong_t));
+ vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
+ 0, vregs_end_pos);
if (count && !ret) {
fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
(uregs->fpcr & VFP_FPSCR_CTRL_MASK);
- ret = put_user(fpscr, (compat_ulong_t *)ubuf);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
+ vregs_end_pos, VFP_STATE_SIZE);
}
return ret;
@@ -921,20 +935,21 @@ static int compat_vfp_set(struct task_struct *target,
{
struct user_fpsimd_state *uregs;
compat_ulong_t fpscr;
- int ret;
-
- if (pos + count > VFP_STATE_SIZE)
- return -EIO;
+ int ret, vregs_end_pos;
uregs = &target->thread.fpsimd_state.user_fpsimd;
+ vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
- VFP_STATE_SIZE - sizeof(compat_ulong_t));
+ vregs_end_pos);
if (count && !ret) {
- ret = get_user(fpscr, (compat_ulong_t *)ubuf);
- uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
- uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
+ vregs_end_pos, VFP_STATE_SIZE);
+ if (!ret) {
+ uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
+ uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
+ }
}
fpsimd_flush_task_state(target);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 2c822ef94f34..d4b740538ad5 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -194,6 +194,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
}
name = of_flat_dt_get_machine_name();
+ if (!name)
+ return;
+
pr_info("Machine model: %s\n", name);
dump_stack_set_arch_desc("%s (DT)", name);
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 0f0279148bdc..e3e3293d1123 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -19,10 +19,14 @@
#include <linux/compat.h>
#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/freezer.h>
+#include <linux/stddef.h>
#include <linux/uaccess.h>
+#include <linux/sizes.h>
+#include <linux/string.h>
#include <linux/tracehook.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
@@ -42,10 +46,133 @@
struct rt_sigframe {
struct siginfo info;
struct ucontext uc;
+};
+
+struct frame_record {
u64 fp;
u64 lr;
};
+struct rt_sigframe_user_layout {
+ struct rt_sigframe __user *sigframe;
+ struct frame_record __user *next_frame;
+
+ unsigned long size; /* size of allocated sigframe data */
+ unsigned long limit; /* largest allowed size */
+
+ unsigned long fpsimd_offset;
+ unsigned long esr_offset;
+ unsigned long extra_offset;
+ unsigned long end_offset;
+};
+
+#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
+#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
+#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
+
+static void init_user_layout(struct rt_sigframe_user_layout *user)
+{
+ const size_t reserved_size =
+ sizeof(user->sigframe->uc.uc_mcontext.__reserved);
+
+ memset(user, 0, sizeof(*user));
+ user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
+
+ user->limit = user->size + reserved_size;
+
+ user->limit -= TERMINATOR_SIZE;
+ user->limit -= EXTRA_CONTEXT_SIZE;
+ /* Reserve space for extension and terminator ^ */
+}
+
+static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
+{
+ return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
+}
+
+/*
+ * Sanity limit on the approximate maximum size of signal frame we'll
+ * try to generate. Stack alignment padding and the frame record are
+ * not taken into account. This limit is not a guarantee and is
+ * NOT ABI.
+ */
+#define SIGFRAME_MAXSZ SZ_64K
+
+static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
+ unsigned long *offset, size_t size, bool extend)
+{
+ size_t padded_size = round_up(size, 16);
+
+ if (padded_size > user->limit - user->size &&
+ !user->extra_offset &&
+ extend) {
+ int ret;
+
+ user->limit += EXTRA_CONTEXT_SIZE;
+ ret = __sigframe_alloc(user, &user->extra_offset,
+ sizeof(struct extra_context), false);
+ if (ret) {
+ user->limit -= EXTRA_CONTEXT_SIZE;
+ return ret;
+ }
+
+ /* Reserve space for the __reserved[] terminator */
+ user->size += TERMINATOR_SIZE;
+
+ /*
+ * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
+ * the terminator:
+ */
+ user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
+ }
+
+ /* Still not enough space? Bad luck! */
+ if (padded_size > user->limit - user->size)
+ return -ENOMEM;
+
+ *offset = user->size;
+ user->size += padded_size;
+
+ return 0;
+}
+
+/*
+ * Allocate space for an optional record of <size> bytes in the user
+ * signal frame. The offset from the signal frame base address to the
+ * allocated block is assigned to *offset.
+ */
+static int sigframe_alloc(struct rt_sigframe_user_layout *user,
+ unsigned long *offset, size_t size)
+{
+ return __sigframe_alloc(user, offset, size, true);
+}
+
+/* Allocate the null terminator record and prevent further allocations */
+static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
+{
+ int ret;
+
+ /* Un-reserve the space reserved for the terminator: */
+ user->limit += TERMINATOR_SIZE;
+
+ ret = sigframe_alloc(user, &user->end_offset,
+ sizeof(struct _aarch64_ctx));
+ if (ret)
+ return ret;
+
+ /* Prevent further allocation: */
+ user->limit = user->size;
+ return 0;
+}
+
+static void __user *apply_user_offset(
+ struct rt_sigframe_user_layout const *user, unsigned long offset)
+{
+ char __user *base = (char __user *)user->sigframe;
+
+ return base + offset;
+}
+
static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
@@ -93,12 +220,159 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
return err ? -EFAULT : 0;
}
+struct user_ctxs {
+ struct fpsimd_context __user *fpsimd;
+};
+
+static int parse_user_sigframe(struct user_ctxs *user,
+ struct rt_sigframe __user *sf)
+{
+ struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
+ struct _aarch64_ctx __user *head;
+ char __user *base = (char __user *)&sc->__reserved;
+ size_t offset = 0;
+ size_t limit = sizeof(sc->__reserved);
+ bool have_extra_context = false;
+ char const __user *const sfp = (char const __user *)sf;
+
+ user->fpsimd = NULL;
+
+ if (!IS_ALIGNED((unsigned long)base, 16))
+ goto invalid;
+
+ while (1) {
+ int err = 0;
+ u32 magic, size;
+ char const __user *userp;
+ struct extra_context const __user *extra;
+ u64 extra_datap;
+ u32 extra_size;
+ struct _aarch64_ctx const __user *end;
+ u32 end_magic, end_size;
+
+ if (limit - offset < sizeof(*head))
+ goto invalid;
+
+ if (!IS_ALIGNED(offset, 16))
+ goto invalid;
+
+ head = (struct _aarch64_ctx __user *)(base + offset);
+ __get_user_error(magic, &head->magic, err);
+ __get_user_error(size, &head->size, err);
+ if (err)
+ return err;
+
+ if (limit - offset < size)
+ goto invalid;
+
+ switch (magic) {
+ case 0:
+ if (size)
+ goto invalid;
+
+ goto done;
+
+ case FPSIMD_MAGIC:
+ if (user->fpsimd)
+ goto invalid;
+
+ if (size < sizeof(*user->fpsimd))
+ goto invalid;
+
+ user->fpsimd = (struct fpsimd_context __user *)head;
+ break;
+
+ case ESR_MAGIC:
+ /* ignore */
+ break;
+
+ case EXTRA_MAGIC:
+ if (have_extra_context)
+ goto invalid;
+
+ if (size < sizeof(*extra))
+ goto invalid;
+
+ userp = (char const __user *)head;
+
+ extra = (struct extra_context const __user *)userp;
+ userp += size;
+
+ __get_user_error(extra_datap, &extra->datap, err);
+ __get_user_error(extra_size, &extra->size, err);
+ if (err)
+ return err;
+
+ /* Check for the dummy terminator in __reserved[]: */
+
+ if (limit - offset - size < TERMINATOR_SIZE)
+ goto invalid;
+
+ end = (struct _aarch64_ctx const __user *)userp;
+ userp += TERMINATOR_SIZE;
+
+ __get_user_error(end_magic, &end->magic, err);
+ __get_user_error(end_size, &end->size, err);
+ if (err)
+ return err;
+
+ if (end_magic || end_size)
+ goto invalid;
+
+ /* Prevent looping/repeated parsing of extra_context */
+ have_extra_context = true;
+
+ base = (__force void __user *)extra_datap;
+ if (!IS_ALIGNED((unsigned long)base, 16))
+ goto invalid;
+
+ if (!IS_ALIGNED(extra_size, 16))
+ goto invalid;
+
+ if (base != userp)
+ goto invalid;
+
+ /* Reject "unreasonably large" frames: */
+ if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
+ goto invalid;
+
+ /*
+ * Ignore trailing terminator in __reserved[]
+ * and start parsing extra data:
+ */
+ offset = 0;
+ limit = extra_size;
+ continue;
+
+ default:
+ goto invalid;
+ }
+
+ if (size < sizeof(*head))
+ goto invalid;
+
+ if (limit - offset < size)
+ goto invalid;
+
+ offset += size;
+ }
+
+done:
+ if (!user->fpsimd)
+ goto invalid;
+
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
static int restore_sigframe(struct pt_regs *regs,
struct rt_sigframe __user *sf)
{
sigset_t set;
int i, err;
- void *aux = sf->uc.uc_mcontext.__reserved;
+ struct user_ctxs user;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0)
@@ -117,12 +391,11 @@ static int restore_sigframe(struct pt_regs *regs,
regs->syscallno = ~0UL;
err |= !valid_user_regs(&regs->user_regs, current);
+ if (err == 0)
+ err = parse_user_sigframe(&user, sf);
- if (err == 0) {
- struct fpsimd_context *fpsimd_ctx =
- container_of(aux, struct fpsimd_context, head);
- err |= restore_fpsimd_context(fpsimd_ctx);
- }
+ if (err == 0)
+ err = restore_fpsimd_context(user.fpsimd);
return err;
}
@@ -163,16 +436,37 @@ badframe:
return 0;
}
-static int setup_sigframe(struct rt_sigframe __user *sf,
+/* Determine the layout of optional records in the signal frame */
+static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
+{
+ int err;
+
+ err = sigframe_alloc(user, &user->fpsimd_offset,
+ sizeof(struct fpsimd_context));
+ if (err)
+ return err;
+
+ /* fault information, if valid */
+ if (current->thread.fault_code) {
+ err = sigframe_alloc(user, &user->esr_offset,
+ sizeof(struct esr_context));
+ if (err)
+ return err;
+ }
+
+ return sigframe_alloc_end(user);
+}
+
+
+static int setup_sigframe(struct rt_sigframe_user_layout *user,
struct pt_regs *regs, sigset_t *set)
{
int i, err = 0;
- void *aux = sf->uc.uc_mcontext.__reserved;
- struct _aarch64_ctx *end;
+ struct rt_sigframe __user *sf = user->sigframe;
/* set up the stack frame for unwinding */
- __put_user_error(regs->regs[29], &sf->fp, err);
- __put_user_error(regs->regs[30], &sf->lr, err);
+ __put_user_error(regs->regs[29], &user->next_frame->fp, err);
+ __put_user_error(regs->regs[30], &user->next_frame->lr, err);
for (i = 0; i < 31; i++)
__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
@@ -186,58 +480,103 @@ static int setup_sigframe(struct rt_sigframe __user *sf,
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
if (err == 0) {
- struct fpsimd_context *fpsimd_ctx =
- container_of(aux, struct fpsimd_context, head);
+ struct fpsimd_context __user *fpsimd_ctx =
+ apply_user_offset(user, user->fpsimd_offset);
err |= preserve_fpsimd_context(fpsimd_ctx);
- aux += sizeof(*fpsimd_ctx);
}
/* fault information, if valid */
- if (current->thread.fault_code) {
- struct esr_context *esr_ctx =
- container_of(aux, struct esr_context, head);
+ if (err == 0 && user->esr_offset) {
+ struct esr_context __user *esr_ctx =
+ apply_user_offset(user, user->esr_offset);
+
__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
- aux += sizeof(*esr_ctx);
+ }
+
+ if (err == 0 && user->extra_offset) {
+ char __user *sfp = (char __user *)user->sigframe;
+ char __user *userp =
+ apply_user_offset(user, user->extra_offset);
+
+ struct extra_context __user *extra;
+ struct _aarch64_ctx __user *end;
+ u64 extra_datap;
+ u32 extra_size;
+
+ extra = (struct extra_context __user *)userp;
+ userp += EXTRA_CONTEXT_SIZE;
+
+ end = (struct _aarch64_ctx __user *)userp;
+ userp += TERMINATOR_SIZE;
+
+ /*
+ * extra_datap is just written to the signal frame.
+ * The value gets cast back to a void __user *
+ * during sigreturn.
+ */
+ extra_datap = (__force u64)userp;
+ extra_size = sfp + round_up(user->size, 16) - userp;
+
+ __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
+ __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
+ __put_user_error(extra_datap, &extra->datap, err);
+ __put_user_error(extra_size, &extra->size, err);
+
+ /* Add the terminator */
+ __put_user_error(0, &end->magic, err);
+ __put_user_error(0, &end->size, err);
}
/* set the "end" magic */
- end = aux;
- __put_user_error(0, &end->magic, err);
- __put_user_error(0, &end->size, err);
+ if (err == 0) {
+ struct _aarch64_ctx __user *end =
+ apply_user_offset(user, user->end_offset);
+
+ __put_user_error(0, &end->magic, err);
+ __put_user_error(0, &end->size, err);
+ }
return err;
}
-static struct rt_sigframe __user *get_sigframe(struct ksignal *ksig,
- struct pt_regs *regs)
+static int get_sigframe(struct rt_sigframe_user_layout *user,
+ struct ksignal *ksig, struct pt_regs *regs)
{
unsigned long sp, sp_top;
- struct rt_sigframe __user *frame;
+ int err;
+
+ init_user_layout(user);
+ err = setup_sigframe_layout(user);
+ if (err)
+ return err;
sp = sp_top = sigsp(regs->sp, ksig);
- sp = (sp - sizeof(struct rt_sigframe)) & ~15;
- frame = (struct rt_sigframe __user *)sp;
+ sp = round_down(sp - sizeof(struct frame_record), 16);
+ user->next_frame = (struct frame_record __user *)sp;
+
+ sp = round_down(sp, 16) - sigframe_size(user);
+ user->sigframe = (struct rt_sigframe __user *)sp;
/*
* Check that we can actually write to the signal frame.
*/
- if (!access_ok(VERIFY_WRITE, frame, sp_top - sp))
- frame = NULL;
+ if (!access_ok(VERIFY_WRITE, user->sigframe, sp_top - sp))
+ return -EFAULT;
- return frame;
+ return 0;
}
static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
- void __user *frame, int usig)
+ struct rt_sigframe_user_layout *user, int usig)
{
__sigrestore_t sigtramp;
regs->regs[0] = usig;
- regs->sp = (unsigned long)frame;
- regs->regs[29] = regs->sp + offsetof(struct rt_sigframe, fp);
+ regs->sp = (unsigned long)user->sigframe;
+ regs->regs[29] = (unsigned long)&user->next_frame->fp;
regs->pc = (unsigned long)ka->sa.sa_handler;
if (ka->sa.sa_flags & SA_RESTORER)
@@ -251,20 +590,22 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
+ struct rt_sigframe_user_layout user;
struct rt_sigframe __user *frame;
int err = 0;
- frame = get_sigframe(ksig, regs);
- if (!frame)
+ if (get_sigframe(&user, ksig, regs))
return 1;
+ frame = user.sigframe;
+
__put_user_error(0, &frame->uc.uc_flags, err);
__put_user_error(NULL, &frame->uc.uc_link, err);
err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
- err |= setup_sigframe(frame, regs, set);
+ err |= setup_sigframe(&user, regs, set);
if (err == 0) {
- setup_return(regs, &ksig->ka, frame, usig);
+ setup_return(regs, &ksig->ka, &user, usig);
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
regs->regs[1] = (unsigned long)&frame->info;
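With the layout now variable, anything inspecting a signal frame has to walk the _aarch64_ctx chain in uc_mcontext.__reserved[] rather than assume fixed offsets, stopping at the zero magic/size terminator that sigframe_alloc_end() reserves space for. A hedged userspace sketch of that walk (arm64 only; it assumes glibc's aarch64 mcontext_t exposes the kernel's __reserved[] area, and the record header plus magic values are copied from the uapi <asm/sigcontext.h> definitions to keep the example self-contained):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

/* header and magics as defined in the arm64 uapi <asm/sigcontext.h> */
struct aarch64_ctx_hdr {
	unsigned int magic;
	unsigned int size;
};
#define FPSIMD_MAGIC	0x46508001
#define ESR_MAGIC	0x45535201

static void walk_records(unsigned char *base, size_t limit)
{
	size_t offset = 0;

	while (offset + sizeof(struct aarch64_ctx_hdr) <= limit) {
		struct aarch64_ctx_hdr *head = (void *)(base + offset);

		if (!head->magic)		/* zero magic/size terminator */
			break;
		if (head->size < sizeof(*head) || head->size > limit - offset)
			break;			/* malformed record, give up */

		switch (head->magic) {
		case FPSIMD_MAGIC:
			printf("FPSIMD record, %u bytes\n", head->size);
			break;
		case ESR_MAGIC:
			printf("ESR record, %u bytes\n", head->size);
			break;
		default:
			printf("record %#x, %u bytes\n", head->magic, head->size);
		}
		offset += head->size;
	}
}

static void handler(int sig, siginfo_t *si, void *uc_)
{
	ucontext_t *uc = uc_;

	(void)sig; (void)si;
	walk_records(uc->uc_mcontext.__reserved,
		     sizeof(uc->uc_mcontext.__reserved));
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}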
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 321119881abf..dc66e6ec3a99 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -469,7 +469,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
*/
cell = of_get_property(dn, "reg", NULL);
if (!cell) {
- pr_err("%s: missing reg property\n", dn->full_name);
+ pr_err("%pOF: missing reg property\n", dn);
return INVALID_HWID;
}
@@ -478,7 +478,7 @@ static u64 __init of_get_cpu_mpidr(struct device_node *dn)
* Non affinity bits must be set to 0 in the DT
*/
if (hwid & ~MPIDR_HWID_BITMASK) {
- pr_err("%s: invalid reg property\n", dn->full_name);
+ pr_err("%pOF: invalid reg property\n", dn);
return INVALID_HWID;
}
return hwid;
@@ -627,8 +627,8 @@ static void __init of_parse_and_init_cpus(void)
goto next;
if (is_mpidr_duplicate(cpu_count, hwid)) {
- pr_err("%s: duplicate cpu reg properties in the DT\n",
- dn->full_name);
+ pr_err("%pOF: duplicate cpu reg properties in the DT\n",
+ dn);
goto next;
}
@@ -640,8 +640,8 @@ static void __init of_parse_and_init_cpus(void)
*/
if (hwid == cpu_logical_map(0)) {
if (bootcpu_valid) {
- pr_err("%s: duplicate boot cpu reg property in DT\n",
- dn->full_name);
+ pr_err("%pOF: duplicate boot cpu reg property in DT\n",
+ dn);
goto next;
}
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index feac80c22f61..09d37d66b630 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -210,6 +210,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
put_task_stack(tsk);
}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
void save_stack_trace(struct stack_trace *trace)
{
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 08243533e5ee..8d48b233e6ce 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -11,7 +11,7 @@
* for more details.
*/
-#include <linux/acpi.h>
+#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@@ -23,227 +23,11 @@
#include <linux/sched/topology.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/cpufreq.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>
-static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
-static DEFINE_MUTEX(cpu_scale_mutex);
-
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
- return per_cpu(cpu_scale, cpu);
-}
-
-static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
-{
- per_cpu(cpu_scale, cpu) = capacity;
-}
-
-static ssize_t cpu_capacity_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sprintf(buf, "%lu\n",
- arch_scale_cpu_capacity(NULL, cpu->dev.id));
-}
-
-static ssize_t cpu_capacity_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- int this_cpu = cpu->dev.id, i;
- unsigned long new_capacity;
- ssize_t ret;
-
- if (count) {
- ret = kstrtoul(buf, 0, &new_capacity);
- if (ret)
- return ret;
- if (new_capacity > SCHED_CAPACITY_SCALE)
- return -EINVAL;
-
- mutex_lock(&cpu_scale_mutex);
- for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
- set_capacity_scale(i, new_capacity);
- mutex_unlock(&cpu_scale_mutex);
- }
-
- return count;
-}
-
-static DEVICE_ATTR_RW(cpu_capacity);
-
-static int register_cpu_capacity_sysctl(void)
-{
- int i;
- struct device *cpu;
-
- for_each_possible_cpu(i) {
- cpu = get_cpu_device(i);
- if (!cpu) {
- pr_err("%s: too early to get CPU%d device!\n",
- __func__, i);
- continue;
- }
- device_create_file(cpu, &dev_attr_cpu_capacity);
- }
-
- return 0;
-}
-subsys_initcall(register_cpu_capacity_sysctl);
-
-static u32 capacity_scale;
-static u32 *raw_capacity;
-static bool cap_parsing_failed;
-
-static void __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
-{
- int ret;
- u32 cpu_capacity;
-
- if (cap_parsing_failed)
- return;
-
- ret = of_property_read_u32(cpu_node,
- "capacity-dmips-mhz",
- &cpu_capacity);
- if (!ret) {
- if (!raw_capacity) {
- raw_capacity = kcalloc(num_possible_cpus(),
- sizeof(*raw_capacity),
- GFP_KERNEL);
- if (!raw_capacity) {
- pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
- cap_parsing_failed = true;
- return;
- }
- }
- capacity_scale = max(cpu_capacity, capacity_scale);
- raw_capacity[cpu] = cpu_capacity;
- pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
- cpu_node->full_name, raw_capacity[cpu]);
- } else {
- if (raw_capacity) {
- pr_err("cpu_capacity: missing %s raw capacity\n",
- cpu_node->full_name);
- pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
- }
- cap_parsing_failed = true;
- kfree(raw_capacity);
- }
-}
-
-static void normalize_cpu_capacity(void)
-{
- u64 capacity;
- int cpu;
-
- if (!raw_capacity || cap_parsing_failed)
- return;
-
- pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
- mutex_lock(&cpu_scale_mutex);
- for_each_possible_cpu(cpu) {
- pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
- cpu, raw_capacity[cpu]);
- capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
- / capacity_scale;
- set_capacity_scale(cpu, capacity);
- pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
- cpu, arch_scale_cpu_capacity(NULL, cpu));
- }
- mutex_unlock(&cpu_scale_mutex);
-}
-
-#ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static bool cap_parsing_done;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
-
-static int
-init_cpu_capacity_callback(struct notifier_block *nb,
- unsigned long val,
- void *data)
-{
- struct cpufreq_policy *policy = data;
- int cpu;
-
- if (cap_parsing_failed || cap_parsing_done)
- return 0;
-
- switch (val) {
- case CPUFREQ_NOTIFY:
- pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
- cpumask_pr_args(policy->related_cpus),
- cpumask_pr_args(cpus_to_visit));
- cpumask_andnot(cpus_to_visit,
- cpus_to_visit,
- policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
- policy->cpuinfo.max_freq / 1000UL;
- capacity_scale = max(raw_capacity[cpu], capacity_scale);
- }
- if (cpumask_empty(cpus_to_visit)) {
- normalize_cpu_capacity();
- kfree(raw_capacity);
- pr_debug("cpu_capacity: parsing done\n");
- cap_parsing_done = true;
- schedule_work(&parsing_done_work);
- }
- }
- return 0;
-}
-
-static struct notifier_block init_cpu_capacity_notifier = {
- .notifier_call = init_cpu_capacity_callback,
-};
-
-static int __init register_cpufreq_notifier(void)
-{
- /*
- * on ACPI-based systems we need to use the default cpu capacity
- * until we have the necessary code to parse the cpu capacity, so
- * skip registering cpufreq notifier.
- */
- if (!acpi_disabled || cap_parsing_failed)
- return -EINVAL;
-
- if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
- pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
- return -ENOMEM;
- }
- cpumask_copy(cpus_to_visit, cpu_possible_mask);
-
- return cpufreq_register_notifier(&init_cpu_capacity_notifier,
- CPUFREQ_POLICY_NOTIFIER);
-}
-core_initcall(register_cpufreq_notifier);
-
-static void parsing_done_workfn(struct work_struct *work)
-{
- cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
- CPUFREQ_POLICY_NOTIFIER);
-}
-
-#else
-static int __init free_raw_capacity(void)
-{
- kfree(raw_capacity);
-
- return 0;
-}
-core_initcall(free_raw_capacity);
-#endif
-
static int __init get_cpu_for_node(struct device_node *node)
{
struct device_node *cpu_node;
@@ -255,13 +39,13 @@ static int __init get_cpu_for_node(struct device_node *node)
for_each_possible_cpu(cpu) {
if (of_get_cpu_node(cpu, NULL) == cpu_node) {
- parse_cpu_capacity(cpu_node, cpu);
+ topology_parse_cpu_capacity(cpu_node, cpu);
of_node_put(cpu_node);
return cpu;
}
}
- pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+ pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
of_node_put(cpu_node);
return -1;
@@ -287,8 +71,8 @@ static int __init parse_core(struct device_node *core, int cluster_id,
cpu_topology[cpu].core_id = core_id;
cpu_topology[cpu].thread_id = i;
} else {
- pr_err("%s: Can't get CPU for thread\n",
- t->full_name);
+ pr_err("%pOF: Can't get CPU for thread\n",
+ t);
of_node_put(t);
return -EINVAL;
}
@@ -300,15 +84,15 @@ static int __init parse_core(struct device_node *core, int cluster_id,
cpu = get_cpu_for_node(core);
if (cpu >= 0) {
if (!leaf) {
- pr_err("%s: Core has both threads and CPU\n",
- core->full_name);
+ pr_err("%pOF: Core has both threads and CPU\n",
+ core);
return -EINVAL;
}
cpu_topology[cpu].cluster_id = cluster_id;
cpu_topology[cpu].core_id = core_id;
} else if (leaf) {
- pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ pr_err("%pOF: Can't get CPU for leaf core\n", core);
return -EINVAL;
}
@@ -353,8 +137,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
has_cores = true;
if (depth == 0) {
- pr_err("%s: cpu-map children should be clusters\n",
- c->full_name);
+ pr_err("%pOF: cpu-map children should be clusters\n",
+ c);
of_node_put(c);
return -EINVAL;
}
@@ -362,8 +146,8 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
if (leaf) {
ret = parse_core(c, cluster_id, core_id++);
} else {
- pr_err("%s: Non-leaf cluster with core %s\n",
- cluster->full_name, name);
+ pr_err("%pOF: Non-leaf cluster with core %s\n",
+ cluster, name);
ret = -EINVAL;
}
@@ -375,7 +159,7 @@ static int __init parse_cluster(struct device_node *cluster, int depth)
} while (c);
if (leaf && !has_cores)
- pr_warn("%s: empty cluster\n", cluster->full_name);
+ pr_warn("%pOF: empty cluster\n", cluster);
if (leaf)
cluster_id++;
@@ -400,16 +184,14 @@ static int __init parse_dt_topology(void)
* cluster with restricted subnodes.
*/
map = of_get_child_by_name(cn, "cpu-map");
- if (!map) {
- cap_parsing_failed = true;
+ if (!map)
goto out;
- }
ret = parse_cluster(map, 0);
if (ret != 0)
goto out_map;
- normalize_cpu_capacity();
+ topology_normalize_cpu_scale();
/*
* Check that all cores are in the topology; the SMP code will
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 0805b44f986a..8a62648848e5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -140,7 +140,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
-static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
unsigned long irq_stack_ptr;
@@ -274,10 +274,12 @@ static DEFINE_RAW_SPINLOCK(die_lock);
void die(const char *str, struct pt_regs *regs, int err)
{
int ret;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&die_lock, flags);
oops_enter();
- raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
ret = __die(str, err, regs);
@@ -287,13 +289,15 @@ void die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- raw_spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
+
+ raw_spin_unlock_irqrestore(&die_lock, flags);
+
if (ret != NOTIFY_STOP)
do_exit(SIGSEGV);
}
@@ -344,22 +348,24 @@ static int call_undef_hook(struct pt_regs *regs)
if (compat_thumb_mode(regs)) {
/* 16-bit Thumb instruction */
- if (get_user(instr, (u16 __user *)pc))
+ __le16 instr_le;
+ if (get_user(instr_le, (__le16 __user *)pc))
goto exit;
- instr = le16_to_cpu(instr);
+ instr = le16_to_cpu(instr_le);
if (aarch32_insn_is_wide(instr)) {
u32 instr2;
- if (get_user(instr2, (u16 __user *)(pc + 2)))
+ if (get_user(instr_le, (__le16 __user *)(pc + 2)))
goto exit;
- instr2 = le16_to_cpu(instr2);
+ instr2 = le16_to_cpu(instr_le);
instr = (instr << 16) | instr2;
}
} else {
/* 32-bit ARM instruction */
- if (get_user(instr, (u32 __user *)pc))
+ __le32 instr_le;
+ if (get_user(instr_le, (__le32 __user *)pc))
goto exit;
- instr = le32_to_cpu(instr);
+ instr = le32_to_cpu(instr_le);
}
raw_spin_lock_irqsave(&undef_lock, flags);
@@ -517,7 +523,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
- pt_regs_write_reg(regs, rt, read_sysreg(cntfrq_el0));
+ pt_regs_write_reg(regs, rt, arch_timer_get_rate());
regs->pc += 4;
}
@@ -728,8 +734,6 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
break;
case BUG_TRAP_TYPE_WARN:
- /* Ideally, report_bug() should backtrace for us... but no. */
- dump_backtrace(regs, NULL);
break;
default:
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 7492d9009610..e8f759f764f2 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -37,7 +37,7 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
-extern char vdso_start, vdso_end;
+extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;
/*
@@ -125,14 +125,14 @@ static int __init vdso_init(void)
struct page **vdso_pagelist;
unsigned long pfn;
- if (memcmp(&vdso_start, "\177ELF", 4)) {
+ if (memcmp(vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
return -EINVAL;
}
- vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
+ vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
/* Allocate the vDSO pagelist, plus a page for the data. */
vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -145,7 +145,7 @@ static int __init vdso_init(void)
/* Grab the vDSO code pages. */
- pfn = sym_to_pfn(&vdso_start);
+ pfn = sym_to_pfn(vdso_start);
for (i = 0; i < vdso_pages; i++)
vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
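The switch from 'extern char vdso_start' to 'extern char vdso_start[]' is the usual treatment for linker-script symbols. The motivation is not spelled out in the diff, but the common one (assumed here) is that a plain char object has a compiler-visible size of one byte, so fortified or bounds-checking builds flag memcmp(&vdso_start, "\177ELF", 4) as an overread, while the incomplete-array form has no known size and already behaves as the address of a region. A sketch of the two declaration styles (illustrative only; the symbols come from the linker script, so this compiles but does not link on its own):

#include <string.h>

/* old style: a one-byte object; callers must write &vdso_start, and a
 * fortified memcmp(&vdso_start, "\177ELF", 4) looks like an overread */
extern char vdso_start_old, vdso_end_old;

/* new style: incomplete arrays; vdso_start decays to a pointer to the
 * region and carries no compiler-visible size to trip bounds checks */
extern char vdso_start[], vdso_end[];

int vdso_looks_like_elf(void)
{
	return memcmp(vdso_start, "\177ELF", 4) == 0;
}

unsigned long vdso_image_size(void)
{
	return (unsigned long)(vdso_end - vdso_start);
}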