Diffstat (limited to 'arch/arm64/kernel')
 -rw-r--r--  arch/arm64/kernel/fpsimd.c             | 31
 -rw-r--r--  arch/arm64/kernel/hw_breakpoint.c      | 44
 -rw-r--r--  arch/arm64/kernel/insn.c               |  4
 -rw-r--r--  arch/arm64/kernel/machine_kexec_file.c |  3
 -rw-r--r--  arch/arm64/kernel/traps.c              |  3
 5 files changed, 52 insertions(+), 33 deletions(-)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 35cb5e66c504..55c8f3ec6705 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -12,6 +12,7 @@
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
+#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
@@ -119,10 +120,20 @@ struct fpsimd_last_state_struct {
static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
/* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+ return READ_ONCE(__sve_default_vl);
+}
#ifdef CONFIG_ARM64_SVE
+static void set_sve_default_vl(int val)
+{
+ WRITE_ONCE(__sve_default_vl, val);
+}
+
/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
@@ -338,13 +349,13 @@ static unsigned int find_supported_vector_length(unsigned int vl)
return sve_vl_from_vq(__bit_to_vq(bit));
}
-#ifdef CONFIG_SYSCTL
+#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
static int sve_proc_do_default_vl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int ret;
- int vl = sve_default_vl;
+ int vl = get_sve_default_vl();
struct ctl_table tmp_table = {
.data = &vl,
.maxlen = sizeof(vl),
@@ -361,7 +372,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
if (!sve_vl_valid(vl))
return -EINVAL;
- sve_default_vl = find_supported_vector_length(vl);
+ set_sve_default_vl(find_supported_vector_length(vl));
return 0;
}
@@ -383,9 +394,9 @@ static int __init sve_sysctl_init(void)
return 0;
}
-#else /* ! CONFIG_SYSCTL */
+#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
-#endif /* ! CONFIG_SYSCTL */
+#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
@@ -868,7 +879,7 @@ void __init sve_setup(void)
* For the default VL, pick the maximum supported value <= 64.
* VL == 64 is guaranteed not to grow the signal frame.
*/
- sve_default_vl = find_supported_vector_length(64);
+ set_sve_default_vl(find_supported_vector_length(64));
bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
SVE_VQ_MAX);
@@ -889,7 +900,7 @@ void __init sve_setup(void)
pr_info("SVE: maximum available vector length %u bytes per vector\n",
sve_max_vl);
pr_info("SVE: default vector length %u bytes per vector\n",
- sve_default_vl);
+ get_sve_default_vl());
/* KVM decides whether to support mismatched systems. Just warn here: */
if (sve_max_virtualisable_vl < sve_max_vl)
@@ -1029,13 +1040,13 @@ void fpsimd_flush_thread(void)
* vector length configured: no kernel task can become a user
* task without an exec and hence a call to this function.
* By the time the first call to this function is made, all
- * early hardware probing is complete, so sve_default_vl
+ * early hardware probing is complete, so __sve_default_vl
* should be valid.
* If a bug causes this to go wrong, we make some noise and
* try to fudge thread.sve_vl to a safe value here.
*/
vl = current->thread.sve_vl_onexec ?
- current->thread.sve_vl_onexec : sve_default_vl;
+ current->thread.sve_vl_onexec : get_sve_default_vl();
if (WARN_ON(!sve_vl_valid(vl)))
vl = SVE_VL_MIN;
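
The fpsimd.c hunks above route all access to the shared default vector length through READ_ONCE()/WRITE_ONCE() accessors, so a lockless reader (such as the sysctl handler racing with exec-time thread flushing) always gets a single, untorn load. A minimal sketch of the same pattern, using hypothetical names (__default_val and its accessors) rather than the kernel's:

#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */

/* Shared default; lockless readers may race with writers. */
static int __default_val = -1;

/* One full load: the compiler may neither tear nor re-read it. */
static int get_default_val(void)
{
	return READ_ONCE(__default_val);
}

/* One full store: no tearing, no visible intermediate values. */
static void set_default_val(int val)
{
	WRITE_ONCE(__default_val, val);
}

The double-underscore prefix on the variable is the usual hint that it must only be touched through the accessors.
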
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 0b727edf4104..af234a1e08b7 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -730,6 +730,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
return 0;
}
+static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+ struct pt_regs *regs)
+{
+ int step = is_default_overflow_handler(wp);
+ struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+
+ info->trigger = addr;
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine, then
+ * handle the stepping ourselves since userspace really can't help
+ * us with this.
+ */
+ if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
+ step = 1;
+ else
+ perf_bp_event(wp, regs);
+
+ return step;
+}
+
static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -739,7 +760,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
u64 val;
struct perf_event *wp, **slots;
struct debug_info *debug_info;
- struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
slots = this_cpu_ptr(wp_on_reg);
@@ -777,25 +797,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
if (dist != 0)
continue;
- info = counter_arch_bp(wp);
- info->trigger = addr;
- perf_bp_event(wp, regs);
-
- /* Do we need to handle the stepping? */
- if (is_default_overflow_handler(wp))
- step = 1;
+ step = watchpoint_report(wp, addr, regs);
}
- if (min_dist > 0 && min_dist != -1) {
- /* No exact match found. */
- wp = slots[closest_match];
- info = counter_arch_bp(wp);
- info->trigger = addr;
- perf_bp_event(wp, regs);
- /* Do we need to handle the stepping? */
- if (is_default_overflow_handler(wp))
- step = 1;
- }
+ /* No exact match found? */
+ if (min_dist > 0 && min_dist != -1)
+ step = watchpoint_report(slots[closest_match], addr, regs);
+
rcu_read_unlock();
if (!step)
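
The new watchpoint_report() helper collapses two duplicated report-and-step blocks into one decision: the kernel single-steps either when the event uses the default overflow handler, or when kernel-mode code (e.g. a uaccess routine) trips an EL0 watchpoint, in which case perf is deliberately not notified because userspace cannot help with the step. A self-contained sketch of just that predicate, with hypothetical flattened stand-ins for the kernel types:

#include <stdbool.h>

/* Hypothetical stand-ins for the relevant kernel state. */
struct wp_ctx {
	bool default_handler;	/* is_default_overflow_handler(wp) */
	bool from_user_mode;	/* user_mode(regs) */
	bool wp_is_el0;		/* ctrl.privilege == AARCH64_BREAKPOINT_EL0 */
};

/*
 * Mirrors watchpoint_report()'s control flow: says whether the kernel
 * must handle the single step and whether perf is notified at all.
 */
static bool must_step(const struct wp_ctx *c, bool *notify_perf)
{
	if (!c->from_user_mode && c->wp_is_el0) {
		/* Kernel hit a userspace watchpoint: step it ourselves. */
		*notify_perf = false;
		return true;
	}
	*notify_perf = true;
	return c->default_handler;
}
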
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 684d871ae38d..a107375005bc 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -135,7 +135,7 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
int ret;
__le32 val;
- ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+ ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
if (!ret)
*insnp = le32_to_cpu(val);
@@ -151,7 +151,7 @@ static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
raw_spin_lock_irqsave(&patch_lock, flags);
waddr = patch_map(addr, FIX_TEXT_POKE0);
- ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
+ ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
patch_unmap(FIX_TEXT_POKE0);
raw_spin_unlock_irqrestore(&patch_lock, flags);
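
probe_kernel_read() and probe_kernel_write() were renamed to copy_from_kernel_nofault() and copy_to_kernel_nofault() in v5.8; the semantics are unchanged, returning 0 on success and a negative errno (rather than faulting) if the kernel address is bad. A short sketch of the read side, assuming a hypothetical helper name:

#include <linux/uaccess.h>	/* copy_from_kernel_nofault() */
#include <asm/insn.h>		/* AARCH64_INSN_SIZE */

/* Hypothetical: fetch one A64 instruction word without risking a fault. */
static int read_insn_nofault(void *addr, u32 *insnp)
{
	__le32 val;
	int ret;

	/* 0 on success, negative errno if addr cannot be read. */
	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (ret)
		return ret;

	/* A64 instructions are always stored little-endian. */
	*insnp = le32_to_cpu(val);
	return 0;
}
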
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 522e6f517ec0..361a1143e09e 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -219,8 +219,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
MEMBLOCK_NONE, &start, &end, NULL)
nr_ranges++;
- cmem = kmalloc(sizeof(struct crash_mem) +
- sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL);
+ cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
if (!cmem)
return -ENOMEM;
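
struct_size(p, member, count), from <linux/overflow.h>, replaces the open-coded sizeof(struct) + count * sizeof(element) for trailing flexible arrays, and saturates at SIZE_MAX if the multiplication overflows, so kmalloc() fails cleanly instead of returning an undersized buffer. A minimal sketch with a hypothetical layout modelled on struct crash_mem:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/types.h>	/* u64 */

/* Hypothetical trailing-array layout, like struct crash_mem. */
struct range_buf {
	unsigned int nr_ranges;
	struct {
		u64 start;
		u64 end;
	} ranges[];		/* flexible array member */
};

static struct range_buf *alloc_ranges(unsigned int n)
{
	/*
	 * sizeof(*buf) + n * sizeof(buf->ranges[0]), but saturating:
	 * on overflow kmalloc() sees SIZE_MAX and returns NULL.
	 */
	struct range_buf *buf = kmalloc(struct_size(buf, ranges, n), GFP_KERNEL);

	if (buf)
		buf->nr_ranges = n;
	return buf;
}
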
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 50cc30acf106..47f651df781c 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -376,7 +376,7 @@ static int call_undef_hook(struct pt_regs *regs)
if (!user_mode(regs)) {
__le32 instr_le;
- if (probe_kernel_address((__force __le32 *)pc, instr_le))
+ if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
goto exit;
instr = le32_to_cpu(instr_le);
} else if (compat_thumb_mode(regs)) {
@@ -813,6 +813,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
+ __show_regs(regs);
local_daif_mask();
panic("bad mode");
}
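
get_kernel_nofault(val, ptr) is the typed counterpart of copy_from_kernel_nofault(): it loads sizeof(val) bytes through ptr and evaluates to 0 on success or -EFAULT if the access faults, replacing the old probe_kernel_address() (note the swapped argument order in the diff above). A sketch mirroring the call_undef_hook() change, with a hypothetical helper name:

#include <linux/uaccess.h>	/* get_kernel_nofault() */

/* Hypothetical: safely peek at an instruction at a kernel-mode PC. */
static int peek_kernel_insn(unsigned long pc, u32 *insn)
{
	__le32 instr_le;

	/* Typed non-faulting load; the size comes from instr_le. */
	if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
		return -EFAULT;

	*insn = le32_to_cpu(instr_le);
	return 0;
}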