Diffstat (limited to 'arch/riscv/kernel/traps_misaligned.c')
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c | 116
1 file changed, 102 insertions(+), 14 deletions(-)
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 77c788660223..dd8e4af6583f 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -16,6 +16,7 @@
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>
+#include <asm/sbi.h>
#include <asm/vector.h>
#define INSN_MATCH_LB 0x3
@@ -368,9 +369,7 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
-#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
-#endif
if (!unaligned_enabled)
return -1;
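The per-CPU misaligned_access_speed value set above is what the hwprobe syscall reports back to userspace. As a hedged illustration (not part of this patch), a userspace query might look like the following, assuming kernel uapi headers that define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:

/* Hedged sketch: ask the kernel how scalar misaligned accesses perform.
 * Key and value names come from the uapi <asm/hwprobe.h>. */
#include <asm/hwprobe.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF,
	};

	/* pairs, pair_count, cpusetsize, cpus, flags; NULL/0 cpus = all CPUs */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	if (pair.value == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
		printf("scalar misaligned accesses are emulated\n");

	return 0;
}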
@@ -455,7 +454,7 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
val.data_u64 = 0;
if (user_mode(regs)) {
- if (copy_from_user(&val, (u8 __user *)addr, len))
+ if (copy_from_user_nofault(&val, (u8 __user *)addr, len))
return -1;
} else {
memcpy(&val, (u8 *)addr, len);
@@ -556,7 +555,7 @@ static int handle_scalar_misaligned_store(struct pt_regs *regs)
return -EOPNOTSUPP;
if (user_mode(regs)) {
- if (copy_to_user((u8 __user *)addr, &val, len))
+ if (copy_to_user_nofault((u8 __user *)addr, &val, len))
return -1;
} else {
memcpy((u8 *)addr, &val, len);
@@ -626,6 +625,10 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
{
int cpu;
+ /*
+ * Although documented as being very slow, schedule_on_each_cpu() is
+ * used here because kernel_vector_begin() expects irqs to be enabled
+ * and will panic() otherwise.
+ */
schedule_on_each_cpu(check_vector_unaligned_access_emulated);
for_each_online_cpu(cpu)
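The comment above hinges on the execution context of the two cross-CPU primitives, and it is also why the scalar probe below can switch to on_each_cpu(): schedule_on_each_cpu() runs its callback from a per-CPU work item in sleepable process context with irqs enabled, while on_each_cpu() runs it from IPI context with irqs disabled. A minimal sketch of the two shapes (the callback and function names here are illustrative, not from the patch):

/* Hedged sketch contrasting the two cross-CPU call primitives. */
#include <linux/workqueue.h>
#include <linux/smp.h>

static void probe_work(struct work_struct *work)
{
	/* Process context, irqs enabled: may sleep; kernel_vector_begin() is safe. */
}

static void probe_ipi(void *arg)
{
	/* IPI context, irqs disabled: must not sleep. */
}

static void run_probes(void)
{
	/* Slow path: queues and flushes one work item per CPU. */
	schedule_on_each_cpu(probe_work);

	/* Fast path: sends IPIs; wait=1 blocks until every CPU has run. */
	on_each_cpu(probe_ipi, NULL, 1);
}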
@@ -642,11 +645,23 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
}
#endif
+static bool all_cpus_unaligned_scalar_access_emulated(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ if (per_cpu(misaligned_access_speed, cpu) !=
+ RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
+ return false;
+
+ return true;
+}
+
#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
static bool unaligned_ctl __read_mostly;
-void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(void *arg __always_unused)
{
int cpu = smp_processor_id();
long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -657,6 +672,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
__asm__ __volatile__ (
" "REG_L" %[tmp], 1(%[ptr])\n"
: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+}
+
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+ long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+
+ check_unaligned_access_emulated(NULL);
/*
* If unaligned_ctl is already set, this means that we detected that all
@@ -665,26 +687,23 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
*/
if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
- while (true)
- cpu_relax();
+ return -EINVAL;
}
+
+ return 0;
}
bool __init check_unaligned_access_emulated_all_cpus(void)
{
- int cpu;
-
/*
* We can only support PR_UNALIGN controls if all CPUs have misaligned
* accesses emulated since tasks requesting such control can run on any
* CPU.
*/
- schedule_on_each_cpu(check_unaligned_access_emulated);
+ on_each_cpu(check_unaligned_access_emulated, NULL, 1);
- for_each_online_cpu(cpu)
- if (per_cpu(misaligned_access_speed, cpu)
- != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
- return false;
+ if (!all_cpus_unaligned_scalar_access_emulated())
+ return false;
unaligned_ctl = true;
return true;
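With unaligned_ctl set, the kernel can honor the PR_UNALIGN controls mentioned in the comment above. A hedged userspace example of what that control enables (the helper name is mine):

/* Hedged sketch: per-task misaligned access policy via prctl(2). */
#include <sys/prctl.h>

static void misaligned_accesses_to_sigbus(void)
{
	/* Deliver SIGBUS instead of silently emulating misaligned accesses. */
	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0);
}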
@@ -699,4 +718,73 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
{
return false;
}
+static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
+static bool misaligned_traps_delegated;
+
+#ifdef CONFIG_RISCV_SBI
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
+{
+ if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) &&
+ misaligned_traps_delegated) {
+ pr_crit("Misaligned trap delegation non homogeneous (expected delegated)");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void __init unaligned_access_init(void)
+{
+ int ret;
+
+ ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
+ if (ret)
+ return;
+
+ misaligned_traps_delegated = true;
+ pr_info("SBI misaligned access exception delegation ok\n");
+ /*
+ * Note that we don't have to take any specific action here: if the
+ * delegation is successful, check_unaligned_access_emulated() will
+ * verify that the platform does indeed trap on misaligned accesses.
+ */
+}
+#else
+void __init unaligned_access_init(void) {}
+
+static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
+{
+ return 0;
+}
+
#endif
+
+int cpu_online_unaligned_access_init(unsigned int cpu)
+{
+ int ret;
+
+ ret = cpu_online_sbi_unaligned_setup(cpu);
+ if (ret)
+ return ret;
+
+ return cpu_online_check_unaligned_access_emulated(cpu);
+}
+
+bool misaligned_traps_can_delegate(void)
+{
+ /*
+ * Either we successfully requested misaligned trap delegation for all
+ * CPUs, or the SBI does not implement the FWFT extension but delegates
+ * the exception by default, which we detect by all CPUs reporting
+ * emulated misaligned accesses.
+ */
+ return misaligned_traps_delegated ||
+ all_cpus_unaligned_scalar_access_emulated();
+}
+EXPORT_SYMBOL_GPL(misaligned_traps_can_delegate);
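cpu_online_unaligned_access_init() above is meant to run whenever a CPU comes online, so that late-plugged harts get the same SBI delegation and emulation check as the boot CPUs. The registration itself is not in this file; as a hedged sketch under that assumption, a caller might wire it into the CPU hotplug state machine like this (the state name is illustrative):

/* Hedged sketch: registering the online hook with the cpuhp machinery.
 * For AP states the startup callback runs on the CPU being onlined. */
#include <linux/cpuhotplug.h>

static int __init unaligned_hp_init(void)
{
	int ret;

	/* Run cpu_online_unaligned_access_init() on every CPU that comes up. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv:unaligned-online",
				cpu_online_unaligned_access_init, NULL);

	return ret < 0 ? ret : 0;
}
arch_initcall(unaligned_hp_init);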