Diffstat (limited to 'arch/riscv/kernel/sys_hwprobe.c')
-rw-r--r--	arch/riscv/kernel/sys_hwprobe.c	223
1 file changed, 188 insertions(+), 35 deletions(-)
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 969ef3d59dbe..0f701ace3bb9 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -5,14 +5,22 @@
* more details.
*/
#include <linux/syscalls.h>
+#include <linux/completion.h>
+#include <linux/atomic.h>
+#include <linux/once.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
+#include <asm/processor.h>
+#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
+#include <asm/vendor_extensions/mips_hwprobe.h>
+#include <asm/vendor_extensions/sifive_hwprobe.h>
+#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>
@@ -23,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
bool first = true;
int cpu;
+ if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
+ pair->key != RISCV_HWPROBE_KEY_MIMPID &&
+ pair->key != RISCV_HWPROBE_KEY_MARCHID)
+ goto out;
+
for_each_cpu(cpu, cpus) {
u64 cpu_id;
@@ -53,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
}
}
+out:
pair->value = id;
}
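
A minimal userspace sketch (not part of this patch) of how these ID keys are queried through the riscv_hwprobe syscall. The raw syscall(2) invocation and the "all online CPUs" shorthand (cpusetsize == 0, cpus == NULL) follow the documented interface; the program itself is hypothetical, and a value of -1 means the probed CPUs disagree on that ID:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>	/* __NR_riscv_hwprobe, from recent kernel headers */
	#include <asm/hwprobe.h>	/* struct riscv_hwprobe, RISCV_HWPROBE_KEY_* */

	int main(void)
	{
		struct riscv_hwprobe pairs[] = {
			{ .key = RISCV_HWPROBE_KEY_MVENDORID },
			{ .key = RISCV_HWPROBE_KEY_MARCHID },
			{ .key = RISCV_HWPROBE_KEY_MIMPID },
		};

		/* cpusetsize == 0 and cpus == NULL means "probe all online CPUs" */
		if (syscall(__NR_riscv_hwprobe, pairs, 3, 0, NULL, 0))
			return 1;

		printf("mvendorid=%llx marchid=%llx mimpid=%llx\n",
		       (unsigned long long)pairs[0].value,
		       (unsigned long long)pairs[1].value,
		       (unsigned long long)pairs[2].value);
		return 0;
	}
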
@@ -69,7 +83,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
if (riscv_isa_extension_available(NULL, c))
pair->value |= RISCV_HWPROBE_IMA_C;
- if (has_vector())
+ if (has_vector() && riscv_isa_extension_available(NULL, v))
pair->value |= RISCV_HWPROBE_IMA_V;
/*
@@ -92,30 +106,55 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
* regardless of the kernel's configuration, as no other checks, besides
* presence in the hart_isa bitmap, are made.
*/
+ EXT_KEY(ZAAMO);
+ EXT_KEY(ZABHA);
+ EXT_KEY(ZACAS);
+ EXT_KEY(ZALASR);
+ EXT_KEY(ZALRSC);
+ EXT_KEY(ZAWRS);
EXT_KEY(ZBA);
EXT_KEY(ZBB);
- EXT_KEY(ZBS);
- EXT_KEY(ZICBOZ);
EXT_KEY(ZBC);
-
EXT_KEY(ZBKB);
EXT_KEY(ZBKC);
EXT_KEY(ZBKX);
+ EXT_KEY(ZBS);
+ EXT_KEY(ZCA);
+ EXT_KEY(ZCB);
+ EXT_KEY(ZCMOP);
+ EXT_KEY(ZICBOM);
+ EXT_KEY(ZICBOP);
+ EXT_KEY(ZICBOZ);
+ EXT_KEY(ZICNTR);
+ EXT_KEY(ZICOND);
+ EXT_KEY(ZIHINTNTL);
+ EXT_KEY(ZIHINTPAUSE);
+ EXT_KEY(ZIHPM);
+ EXT_KEY(ZIMOP);
EXT_KEY(ZKND);
EXT_KEY(ZKNE);
EXT_KEY(ZKNH);
EXT_KEY(ZKSED);
EXT_KEY(ZKSH);
EXT_KEY(ZKT);
- EXT_KEY(ZIHINTNTL);
EXT_KEY(ZTSO);
- EXT_KEY(ZACAS);
- EXT_KEY(ZICOND);
- EXT_KEY(ZIHINTPAUSE);
+ /*
+ * All of the following extensions must depend on the kernel's
+ * support for V.
+ */
if (has_vector()) {
EXT_KEY(ZVBB);
EXT_KEY(ZVBC);
+ EXT_KEY(ZVE32F);
+ EXT_KEY(ZVE32X);
+ EXT_KEY(ZVE64D);
+ EXT_KEY(ZVE64F);
+ EXT_KEY(ZVE64X);
+ EXT_KEY(ZVFBFMIN);
+ EXT_KEY(ZVFBFWMA);
+ EXT_KEY(ZVFH);
+ EXT_KEY(ZVFHMIN);
EXT_KEY(ZVKB);
EXT_KEY(ZVKG);
EXT_KEY(ZVKNED);
@@ -124,15 +163,17 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
EXT_KEY(ZVKSED);
EXT_KEY(ZVKSH);
EXT_KEY(ZVKT);
- EXT_KEY(ZVFH);
- EXT_KEY(ZVFHMIN);
}
- if (has_fpu()) {
- EXT_KEY(ZFH);
- EXT_KEY(ZFHMIN);
- EXT_KEY(ZFA);
- }
+ EXT_KEY(ZCD);
+ EXT_KEY(ZCF);
+ EXT_KEY(ZFA);
+ EXT_KEY(ZFBFMIN);
+ EXT_KEY(ZFH);
+ EXT_KEY(ZFHMIN);
+
+ if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
+ EXT_KEY(SUPM);
#undef EXT_KEY
}
@@ -140,7 +181,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
pair->value &= ~missing;
}
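
As a usage sketch (not part of this patch), a hypothetical userspace program can test the bits this function reports under RISCV_HWPROBE_KEY_IMA_EXT_0; because of the "pair->value &= ~missing" step, a bit is set only when every probed CPU has the extension:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/hwprobe.h>

	/* Returns 1 if all online CPUs advertise every bit in mask, else 0. */
	static int have_ext(unsigned long long mask)
	{
		struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
			return 0;
		return (pair.value & mask) == mask;
	}

	int main(void)
	{
		printf("Zba: %d\n", have_ext(RISCV_HWPROBE_EXT_ZBA));
		printf("V:   %d\n", have_ext(RISCV_HWPROBE_IMA_V));
		return 0;
	}
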
-static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
{
struct riscv_hwprobe pair;
@@ -161,13 +202,13 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus)
perf = this_perf;
if (perf != this_perf) {
- perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+ perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
break;
}
}
if (perf == -1ULL)
- return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+ return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
return perf;
}
@@ -175,12 +216,49 @@ static u64 hwprobe_misaligned(const struct cpumask *cpus)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
- return RISCV_HWPROBE_MISALIGNED_FAST;
+ return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;
if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
- return RISCV_HWPROBE_MISALIGNED_EMULATED;
+ return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
- return RISCV_HWPROBE_MISALIGNED_SLOW;
+ return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
+}
+#endif
+
+#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
+static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
+{
+ int cpu;
+ u64 perf = -1ULL;
+
+ /* Report supported/unsupported even if the speed wasn't probed */
+ for_each_cpu(cpu, cpus) {
+ int this_perf = per_cpu(vector_misaligned_access, cpu);
+
+ if (perf == -1ULL)
+ perf = this_perf;
+
+ if (perf != this_perf) {
+ perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
+ break;
+ }
+ }
+
+ if (perf == -1ULL)
+ return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
+
+ return perf;
+}
+#else
+static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
+{
+ if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
+ return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;
+
+ if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
+ return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;
+
+ return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif
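
The two helpers above feed the scalar and vector misaligned-access performance keys. A hypothetical userspace consumer (not part of this patch) might use them to choose between aligned and unaligned copy paths:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/hwprobe.h>

	int main(void)
	{
		struct riscv_hwprobe pairs[] = {
			{ .key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF },
			{ .key = RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF },
		};

		if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
			return 1;

		/* FAST means misaligned accesses beat equivalent byte accesses. */
		if (pairs[0].value == RISCV_HWPROBE_MISALIGNED_SCALAR_FAST)
			printf("use misaligned scalar accesses\n");
		if (pairs[1].value == RISCV_HWPROBE_MISALIGNED_VECTOR_FAST)
			printf("use misaligned vector accesses\n");
		return 0;
	}
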
@@ -208,14 +286,47 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
break;
case RISCV_HWPROBE_KEY_CPUPERF_0:
+ case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
pair->value = hwprobe_misaligned(cpus);
break;
+ case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
+ pair->value = hwprobe_vec_misaligned(cpus);
+ break;
+
case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
pair->value = 0;
if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
pair->value = riscv_cboz_block_size;
break;
+ case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM))
+ pair->value = riscv_cbom_block_size;
+ break;
+ case RISCV_HWPROBE_KEY_ZICBOP_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOP))
+ pair->value = riscv_cbop_block_size;
+ break;
+ case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
+ pair->value = user_max_virt_addr();
+ break;
+
+ case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
+ pair->value = riscv_timebase;
+ break;
+
+ case RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0:
+ hwprobe_isa_vendor_ext_sifive_0(pair, cpus);
+ break;
+
+ case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
+ hwprobe_isa_vendor_ext_thead_0(pair, cpus);
+ break;
+ case RISCV_HWPROBE_KEY_VENDOR_EXT_MIPS_0:
+ hwprobe_isa_vendor_ext_mips_0(pair, cpus);
+ break;
/*
* For forward compatibility, unknown keys don't fail the whole
@@ -359,29 +470,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
return 0;
}
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
- size_t pair_count, size_t cpusetsize,
- unsigned long __user *cpus_user,
- unsigned int flags)
-{
- if (flags & RISCV_HWPROBE_WHICH_CPUS)
- return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
- cpus_user, flags);
+#ifdef CONFIG_MMU
- return hwprobe_get_values(pairs, pair_count, cpusetsize,
- cpus_user, flags);
+static DECLARE_COMPLETION(boot_probes_done);
+static atomic_t pending_boot_probes = ATOMIC_INIT(1);
+
+void riscv_hwprobe_register_async_probe(void)
+{
+ atomic_inc(&pending_boot_probes);
}
-#ifdef CONFIG_MMU
+void riscv_hwprobe_complete_async_probe(void)
+{
+ if (atomic_dec_and_test(&pending_boot_probes))
+ complete(&boot_probes_done);
+}
-static int __init init_hwprobe_vdso_data(void)
+static int complete_hwprobe_vdso_data(void)
{
- struct vdso_data *vd = __arch_get_k_vdso_data();
- struct arch_vdso_data *avd = &vd->arch_data;
+ struct vdso_arch_data *avd = vdso_k_arch_data;
u64 id_bitsmash = 0;
struct riscv_hwprobe pair;
int key;
+ if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
+ wait_for_completion(&boot_probes_done);
+
/*
* Initialize vDSO data with the answers for the "all CPUs" case, to
* save a syscall in the common case.
@@ -409,13 +523,52 @@ static int __init init_hwprobe_vdso_data(void)
* vDSO should defer to the kernel for exotic cpu masks.
*/
avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+
+ /*
+ * Make sure all the VDSO values are visible before we look at them.
+ * This pairs with the implicit "no speculatively visible accesses"
+ * barrier in the VDSO hwprobe code.
+ */
+ smp_wmb();
+ avd->ready = true;
+ return 0;
+}
+
+static int __init init_hwprobe_vdso_data(void)
+{
+ struct vdso_arch_data *avd = vdso_k_arch_data;
+
+ /*
+ * Prevent the vDSO cached values from being used, as they're not ready
+ * yet.
+ */
+ avd->ready = false;
return 0;
}
arch_initcall_sync(init_hwprobe_vdso_data);
+#else
+
+static int complete_hwprobe_vdso_data(void) { return 0; }
+
#endif /* CONFIG_MMU */
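
A hypothetical kernel-side user of the register/complete pair above (assuming CONFIG_MMU and that the helpers are declared in a header visible to the caller, e.g. asm/hwprobe.h) takes a reference before kicking off a deferred probe and drops it once its per-CPU results are final, so complete_hwprobe_vdso_data() waits for it on the first hwprobe call:

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/kthread.h>
	#include <asm/hwprobe.h>	/* assumed home of the async-probe helpers */

	static int example_async_probe_fn(void *unused)
	{
		/* ... fill in per-CPU probe results here ... */
		riscv_hwprobe_complete_async_probe();	/* results final, drop reference */
		return 0;
	}

	static int __init example_start_async_probe(void)
	{
		struct task_struct *t;

		riscv_hwprobe_register_async_probe();	/* take reference before the kthread runs */
		t = kthread_run(example_async_probe_fn, NULL, "example_hwprobe");
		if (IS_ERR(t))
			riscv_hwprobe_complete_async_probe();	/* probe never started, drop it */
		return 0;
	}
	arch_initcall(example_start_async_probe);
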
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
+
+ if (flags & RISCV_HWPROBE_WHICH_CPUS)
+ return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+
+ return hwprobe_get_values(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+}
+
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
cpus, unsigned int, flags)
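
For completeness, a hypothetical userspace use of the RISCV_HWPROBE_WHICH_CPUS flag handled by hwprobe_get_cpus() above: the key/value pairs act as filters and the kernel narrows the passed-in CPU set to the CPUs satisfying all of them (semantics per the documented interface; the program is illustrative only):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/hwprobe.h>

	int main(void)
	{
		cpu_set_t cpus;
		struct riscv_hwprobe pair = {
			.key   = RISCV_HWPROBE_KEY_IMA_EXT_0,
			.value = RISCV_HWPROBE_IMA_V,	/* keep only CPUs with V */
		};

		/* Start from the CPUs we may run on; the kernel clears non-matching ones. */
		if (sched_getaffinity(0, sizeof(cpus), &cpus))
			return 1;

		if (syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(cpus), &cpus,
			    RISCV_HWPROBE_WHICH_CPUS))
			return 1;

		printf("%d CPU(s) in the affinity mask implement V\n", CPU_COUNT(&cpus));
		return 0;
	}
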