author     Linus Torvalds <torvalds@linux-foundation.org>  2023-11-01 09:34:55 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-11-01 09:34:55 -1000
commit     56ec8e4cd8cbff3c96c53cd8303bba924613b5ce (patch)
tree       72d38b1c2a6d8be1a25eba18b7fd97d12bc7ab81 /arch/arm64/kernel
parent     7d461b291e65938f15f56fe58da2303b07578a76 (diff)
parent     14dcf78a6c042dd9421b11485b394c6273568bca (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "No major architecture features this time around, just some new HWCAP
  definitions, support for the Ampere SoC PMUs and a few fixes/cleanups.
  The bulk of the changes is reworking of the CPU capability checking
  code (cpus_have_cap() etc).

   - Major refactoring of the CPU capability detection logic resulting
     in the removal of the cpus_have_const_cap() function and migrating
     the code to "alternative" branches where possible

   - Backtrace/kgdb: use IPIs and pseudo-NMI

   - Perf and PMU:
      - Add support for Ampere SoC PMUs
      - Multi-DTC improvements for larger CMN configurations with
        multiple Debug & Trace Controllers
      - Rework the Arm CoreSight PMU driver to allow separate
        registration of vendor backend modules
      - Fixes: add missing MODULE_DEVICE_TABLE to the amlogic perf
        driver; use device_get_match_data() in the xgene driver; fix
        NULL pointer dereference in the hisi driver caused by calling
        cpuhp_state_remove_instance(); use-after-free in the hisi driver

   - HWCAP updates:
      - FEAT_SVE_B16B16 (BFloat16)
      - FEAT_LRCPC3 (release consistency model)
      - FEAT_LSE128 (128-bit atomic instructions)

   - SVE: remove a couple of pseudo registers from the cpufeature code.
     There is logic in place already to detect mismatched SVE features

   - Miscellaneous:
      - Reduce the default swiotlb size (currently 64MB) if no ZONE_DMA
        bouncing is needed. The buffer is still required for small
        kmalloc() buffers
      - Fix module PLT counting with !RANDOMIZE_BASE
      - Restrict CPU_BIG_ENDIAN to LLVM IAS 15.x or newer
      - Move synchronisation code out of the set_ptes() loop
      - More compact cpufeature displaying enabled cores
      - Kselftest updates for the new CPU features"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (83 commits)
  arm64: Restrict CPU_BIG_ENDIAN to GNU as or LLVM IAS 15.x or newer
  arm64: module: Fix PLT counting when CONFIG_RANDOMIZE_BASE=n
  arm64, irqchip/gic-v3, ACPI: Move MADT GICC enabled check into a helper
  perf: hisi: Fix use-after-free when register pmu fails
  drivers/perf: hisi_pcie: Initialize event->cpu only on success
  drivers/perf: hisi_pcie: Check the type first in pmu::event_init()
  arm64: cpufeature: Change DBM to display enabled cores
  arm64: cpufeature: Display the set of cores with a feature
  perf/arm-cmn: Enable per-DTC counter allocation
  perf/arm-cmn: Rework DTC counters (again)
  perf/arm-cmn: Fix DTC domain detection
  drivers: perf: arm_pmuv3: Drop some unused arguments from armv8_pmu_init()
  drivers: perf: arm_pmuv3: Read PMMIR_EL1 unconditionally
  drivers/perf: hisi: use cpuhp_state_remove_instance_nocalls() for hisi_hns3_pmu uninit process
  clocksource/drivers/arm_arch_timer: limit XGene-1 workaround
  arm64: Remove system_uses_lse_atomics()
  arm64: Mark the 'addr' argument to set_ptes() and __set_pte_at() as unused
  drivers/perf: xgene: Use device_get_match_data()
  perf/amlogic: add missing MODULE_DEVICE_TABLE
  arm64/mm: Hoist synchronization out of set_ptes() loop
  ...
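Editor's note: the sketch below is a minimal, self-contained userspace C model of the pattern this series moves toward — capability state is gathered while CPUs come up, finalized exactly once, and hot paths then test an immutable per-capability value instead of re-reading a shared bitmap. The helper names (detect_cpu(), finalize_system_caps(), have_final_cap()) are invented purely for illustration and are not the kernel's API; the real code distinguishes several capability scopes (boot CPU, local CPU, system-wide) and patches the checks in place via the "alternatives" mechanism, all of which is omitted here.

    /* Toy model only; not kernel code. Build: cc -std=c99 -o captest captest.c */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { CAP_SSBS, CAP_DIT, CAP_KPTI, NCAPS };

    static unsigned long system_caps = ~0UL; /* intersection across all CPUs   */
    static bool caps_finalized;              /* stands in for "alternatives patched" */
    static bool final_cap[NCAPS];            /* immutable snapshot for hot paths */

    /* Called once per CPU as it is brought up, before finalization. */
    static void detect_cpu(unsigned long caps_on_this_cpu)
    {
    	assert(!caps_finalized);
    	system_caps &= caps_on_this_cpu; /* system-wide = present on every CPU */
    }

    /* Called once after all boot-time CPUs have been probed. */
    static void finalize_system_caps(void)
    {
    	for (int i = 0; i < NCAPS; i++)
    		final_cap[i] = system_caps & (1UL << i);
    	caps_finalized = true;
    }

    /* New-style check: only valid once capabilities are finalized. */
    static bool have_final_cap(int cap)
    {
    	assert(caps_finalized);
    	return final_cap[cap];
    }

    int main(void)
    {
    	detect_cpu((1UL << CAP_SSBS) | (1UL << CAP_DIT) | (1UL << CAP_KPTI));
    	detect_cpu((1UL << CAP_SSBS) | (1UL << CAP_KPTI)); /* a CPU without DIT */
    	finalize_system_caps();

    	printf("SSBS=%d DIT=%d KPTI=%d\n",
    	       have_final_cap(CAP_SSBS),
    	       have_final_cap(CAP_DIT),
    	       have_final_cap(CAP_KPTI));
    	return 0;
    }

In the kernel itself the finalized check is cpus_have_final_cap()/alternative_has_cap_unlikely() — visible in the module-plts.c, process.c, suspend.c, sys_compat.c and traps.c hunks below — which resolves to a patched branch rather than a bitmap load at runtime.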
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/acpi_parking_protocol.c |   2
-rw-r--r--  arch/arm64/kernel/cpu_errata.c            |  17
-rw-r--r--  arch/arm64/kernel/cpufeature.c            | 272
-rw-r--r--  arch/arm64/kernel/cpuinfo.c               |   3
-rw-r--r--  arch/arm64/kernel/efi.c                   |   3
-rw-r--r--  arch/arm64/kernel/fpsimd.c                | 149
-rw-r--r--  arch/arm64/kernel/idle.c                  |   4
-rw-r--r--  arch/arm64/kernel/module-plts.c           |  13
-rw-r--r--  arch/arm64/kernel/mte.c                   |   4
-rw-r--r--  arch/arm64/kernel/process.c               |   2
-rw-r--r--  arch/arm64/kernel/proton-pack.c           |   2
-rw-r--r--  arch/arm64/kernel/smp.c                   | 147
-rw-r--r--  arch/arm64/kernel/suspend.c               |  13
-rw-r--r--  arch/arm64/kernel/sys_compat.c            |   2
-rw-r--r--  arch/arm64/kernel/traps.c                 |   2
-rw-r--r--  arch/arm64/kernel/vdso.c                  |   2
16 files changed, 331 insertions, 306 deletions
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index b1990e38aed0..e1be29e608b7 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -103,7 +103,7 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
&mailbox->entry_point);
writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
- arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+ arch_send_wakeup_ipi(cpu);
return 0;
}
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 5706e74c5578..e29e0fea63fb 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -121,22 +121,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
-static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
-static void __maybe_unused
-cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
-{
- struct arm64_ftr_reg *regp;
-
- regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
- if (!regp)
- return;
-
- raw_spin_lock(&reg_user_mask_modification);
- if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
- regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
- raw_spin_unlock(&reg_user_mask_modification);
-}
-
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -727,7 +711,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A510 r0p0 - r1p1 */
ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
- .cpu_enable = cpu_clear_bf16_from_user_emulation,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2966298
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 444a73c2e638..f6b2e2906fc9 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -279,6 +279,8 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_B16B16_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0),
@@ -611,18 +613,6 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
ARM64_FTR_END,
};
-static const struct arm64_ftr_bits ftr_zcr[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
- ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_WIDTH, 0), /* LEN */
- ARM64_FTR_END,
-};
-
-static const struct arm64_ftr_bits ftr_smcr[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
- SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_WIDTH, 0), /* LEN */
- ARM64_FTR_END,
-};
-
/*
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
@@ -735,10 +725,6 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
- /* Op1 = 0, CRn = 1, CRm = 2 */
- ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
- ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr),
-
/* Op1 = 1, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
@@ -1040,22 +1026,26 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
- info->reg_zcr = read_zcr_features();
- init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
+ unsigned long cpacr = cpacr_save_enable_kernel_sve();
+
vec_init_vq_map(ARM64_VEC_SVE);
+
+ cpacr_restore(cpacr);
}
if (IS_ENABLED(CONFIG_ARM64_SME) &&
id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
- info->reg_smcr = read_smcr_features();
+ unsigned long cpacr = cpacr_save_enable_kernel_sme();
+
/*
* We mask out SMPS since even if the hardware
* supports priorities the kernel does not at present
* and we block access to them.
*/
info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
- init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
vec_init_vq_map(ARM64_VEC_SME);
+
+ cpacr_restore(cpacr);
}
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
@@ -1289,32 +1279,34 @@ void update_cpu_features(int cpu,
taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
+ /* Probe vector lengths */
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
- info->reg_zcr = read_zcr_features();
- taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
- info->reg_zcr, boot->reg_zcr);
+ if (!system_capabilities_finalized()) {
+ unsigned long cpacr = cpacr_save_enable_kernel_sve();
- /* Probe vector lengths */
- if (!system_capabilities_finalized())
vec_update_vq_map(ARM64_VEC_SVE);
+
+ cpacr_restore(cpacr);
+ }
}
if (IS_ENABLED(CONFIG_ARM64_SME) &&
id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
- info->reg_smcr = read_smcr_features();
+ unsigned long cpacr = cpacr_save_enable_kernel_sme();
+
/*
* We mask out SMPS since even if the hardware
* supports priorities the kernel does not at present
* and we block access to them.
*/
info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
- taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu,
- info->reg_smcr, boot->reg_smcr);
/* Probe vector lengths */
if (!system_capabilities_finalized())
vec_update_vq_map(ARM64_VEC_SME);
+
+ cpacr_restore(cpacr);
}
/*
@@ -1564,14 +1556,6 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}
-static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
-{
- u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
-
- return cpuid_feature_extract_signed_field(pfr0,
- ID_AA64PFR0_EL1_FP_SHIFT) < 0;
-}
-
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
int scope)
{
@@ -1621,7 +1605,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
if (is_kdump_kernel())
return false;
- if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
+ if (cpus_have_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
return false;
return has_cpuid_feature(entry, scope);
@@ -1754,16 +1738,15 @@ void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int), int flags);
-static phys_addr_t kpti_ng_temp_alloc;
+static phys_addr_t __initdata kpti_ng_temp_alloc;
-static phys_addr_t kpti_ng_pgd_alloc(int shift)
+static phys_addr_t __init kpti_ng_pgd_alloc(int shift)
{
kpti_ng_temp_alloc -= PAGE_SIZE;
return kpti_ng_temp_alloc;
}
-static void
-kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+static int __init __kpti_install_ng_mappings(void *__unused)
{
typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
extern kpti_remap_fn idmap_kpti_install_ng_mappings;
@@ -1776,20 +1759,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
pgd_t *kpti_ng_temp_pgd;
u64 alloc = 0;
- if (__this_cpu_read(this_cpu_vector) == vectors) {
- const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
-
- __this_cpu_write(this_cpu_vector, v);
- }
-
- /*
- * We don't need to rewrite the page-tables if either we've done
- * it already or we have KASLR enabled and therefore have not
- * created any global mappings at all.
- */
- if (arm64_use_ng_mappings)
- return;
-
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
if (!cpu) {
@@ -1826,14 +1795,39 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
free_pages(alloc, order);
arm64_use_ng_mappings = true;
}
+
+ return 0;
+}
+
+static void __init kpti_install_ng_mappings(void)
+{
+ /*
+ * We don't need to rewrite the page-tables if either we've done
+ * it already or we have KASLR enabled and therefore have not
+ * created any global mappings at all.
+ */
+ if (arm64_use_ng_mappings)
+ return;
+
+ stop_machine(__kpti_install_ng_mappings, NULL, cpu_online_mask);
}
+
#else
-static void
-kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+static inline void kpti_install_ng_mappings(void)
{
}
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+static void cpu_enable_kpti(struct arm64_cpu_capabilities const *cap)
+{
+ if (__this_cpu_read(this_cpu_vector) == vectors) {
+ const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
+
+ __this_cpu_write(this_cpu_vector, v);
+ }
+
+}
+
static int __init parse_kpti(char *str)
{
bool enabled;
@@ -1848,6 +1842,8 @@ static int __init parse_kpti(char *str)
early_param("kpti", parse_kpti);
#ifdef CONFIG_ARM64_HW_AFDBM
+static struct cpumask dbm_cpus __read_mostly;
+
static inline void __cpu_enable_hw_dbm(void)
{
u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
@@ -1883,35 +1879,22 @@ static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
{
- if (cpu_can_use_dbm(cap))
+ if (cpu_can_use_dbm(cap)) {
__cpu_enable_hw_dbm();
+ cpumask_set_cpu(smp_processor_id(), &dbm_cpus);
+ }
}
static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
int __unused)
{
- static bool detected = false;
/*
* DBM is a non-conflicting feature. i.e, the kernel can safely
* run a mix of CPUs with and without the feature. So, we
* unconditionally enable the capability to allow any late CPU
* to use the feature. We only enable the control bits on the
- * CPU, if it actually supports.
- *
- * We have to make sure we print the "feature" detection only
- * when at least one CPU actually uses it. So check if this CPU
- * can actually use it and print the message exactly once.
- *
- * This is safe as all CPUs (including secondary CPUs - due to the
- * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
- * goes through the "matches" check exactly once. Also if a CPU
- * matches the criteria, it is guaranteed that the CPU will turn
- * the DBM on, as the capability is unconditionally enabled.
+ * CPU, if it is supported.
*/
- if (!detected && cpu_can_use_dbm(cap)) {
- detected = true;
- pr_info("detected: Hardware dirty bit management\n");
- }
return true;
}
@@ -1944,8 +1927,6 @@ int get_cpu_with_amu_feat(void)
static void cpu_amu_enable(struct arm64_cpu_capabilities const *cap)
{
if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {
- pr_info("detected CPU%d: Activity Monitors Unit (AMU)\n",
- smp_processor_id());
cpumask_set_cpu(smp_processor_id(), &amu_cpus);
/* 0 reference values signal broken/disabled counters */
@@ -2190,12 +2171,23 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
}
#endif /* CONFIG_ARM64_MTE */
+static void user_feature_fixup(void)
+{
+ if (cpus_have_cap(ARM64_WORKAROUND_2658417)) {
+ struct arm64_ftr_reg *regp;
+
+ regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
+ if (regp)
+ regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
+ }
+}
+
static void elf_hwcap_fixup(void)
{
-#ifdef CONFIG_ARM64_ERRATUM_1742098
- if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
+#ifdef CONFIG_COMPAT
+ if (cpus_have_cap(ARM64_WORKAROUND_1742098))
compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
-#endif /* ARM64_ERRATUM_1742098 */
+#endif /* CONFIG_COMPAT */
}
#ifdef CONFIG_KVM
@@ -2351,7 +2343,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Kernel page table isolation (KPTI)",
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
- .cpu_enable = kpti_install_ng_mappings,
+ .cpu_enable = cpu_enable_kpti,
.matches = unmap_kernel_at_el0,
/*
* The ID feature fields below are used to indicate that
@@ -2361,11 +2353,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, CSV3, IMP)
},
{
- /* FP/SIMD is not implemented */
- .capability = ARM64_HAS_NO_FPSIMD,
- .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
- .min_field_value = 0,
- .matches = has_no_fpsimd,
+ .capability = ARM64_HAS_FPSIMD,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_fpsimd,
+ ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, FP, IMP)
},
#ifdef CONFIG_ARM64_PMEM
{
@@ -2388,7 +2380,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.desc = "Scalable Vector Extension",
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SVE,
- .cpu_enable = sve_kernel_enable,
+ .cpu_enable = cpu_enable_sve,
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP)
},
@@ -2405,16 +2397,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
#endif /* CONFIG_ARM64_RAS_EXTN */
#ifdef CONFIG_ARM64_AMU_EXTN
{
- /*
- * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
- * Therefore, don't provide .desc as we don't want the detection
- * message to be shown until at least one CPU is detected to
- * support the feature.
- */
+ .desc = "Activity Monitors Unit (AMU)",
.capability = ARM64_HAS_AMU_EXTN,
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.matches = has_amu,
.cpu_enable = cpu_amu_enable,
+ .cpus = &amu_cpus,
ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, AMU, IMP)
},
#endif /* CONFIG_ARM64_AMU_EXTN */
@@ -2454,18 +2442,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
},
#ifdef CONFIG_ARM64_HW_AFDBM
{
- /*
- * Since we turn this on always, we don't want the user to
- * think that the feature is available when it may not be.
- * So hide the description.
- *
- * .desc = "Hardware pagetable Dirty Bit Management",
- *
- */
+ .desc = "Hardware dirty bit management",
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.capability = ARM64_HW_DBM,
.matches = has_hw_dbm,
.cpu_enable = cpu_enable_hw_dbm,
+ .cpus = &dbm_cpus,
ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, HAFDBS, DBM)
},
#endif
@@ -2641,7 +2623,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME,
.matches = has_cpuid_feature,
- .cpu_enable = sme_kernel_enable,
+ .cpu_enable = cpu_enable_sme,
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP)
},
/* FA64 should be sorted after the base SME capability */
@@ -2650,7 +2632,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME_FA64,
.matches = has_cpuid_feature,
- .cpu_enable = fa64_kernel_enable,
+ .cpu_enable = cpu_enable_fa64,
ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP)
},
{
@@ -2658,7 +2640,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.capability = ARM64_SME2,
.matches = has_cpuid_feature,
- .cpu_enable = sme2_kernel_enable,
+ .cpu_enable = cpu_enable_sme2,
ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2)
},
#endif /* CONFIG_ARM64_SME */
@@ -2787,6 +2769,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR0_EL1, SHA2, SHA512, CAP_HWCAP, KERNEL_HWCAP_SHA512),
HWCAP_CAP(ID_AA64ISAR0_EL1, CRC32, IMP, CAP_HWCAP, KERNEL_HWCAP_CRC32),
HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, IMP, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
+ HWCAP_CAP(ID_AA64ISAR0_EL1, ATOMIC, FEAT_LSE128, CAP_HWCAP, KERNEL_HWCAP_LSE128),
HWCAP_CAP(ID_AA64ISAR0_EL1, RDM, IMP, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
HWCAP_CAP(ID_AA64ISAR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SHA3),
HWCAP_CAP(ID_AA64ISAR0_EL1, SM3, IMP, CAP_HWCAP, KERNEL_HWCAP_SM3),
@@ -2807,6 +2790,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ISAR1_EL1, FCMA, IMP, CAP_HWCAP, KERNEL_HWCAP_FCMA),
HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, IMP, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
+ HWCAP_CAP(ID_AA64ISAR1_EL1, LRCPC, LRCPC3, CAP_HWCAP, KERNEL_HWCAP_LRCPC3),
HWCAP_CAP(ID_AA64ISAR1_EL1, FRINTTS, IMP, CAP_HWCAP, KERNEL_HWCAP_FRINT),
HWCAP_CAP(ID_AA64ISAR1_EL1, SB, IMP, CAP_HWCAP, KERNEL_HWCAP_SB),
HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_BF16),
@@ -2821,6 +2805,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(ID_AA64ZFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
HWCAP_CAP(ID_AA64ZFR0_EL1, AES, PMULL128, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
HWCAP_CAP(ID_AA64ZFR0_EL1, BitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+ HWCAP_CAP(ID_AA64ZFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE_B16B16),
HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, IMP, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
HWCAP_CAP(ID_AA64ZFR0_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_SVE_EBF16),
HWCAP_CAP(ID_AA64ZFR0_EL1, SHA3, IMP, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
@@ -2981,7 +2966,7 @@ static void update_cpu_capabilities(u16 scope_mask)
!caps->matches(caps, cpucap_default_scope(caps)))
continue;
- if (caps->desc)
+ if (caps->desc && !caps->cpus)
pr_info("detected: %s\n", caps->desc);
__set_bit(caps->capability, system_cpucaps);
@@ -3153,36 +3138,28 @@ static void verify_local_elf_hwcaps(void)
static void verify_sve_features(void)
{
- u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
- u64 zcr = read_zcr_features();
+ unsigned long cpacr = cpacr_save_enable_kernel_sve();
- unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
- unsigned int len = zcr & ZCR_ELx_LEN_MASK;
-
- if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SVE)) {
+ if (vec_verify_vq_map(ARM64_VEC_SVE)) {
pr_crit("CPU%d: SVE: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}
- /* Add checks on other ZCR bits here if necessary */
+ cpacr_restore(cpacr);
}
static void verify_sme_features(void)
{
- u64 safe_smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
- u64 smcr = read_smcr_features();
-
- unsigned int safe_len = safe_smcr & SMCR_ELx_LEN_MASK;
- unsigned int len = smcr & SMCR_ELx_LEN_MASK;
+ unsigned long cpacr = cpacr_save_enable_kernel_sme();
- if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SME)) {
+ if (vec_verify_vq_map(ARM64_VEC_SME)) {
pr_crit("CPU%d: SME: vector length support mismatch\n",
smp_processor_id());
cpu_die_early();
}
- /* Add checks on other SMCR bits here if necessary */
+ cpacr_restore(cpacr);
}
static void verify_hyp_capabilities(void)
@@ -3289,7 +3266,6 @@ EXPORT_SYMBOL_GPL(this_cpu_has_cap);
* This helper function is used in a narrow window when,
* - The system wide safe registers are set with all the SMP CPUs and,
* - The SYSTEM_FEATURE system_cpucaps may not have been set.
- * In all other cases cpus_have_{const_}cap() should be used.
*/
static bool __maybe_unused __system_matches_cap(unsigned int n)
{
@@ -3328,23 +3304,50 @@ unsigned long cpu_get_elf_hwcap2(void)
return elf_hwcap[1];
}
-static void __init setup_system_capabilities(void)
+void __init setup_system_features(void)
{
+ int i;
/*
- * We have finalised the system-wide safe feature
- * registers, finalise the capabilities that depend
- * on it. Also enable all the available capabilities,
- * that are not enabled already.
+ * The system-wide safe feature feature register values have been
+ * finalized. Finalize and log the available system capabilities.
*/
update_cpu_capabilities(SCOPE_SYSTEM);
+ if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+ !cpus_have_cap(ARM64_HAS_PAN))
+ pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+
+ /*
+ * Enable all the available capabilities which have not been enabled
+ * already.
+ */
enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+
+ kpti_install_ng_mappings();
+
+ sve_setup();
+ sme_setup();
+
+ /*
+ * Check for sane CTR_EL0.CWG value.
+ */
+ if (!cache_type_cwg())
+ pr_warn("No Cache Writeback Granule information, assuming %d\n",
+ ARCH_DMA_MINALIGN);
+
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ const struct arm64_cpu_capabilities *caps = cpucap_ptrs[i];
+
+ if (caps && caps->cpus && caps->desc &&
+ cpumask_any(caps->cpus) < nr_cpu_ids)
+ pr_info("detected: %s on CPU%*pbl\n",
+ caps->desc, cpumask_pr_args(caps->cpus));
+ }
}
-void __init setup_cpu_features(void)
+void __init setup_user_features(void)
{
- u32 cwg;
+ user_feature_fixup();
- setup_system_capabilities();
setup_elf_hwcaps(arm64_elf_hwcaps);
if (system_supports_32bit_el0()) {
@@ -3352,20 +3355,7 @@ void __init setup_cpu_features(void)
elf_hwcap_fixup();
}
- if (system_uses_ttbr0_pan())
- pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
-
- sve_setup();
- sme_setup();
minsigstksz_setup();
-
- /*
- * Check for sane CTR_EL0.CWG value.
- */
- cwg = cache_type_cwg();
- if (!cwg)
- pr_warn("No Cache Writeback Granule information, assuming %d\n",
- ARCH_DMA_MINALIGN);
}
static int enable_mismatched_32bit_el0(unsigned int cpu)
@@ -3422,7 +3412,7 @@ subsys_initcall_sync(init_32bit_el0_mask);
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
+ cpu_enable_swapper_cnp();
}
/*
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 98fda8500535..a257da7b56fe 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -127,6 +127,9 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_SME_F16F16] = "smef16f16",
[KERNEL_HWCAP_MOPS] = "mops",
[KERNEL_HWCAP_HBC] = "hbc",
+ [KERNEL_HWCAP_SVE_B16B16] = "sveb16b16",
+ [KERNEL_HWCAP_LRCPC3] = "lrcpc3",
+ [KERNEL_HWCAP_LSE128] = "lse128",
};
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 2b478ca356b0..3f8c9c143552 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -113,8 +113,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
if (md->attribute & EFI_MEMORY_XP)
pte = set_pte_bit(pte, __pgprot(PTE_PXN));
- else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
- system_supports_bti() && spd->has_bti)
+ else if (system_supports_bti_kernel() && spd->has_bti)
pte = set_pte_bit(pte, __pgprot(PTE_GP));
set_pte(ptep, pte);
return 0;
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 91e44ac7150f..5ddc246f1482 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1160,44 +1160,20 @@ fail:
panic("Cannot allocate percpu memory for EFI SVE save/restore");
}
-/*
- * Enable SVE for EL1.
- * Intended for use by the cpufeatures code during CPU boot.
- */
-void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
{
write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
isb();
}
-/*
- * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
- * vector length.
- *
- * Use only if SVE is present.
- * This function clobbers the SVE vector length.
- */
-u64 read_zcr_features(void)
-{
- /*
- * Set the maximum possible VL, and write zeroes to all other
- * bits to see if they stick.
- */
- sve_kernel_enable(NULL);
- write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);
-
- /* Return LEN value that would be written to get the maximum VL */
- return sve_vq_from_vl(sve_get_vl()) - 1;
-}
-
void __init sve_setup(void)
{
struct vl_info *info = &vl_info[ARM64_VEC_SVE];
- u64 zcr;
DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
unsigned long b;
+ int max_bit;
- if (!system_supports_sve())
+ if (!cpus_have_cap(ARM64_SVE))
return;
/*
@@ -1208,17 +1184,8 @@ void __init sve_setup(void)
if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map)))
set_bit(__vq_to_bit(SVE_VQ_MIN), info->vq_map);
- zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
- info->max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
-
- /*
- * Sanity-check that the max VL we determined through CPU features
- * corresponds properly to sve_vq_map. If not, do our best:
- */
- if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SVE,
- info->max_vl)))
- info->max_vl = find_supported_vector_length(ARM64_VEC_SVE,
- info->max_vl);
+ max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
+ info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));
/*
* For the default VL, pick the maximum supported value <= 64.
@@ -1296,7 +1263,7 @@ static void sme_free(struct task_struct *task)
task->thread.sme_state = NULL;
}
-void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
{
/* Set priority for all PEs to architecturally defined minimum */
write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
@@ -1311,80 +1278,48 @@ void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
isb();
}
-/*
- * This must be called after sme_kernel_enable(), we rely on the
- * feature table being sorted to ensure this.
- */
-void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
{
+ /* This must be enabled after SME */
+ BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);
+
/* Allow use of ZT0 */
write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
SYS_SMCR_EL1);
}
-/*
- * This must be called after sme_kernel_enable(), we rely on the
- * feature table being sorted to ensure this.
- */
-void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
{
+ /* This must be enabled after SME */
+ BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);
+
/* Allow use of FA64 */
write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
SYS_SMCR_EL1);
}
-/*
- * Read the pseudo-SMCR used by cpufeatures to identify the supported
- * vector length.
- *
- * Use only if SME is present.
- * This function clobbers the SME vector length.
- */
-u64 read_smcr_features(void)
-{
- sme_kernel_enable(NULL);
-
- /*
- * Set the maximum possible VL.
- */
- write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
- SYS_SMCR_EL1);
-
- /* Return LEN value that would be written to get the maximum VL */
- return sve_vq_from_vl(sme_get_vl()) - 1;
-}
-
void __init sme_setup(void)
{
struct vl_info *info = &vl_info[ARM64_VEC_SME];
- u64 smcr;
- int min_bit;
+ int min_bit, max_bit;
- if (!system_supports_sme())
+ if (!cpus_have_cap(ARM64_SME))
return;
/*
* SME doesn't require any particular vector length be
* supported but it does require at least one. We should have
* disabled the feature entirely while bringing up CPUs but
- * let's double check here.
+ * let's double check here. The bitmap is SVE_VQ_MAP sized for
+ * sharing with SVE.
*/
WARN_ON(bitmap_empty(info->vq_map, SVE_VQ_MAX));
min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX);
info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit));
- smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
- info->max_vl = sve_vl_from_vq((smcr & SMCR_ELx_LEN_MASK) + 1);
-
- /*
- * Sanity-check that the max VL we determined through CPU features
- * corresponds properly to sme_vq_map. If not, do our best:
- */
- if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SME,
- info->max_vl)))
- info->max_vl = find_supported_vector_length(ARM64_VEC_SME,
- info->max_vl);
+ max_bit = find_first_bit(info->vq_map, SVE_VQ_MAX);
+ info->max_vl = sve_vl_from_vq(__bit_to_vq(max_bit));
WARN_ON(info->min_vl > info->max_vl);
@@ -1529,8 +1464,17 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
*/
void do_fpsimd_acc(unsigned long esr, struct pt_regs *regs)
{
- /* TODO: implement lazy context saving/restoring */
- WARN_ON(1);
+ /* Even if we chose not to use FPSIMD, the hardware could still trap: */
+ if (!system_supports_fpsimd()) {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ return;
+ }
+
+ /*
+ * When FPSIMD is enabled, we should never take a trap unless something
+ * has gone very wrong.
+ */
+ BUG();
}
/*
@@ -1771,13 +1715,23 @@ void fpsimd_bind_state_to_cpu(struct cpu_fp_state *state)
void fpsimd_restore_current_state(void)
{
/*
- * For the tasks that were created before we detected the absence of
- * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
- * e.g, init. This could be then inherited by the children processes.
- * If we later detect that the system doesn't support FP/SIMD,
- * we must clear the flag for all the tasks to indicate that the
- * FPSTATE is clean (as we can't have one) to avoid looping for ever in
- * do_notify_resume().
+ * TIF_FOREIGN_FPSTATE is set on the init task and copied by
+ * arch_dup_task_struct() regardless of whether FP/SIMD is detected.
+ * Thus user threads can have this set even when FP/SIMD hasn't been
+ * detected.
+ *
+ * When FP/SIMD is detected, begin_new_exec() will set
+ * TIF_FOREIGN_FPSTATE via flush_thread() -> fpsimd_flush_thread(),
+ * and fpsimd_thread_switch() will set TIF_FOREIGN_FPSTATE when
+ * switching tasks. We detect FP/SIMD before we exec the first user
+ * process, ensuring this has TIF_FOREIGN_FPSTATE set and
+ * do_notify_resume() will call fpsimd_restore_current_state() to
+ * install the user FP/SIMD context.
+ *
+ * When FP/SIMD is not detected, nothing else will clear or set
+ * TIF_FOREIGN_FPSTATE prior to the first return to userspace, and
+ * we must clear TIF_FOREIGN_FPSTATE to avoid do_notify_resume()
+ * looping forever calling fpsimd_restore_current_state().
*/
if (!system_supports_fpsimd()) {
clear_thread_flag(TIF_FOREIGN_FPSTATE);
@@ -2110,6 +2064,13 @@ static inline void fpsimd_hotplug_init(void)
static inline void fpsimd_hotplug_init(void) { }
#endif
+void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__always_unused p)
+{
+ unsigned long enable = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN;
+ write_sysreg(read_sysreg(CPACR_EL1) | enable, CPACR_EL1);
+ isb();
+}
+
/*
* FP/SIMD support code initialisation.
*/
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
index c1125753fe9b..05cfb347ec26 100644
--- a/arch/arm64/kernel/idle.c
+++ b/arch/arm64/kernel/idle.c
@@ -20,7 +20,7 @@
* ensure that interrupts are not masked at the PMR (because the core will
* not wake up if we block the wake up signal in the interrupt controller).
*/
-void noinstr cpu_do_idle(void)
+void __cpuidle cpu_do_idle(void)
{
struct arm_cpuidle_irq_context context;
@@ -35,7 +35,7 @@ void noinstr cpu_do_idle(void)
/*
* This is our default idle handler.
*/
-void noinstr arch_cpu_idle(void)
+void __cpuidle arch_cpu_idle(void)
{
/*
* This should do all the clock switching and wait for interrupt
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index bd69a4e7cd60..bde32979c06a 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -167,9 +167,6 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
switch (ELF64_R_TYPE(rela[i].r_info)) {
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- break;
-
/*
* We only have to consider branch targets that resolve
* to symbols that are defined in a different section.
@@ -203,8 +200,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
case R_AARCH64_ADR_PREL_PG_HI21:
- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
- !cpus_have_const_cap(ARM64_WORKAROUND_843419))
+ if (!cpus_have_final_cap(ARM64_WORKAROUND_843419))
break;
/*
@@ -239,13 +235,13 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
}
}
- if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
- cpus_have_const_cap(ARM64_WORKAROUND_843419))
+ if (cpus_have_final_cap(ARM64_WORKAROUND_843419)) {
/*
* Add some slack so we can skip PLT slots that may trigger
* the erratum due to the placement of the ADRP instruction.
*/
ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
+ }
return ret;
}
@@ -269,9 +265,6 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
{
int i = 0, j = numrels - 1;
- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return 0;
-
while (i < j) {
if (branch_rela_needs_plt(syms, &rela[i], dstidx))
i++;
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 4edecaac8f91..2fb5e7a7a4d5 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -35,10 +35,10 @@ DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode);
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
#endif
-void mte_sync_tags(pte_t pte)
+void mte_sync_tags(pte_t pte, unsigned int nr_pages)
{
struct page *page = pte_page(pte);
- long i, nr_pages = compound_nr(page);
+ unsigned int i;
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0fcc4eb1a7ab..657ea273c0f9 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -454,7 +454,7 @@ static void ssbs_thread_switch(struct task_struct *next)
* If all CPUs implement the SSBS extension, then we just need to
* context-switch the PSTATE field.
*/
- if (cpus_have_const_cap(ARM64_SSBS))
+ if (alternative_has_cap_unlikely(ARM64_SSBS))
return;
spectre_v4_enable_task_mitigation(next);
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index 05f40c4e18fd..6268a13a1d58 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -972,7 +972,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
* When KPTI is in use, the vectors are switched when exiting to
* user-space.
*/
- if (arm64_kernel_unmapped_at_el0())
+ if (cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return;
write_sysreg(v, vbar_el1);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 196533c362e1..be95b523c101 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -32,7 +32,9 @@
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>
#include <linux/kexec.h>
+#include <linux/kgdb.h>
#include <linux/kvm_host.h>
+#include <linux/nmi.h>
#include <asm/alternative.h>
#include <asm/atomic.h>
@@ -72,13 +74,19 @@ enum ipi_msg_type {
IPI_CPU_CRASH_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
- IPI_WAKEUP,
- NR_IPI
+ NR_IPI,
+ /*
+ * Any enum >= NR_IPI and < MAX_IPI is special and not tracable
+ * with trace_ipi_*
+ */
+ IPI_CPU_BACKTRACE = NR_IPI,
+ IPI_KGDB_ROUNDUP,
+ MAX_IPI
};
-static int ipi_irq_base __read_mostly;
-static int nr_ipi __read_mostly = NR_IPI;
-static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
+static int ipi_irq_base __ro_after_init;
+static int nr_ipi __ro_after_init = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __ro_after_init;
static void ipi_setup(int cpu);
@@ -431,9 +439,10 @@ static void __init hyp_mode_check(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
- setup_cpu_features();
+ setup_system_features();
hyp_mode_check();
apply_alternatives_all();
+ setup_user_features();
mark_linear_text_alias_ro();
}
@@ -520,7 +529,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
u64 hwid = processor->arm_mpidr;
- if (!(processor->flags & ACPI_MADT_ENABLED)) {
+ if (!acpi_gicc_is_usable(processor)) {
pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
return;
}
@@ -764,7 +773,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
[IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts",
[IPI_TIMER] = "Timer broadcast interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
- [IPI_WAKEUP] = "CPU wake-up interrupts",
};
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
@@ -797,13 +805,6 @@ void arch_send_call_function_single_ipi(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
- smp_cross_call(mask, IPI_WAKEUP);
-}
-#endif
-
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
@@ -854,6 +855,38 @@ static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs
#endif
}
+static void arm64_backtrace_ipi(cpumask_t *mask)
+{
+ __ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+ /*
+ * NOTE: though nmi_trigger_cpumask_backtrace() has "nmi_" in the name,
+ * nothing about it truly needs to be implemented using an NMI, it's
+ * just that it's _allowed_ to work with NMIs. If ipi_should_be_nmi()
+ * returned false our backtrace attempt will just use a regular IPI.
+ */
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_backtrace_ipi);
+}
+
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(void)
+{
+ int this_cpu = raw_smp_processor_id();
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ /* No need to roundup ourselves */
+ if (cpu == this_cpu)
+ continue;
+
+ __ipi_send_single(ipi_desc[IPI_KGDB_ROUNDUP], cpu);
+ }
+}
+#endif
+
/*
* Main handler for inter-processor interrupts
*/
@@ -897,13 +930,17 @@ static void do_handle_IPI(int ipinr)
break;
#endif
-#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
- case IPI_WAKEUP:
- WARN_ONCE(!acpi_parking_protocol_valid(cpu),
- "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
- cpu);
+ case IPI_CPU_BACKTRACE:
+ /*
+ * NOTE: in some cases this _won't_ be NMI context. See the
+ * comment in arch_trigger_cpumask_backtrace().
+ */
+ nmi_cpu_backtrace(get_irq_regs());
+ break;
+
+ case IPI_KGDB_ROUNDUP:
+ kgdb_nmicallback(cpu, get_irq_regs());
break;
-#endif
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
@@ -926,6 +963,25 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
__ipi_send_mask(ipi_desc[ipinr], target);
}
+static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
+{
+ DECLARE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+
+ if (!system_uses_irq_prio_masking() ||
+ !static_branch_likely(&supports_pseudo_nmis))
+ return false;
+
+ switch (ipi) {
+ case IPI_CPU_STOP:
+ case IPI_CPU_CRASH_STOP:
+ case IPI_CPU_BACKTRACE:
+ case IPI_KGDB_ROUNDUP:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void ipi_setup(int cpu)
{
int i;
@@ -933,8 +989,14 @@ static void ipi_setup(int cpu)
if (WARN_ON_ONCE(!ipi_irq_base))
return;
- for (i = 0; i < nr_ipi; i++)
- enable_percpu_irq(ipi_irq_base + i, 0);
+ for (i = 0; i < nr_ipi; i++) {
+ if (ipi_should_be_nmi(i)) {
+ prepare_percpu_nmi(ipi_irq_base + i);
+ enable_percpu_nmi(ipi_irq_base + i, 0);
+ } else {
+ enable_percpu_irq(ipi_irq_base + i, 0);
+ }
+ }
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -945,8 +1007,14 @@ static void ipi_teardown(int cpu)
if (WARN_ON_ONCE(!ipi_irq_base))
return;
- for (i = 0; i < nr_ipi; i++)
- disable_percpu_irq(ipi_irq_base + i);
+ for (i = 0; i < nr_ipi; i++) {
+ if (ipi_should_be_nmi(i)) {
+ disable_percpu_nmi(ipi_irq_base + i);
+ teardown_percpu_nmi(ipi_irq_base + i);
+ } else {
+ disable_percpu_irq(ipi_irq_base + i);
+ }
+ }
}
#endif
@@ -954,15 +1022,23 @@ void __init set_smp_ipi_range(int ipi_base, int n)
{
int i;
- WARN_ON(n < NR_IPI);
- nr_ipi = min(n, NR_IPI);
+ WARN_ON(n < MAX_IPI);
+ nr_ipi = min(n, MAX_IPI);
for (i = 0; i < nr_ipi; i++) {
int err;
- err = request_percpu_irq(ipi_base + i, ipi_handler,
- "IPI", &cpu_number);
- WARN_ON(err);
+ if (ipi_should_be_nmi(i)) {
+ err = request_percpu_nmi(ipi_base + i, ipi_handler,
+ "IPI", &cpu_number);
+ WARN(err, "Could not request IPI %d as NMI, err=%d\n",
+ i, err);
+ } else {
+ err = request_percpu_irq(ipi_base + i, ipi_handler,
+ "IPI", &cpu_number);
+ WARN(err, "Could not request IPI %d as IRQ, err=%d\n",
+ i, err);
+ }
ipi_desc[i] = irq_to_desc(ipi_base + i);
irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
@@ -979,6 +1055,17 @@ void arch_smp_send_reschedule(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi(unsigned int cpu)
+{
+ /*
+ * We use a scheduler IPI to wake the CPU as this avoids the need for a
+ * dedicated IPI and we can safely handle spurious scheduler IPIs.
+ */
+ smp_send_reschedule(cpu);
+}
+#endif
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 0fbdf5fe64d8..eca4d0435211 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -55,13 +55,13 @@ void notrace __cpu_suspend_exit(void)
/* Restore CnP bit in TTBR1_EL1 */
if (system_supports_cnp())
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
+ cpu_enable_swapper_cnp();
/*
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
*/
- if (cpus_have_const_cap(ARM64_HAS_DIT))
+ if (alternative_has_cap_unlikely(ARM64_HAS_DIT))
set_pstate_dit(1);
__uaccess_enable_hw_pan();
@@ -98,6 +98,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
struct sleep_stack_data state;
struct arm_cpuidle_irq_context context;
+ /*
+ * Some portions of CPU state (e.g. PSTATE.{PAN,DIT}) are initialized
+ * before alternatives are patched, but are only restored by
+ * __cpu_suspend_exit() after alternatives are patched. To avoid
+ * accidentally losing these bits we must not attempt to suspend until
+ * after alternatives have been patched.
+ */
+ WARN_ON(!system_capabilities_finalized());
+
/* Report any MTE async fault before going to suspend */
mte_suspend_enter();
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index df14336c3a29..4a609e9b65de 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -31,7 +31,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;
- if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
/*
* The workaround requires an inner-shareable tlbi.
* We pick the reserved-ASID to minimise the impact.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8b70759cdbb9..9eba6cdd7038 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -631,7 +631,7 @@ static void ctr_read_handler(unsigned long esr, struct pt_regs *regs)
int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
- if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+ if (cpus_have_final_cap(ARM64_WORKAROUND_1542419)) {
/* Hide DIC so that we can trap the unnecessary maintenance...*/
val &= ~BIT(CTR_EL0_DIC_SHIFT);
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index d9e1355730ef..5562daf38a22 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -212,7 +212,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
if (IS_ERR(ret))
goto up_fail;
- if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
+ if (system_supports_bti_kernel())
gp_flags = VM_ARM64_BTI;
vdso_base += VVAR_NR_PAGES * PAGE_SIZE;