-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 12
-rw-r--r--  Documentation/arm64/elf_hwcaps.rst | 33
-rw-r--r--  Documentation/arm64/index.rst | 1
-rw-r--r--  Documentation/arm64/sme.rst | 428
-rw-r--r--  Documentation/arm64/sve.rst | 70
-rw-r--r--  Documentation/devicetree/bindings/perf/arm,cmn.yaml | 2
-rw-r--r--  arch/Kconfig | 7
-rw-r--r--  arch/arm64/Kconfig | 12
-rw-r--r--  arch/arm64/include/asm/cpu.h | 4
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 24
-rw-r--r--  arch/arm64/include/asm/el2_setup.h | 64
-rw-r--r--  arch/arm64/include/asm/esr.h | 13
-rw-r--r--  arch/arm64/include/asm/exception.h | 1
-rw-r--r--  arch/arm64/include/asm/fpsimd.h | 135
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h | 87
-rw-r--r--  arch/arm64/include/asm/ftrace.h | 7
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 8
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 4
-rw-r--r--  arch/arm64/include/asm/mte.h | 1
-rw-r--r--  arch/arm64/include/asm/processor.h | 26
-rw-r--r--  arch/arm64/include/asm/stacktrace.h | 32
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 67
-rw-r--r--  arch/arm64/include/asm/thread_info.h | 2
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 15
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 8
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h | 69
-rw-r--r--  arch/arm64/include/uapi/asm/sigcontext.h | 55
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 107
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 13
-rw-r--r--  arch/arm64/kernel/entry-common.c | 11
-rw-r--r--  arch/arm64/kernel/entry-fpsimd.S | 36
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 17
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 649
-rw-r--r--  arch/arm64/kernel/ftrace.c | 17
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 9
-rw-r--r--  arch/arm64/kernel/machine_kexec_file.c | 12
-rw-r--r--  arch/arm64/kernel/mte.c | 30
-rw-r--r--  arch/arm64/kernel/process.c | 44
-rw-r--r--  arch/arm64/kernel/ptrace.c | 366
-rw-r--r--  arch/arm64/kernel/setup.c | 17
-rw-r--r--  arch/arm64/kernel/signal.c | 188
-rw-r--r--  arch/arm64/kernel/stacktrace.c | 124
-rw-r--r--  arch/arm64/kernel/syscall.c | 29
-rw-r--r--  arch/arm64/kernel/traps.c | 1
-rw-r--r--  arch/arm64/kvm/fpsimd.c | 43
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c | 30
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c | 11
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 9
-rw-r--r--  arch/arm64/mm/init.c | 65
-rw-r--r--  arch/arm64/tools/cpucaps | 2
-rw-r--r--  arch/x86/kernel/ftrace.c | 17
-rw-r--r--  drivers/of/fdt.c | 33
-rw-r--r--  drivers/of/kexec.c | 9
-rw-r--r--  drivers/perf/arm-cmn.c | 609
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 4
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 3
-rw-r--r--  drivers/perf/hisilicon/Makefile | 2
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c | 409
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pa_pmu.c | 18
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.c | 4
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.h | 1
-rw-r--r--  drivers/perf/marvell_cn10k_tad_pmu.c | 6
-rw-r--r--  drivers/perf/riscv_pmu_sbi.c | 8
-rw-r--r--  fs/btrfs/ioctl.c | 7
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/pagemap.h | 1
-rw-r--r--  include/linux/uaccess.h | 22
-rw-r--r--  include/uapi/linux/elf.h | 2
-rw-r--r--  include/uapi/linux/prctl.h | 9
-rw-r--r--  kernel/crash_core.c | 3
-rw-r--r--  kernel/sys.c | 12
-rw-r--r--  kernel/trace/fgraph.c | 18
-rw-r--r--  mm/gup.c | 29
74 files changed, 3785 insertions, 460 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3f1cc5e317ed..1b543c3109f4 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -808,7 +808,7 @@
Documentation/admin-guide/kdump/kdump.rst for an example.
crashkernel=size[KMG],high
- [KNL, X86-64] range could be above 4G. Allow kernel
+ [KNL, X86-64, ARM64] range could be above 4G. Allow kernel
to allocate physical memory region from top, so could
be above 4G if system have more than 4G ram installed.
Otherwise memory region will be allocated below 4G, if
@@ -821,14 +821,20 @@
that require some amount of low memory, e.g. swiotlb
requires at least 64M+32K low memory, also enough extra
low memory is needed to make sure DMA buffers for 32-bit
- devices won't run out. Kernel would try to allocate at
+ devices won't run out. Kernel would try to allocate
at least 256M below 4G automatically.
- This one let user to specify own low range under 4G
+ This one lets the user specify their own low range under 4G
for second kernel instead.
0: to disable low allocation.
It will be ignored when crashkernel=X,high is not used
or memory reserved is below 4G.
+ [KNL, ARM64] range in low memory.
+ This one lets the user specify a low range in the
+ DMA zone for the crash dump kernel.
+ It will be ignored when crashkernel=X,high is not used
+ or memory reserved is located in the DMA zones.
+
cryptomgr.notests
[KNL] Disable crypto self-tests
diff --git a/Documentation/arm64/elf_hwcaps.rst b/Documentation/arm64/elf_hwcaps.rst
index a8f30963e550..f8d818eaaff5 100644
--- a/Documentation/arm64/elf_hwcaps.rst
+++ b/Documentation/arm64/elf_hwcaps.rst
@@ -264,6 +264,39 @@ HWCAP2_MTE3
Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0011, as described
by Documentation/arm64/memory-tagging-extension.rst.
+HWCAP2_SME
+
+ Functionality implied by ID_AA64PFR1_EL1.SME == 0b0001, as described
+ by Documentation/arm64/sme.rst.
+
+HWCAP2_SME_I16I64
+
+ Functionality implied by ID_AA64SMFR0_EL1.I16I64 == 0b1111.
+
+HWCAP2_SME_F64F64
+
+ Functionality implied by ID_AA64SMFR0_EL1.F64F64 == 0b1.
+
+HWCAP2_SME_I8I32
+
+ Functionality implied by ID_AA64SMFR0_EL1.I8I32 == 0b1111.
+
+HWCAP2_SME_F16F32
+
+ Functionality implied by ID_AA64SMFR0_EL1.F16F32 == 0b1.
+
+HWCAP2_SME_B16F32
+
+ Functionality implied by ID_AA64SMFR0_EL1.B16F32 == 0b1.
+
+HWCAP2_SME_F32F32
+
+ Functionality implied by ID_AA64SMFR0_EL1.F32F32 == 0b1.
+
+HWCAP2_SME_FA64
+
+ Functionality implied by ID_AA64SMFR0_EL1.FA64 == 0b1.
+
4. Unused AT_HWCAP bits
-----------------------
diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst
index 4f840bac083e..ae21f8118830 100644
--- a/Documentation/arm64/index.rst
+++ b/Documentation/arm64/index.rst
@@ -21,6 +21,7 @@ ARM64 Architecture
perf
pointer-authentication
silicon-errata
+ sme
sve
tagged-address-abi
tagged-pointers
diff --git a/Documentation/arm64/sme.rst b/Documentation/arm64/sme.rst
new file mode 100644
index 000000000000..8ba677b87e90
--- /dev/null
+++ b/Documentation/arm64/sme.rst
@@ -0,0 +1,428 @@
+===================================================
+Scalable Matrix Extension support for AArch64 Linux
+===================================================
+
+This document outlines briefly the interface provided to userspace by Linux in
+order to support use of the ARM Scalable Matrix Extension (SME).
+
+This is an outline of the most important features and issues only and not
+intended to be exhaustive. It should be read in conjunction with the SVE
+documentation in sve.rst which provides details on the Streaming SVE mode
+included in SME.
+
+This document does not aim to describe the SME architecture or programmer's
+model. To aid understanding, a minimal description of relevant programmer's
+model features for SME is included in Appendix A.
+
+
+1. General
+-----------
+
+* PSTATE.SM, PSTATE.ZA, the streaming mode vector length, the ZA
+ register state and TPIDR2_EL0 are tracked per thread.
+
+* The presence of SME is reported to userspace via HWCAP2_SME in the aux vector
+ AT_HWCAP2 entry. Presence of this flag implies the presence of the SME
+ instructions and registers, and the Linux-specific system interfaces
+ described in this document. SME is reported in /proc/cpuinfo as "sme".
+
+* Support for the execution of SME instructions in userspace can also be
+ detected by reading the CPU ID register ID_AA64PFR1_EL1 using an MRS
+ instruction, and checking that the value of the SME field is nonzero. [3]
+
+ It does not guarantee the presence of the system interfaces described in the
+ following sections: software that needs to verify that those interfaces are
+ present must check for HWCAP2_SME instead.
+
+* There are a number of optional SME features; the presence of these is
+ reported through AT_HWCAP2 via the following flags, as illustrated by the
+ detection example at the end of this section:
+
+ HWCAP2_SME_I16I64
+ HWCAP2_SME_F64F64
+ HWCAP2_SME_I8I32
+ HWCAP2_SME_F16F32
+ HWCAP2_SME_B16F32
+ HWCAP2_SME_F32F32
+ HWCAP2_SME_FA64
+
+ This list may be extended over time as the SME architecture evolves.
+
+ These extensions are also reported via the CPU ID register ID_AA64SMFR0_EL1,
+ which userspace can read using an MRS instruction. See elf_hwcaps.txt and
+ cpu-feature-registers.txt for details.
+
+* Debuggers should restrict themselves to interacting with the target via the
+ NT_ARM_SVE, NT_ARM_SSVE and NT_ARM_ZA regsets. The recommended way
+ of detecting support for these regsets is to connect to a target process
+ first and then attempt a
+
+ ptrace(PTRACE_GETREGSET, pid, NT_ARM_<regset>, &iov).
+
+* Whenever ZA register values are exchanged in memory between userspace and
+ the kernel, the register value is encoded in memory as a series of horizontal
+ vectors from 0 to VL/8-1 stored in the same endianness invariant format as is
+ used for SVE vectors.
+
+* On thread creation TPIDR2_EL0 is preserved unless CLONE_SETTLS is specified,
+ in which case it is set to 0.
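
As an informal illustration of the detection steps above (not itself part of
the ABI), a userspace program might check for SME and its optional features
roughly as follows, assuming kernel headers and a libc recent enough to
provide getauxval() and the HWCAP2_SME* constants::

  #include <stdio.h>
  #include <sys/auxv.h>    /* getauxval(), AT_HWCAP2 */
  #include <asm/hwcap.h>   /* HWCAP2_SME, HWCAP2_SME_FA64, ... */

  int main(void)
  {
          unsigned long hwcap2 = getauxval(AT_HWCAP2);

          if (!(hwcap2 & HWCAP2_SME)) {
                  puts("SME not supported");
                  return 1;
          }

          /* Optional features are reported as separate HWCAP2_SME_* bits. */
          printf("SME supported, FA64: %s, F64F64: %s\n",
                 (hwcap2 & HWCAP2_SME_FA64) ? "yes" : "no",
                 (hwcap2 & HWCAP2_SME_F64F64) ? "yes" : "no");
          return 0;
  }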
+
+2. Vector lengths
+------------------
+
+SME defines a second vector length, similar to the SVE vector length, which
+controls the size of the streaming mode SVE vectors and the ZA matrix array.
+The ZA matrix is square with each side having as many bytes as a streaming
+mode SVE vector.
+
+
+3. Sharing of streaming and non-streaming mode SVE state
+---------------------------------------------------------
+
+It is implementation defined which, if any, parts of the SVE state are shared
+between streaming and non-streaming modes. When switching between modes
+via software interfaces such as ptrace, if no register content is provided as
+part of the switch, no state will be assumed to be shared and everything will
+be zeroed.
+
+
+4. System call behaviour
+-------------------------
+
+* On syscall PSTATE.ZA is preserved, if PSTATE.ZA==1 then the contents of the
+ ZA matrix are preserved.
+
+* On syscall PSTATE.SM will be cleared and the SVE registers will be handled
+ as per the standard SVE ABI.
+
+* Neither the SVE registers nor ZA are used to pass arguments to or receive
+ results from any syscall.
+
+* On process creation (eg, clone()) the newly created process will have
+ PSTATE.SM cleared.
+
+* All other SME state of a thread, including the currently configured vector
+ length, the state of the PR_SME_VL_INHERIT flag, and the deferred vector
+ length (if any), is preserved across all syscalls, subject to the specific
+ exceptions for execve() described in section 7.
+
+
+5. Signal handling
+-------------------
+
+* Signal handlers are invoked with streaming mode and ZA disabled.
+
+* A new signal frame record za_context encodes the ZA register contents on
+ signal delivery. [1]
+
+* The signal frame record for ZA always contains basic metadata, in particular
+ the thread's vector length (in za_context.vl).
+
+* The ZA matrix may or may not be included in the record, depending on
+ the value of PSTATE.ZA. The registers are present if and only if:
+ za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
+ in which case PSTATE.ZA == 1.
+
+* If matrix data is present, the remainder of the record has a vl-dependent
+ size and layout. Macros ZA_SIG_* are defined [1] to facilitate access to
+ them.
+
+* The matrix is stored as a series of horizontal vectors in the same format as
+ is used for SVE vectors.
+
+* If the ZA context is too big to fit in sigcontext.__reserved[], then extra
+ space is allocated on the stack, an extra_context record is written in
+ __reserved[] referencing this space. za_context is then written in the
+ extra space. Refer to [1] for further details about this mechanism.
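
As an informal sketch (not part of the ABI statement above), a signal handler
could locate the za_context record by walking the records in __reserved[],
assuming the <asm/sigcontext.h> definitions added here; the extra_context
indirection is ignored and printf() is used for brevity only::

  #include <signal.h>
  #include <stdio.h>
  #include <ucontext.h>
  #include <asm/sigcontext.h>   /* _aarch64_ctx, za_context, ZA_MAGIC */

  static void handler(int sig, siginfo_t *info, void *ucontext)
  {
          ucontext_t *uc = ucontext;
          struct _aarch64_ctx *head =
                  (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

          for (; head->magic;
               head = (struct _aarch64_ctx *)((char *)head + head->size)) {
                  if (head->magic != ZA_MAGIC)
                          continue;
                  /* Matrix data follows only if PSTATE.ZA was 1 on delivery. */
                  struct za_context *za = (struct za_context *)head;
                  printf("za_context: vl=%u size=%u\n",
                         (unsigned int)za->vl, (unsigned int)za->head.size);
                  break;
          }
  }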
+
+
+6. Signal return
+-----------------
+
+When returning from a signal handler:
+
+* If there is no za_context record in the signal frame, or if the record is
+ present but contains no register data as described in the previous section,
+ then ZA is disabled.
+
+* If za_context is present in the signal frame and contains matrix data then
+ PSTATE.ZA is set to 1 and ZA is populated with the specified data.
+
+* The vector length cannot be changed via signal return. If za_context.vl in
+ the signal frame does not match the current vector length, the signal return
+ attempt is treated as illegal, resulting in a forced SIGSEGV.
+
+
+7. prctl extensions
+--------------------
+
+Some new prctl() calls are added to allow programs to manage the SME vector
+length:
+
+prctl(PR_SME_SET_VL, unsigned long arg)
+
+ Sets the vector length of the calling thread and related flags, where
+ arg == vl | flags. Other threads of the calling process are unaffected.
+
+ vl is the desired vector length, where sve_vl_valid(vl) must be true.
+
+ flags:
+
+ PR_SME_VL_INHERIT
+
+ Inherit the current vector length across execve(). Otherwise, the
+ vector length is reset to the system default at execve(). (See
+ Section 10.)
+
+ PR_SME_SET_VL_ONEXEC
+
+ Defer the requested vector length change until the next execve()
+ performed by this thread.
+
+ The effect is equivalent to implicit execution of the following
+ call immediately after the next execve() (if any) by the thread:
+
+ prctl(PR_SME_SET_VL, arg & ~PR_SME_SET_VL_ONEXEC)
+
+ This allows launching of a new program with a different vector
+ length, while avoiding runtime side effects in the caller.
+
+ Without PR_SME_SET_VL_ONEXEC, the requested change takes effect
+ immediately.
+
+
+ Return value: a nonnegative value on success, or a negative value on error:
+ EINVAL: SME not supported, invalid vector length requested, or
+ invalid flags.
+
+
+ On success:
+
+ * Either the calling thread's vector length or the deferred vector length
+ to be applied at the next execve() by the thread (dependent on whether
+ PR_SME_SET_VL_ONEXEC is present in arg), is set to the largest value
+ supported by the system that is less than or equal to vl. If vl ==
+ SVE_VL_MAX, the value set will be the largest value supported by the
+ system.
+
+ * Any previously outstanding deferred vector length change in the calling
+ thread is cancelled.
+
+ * The returned value describes the resulting configuration, encoded as for
+ PR_SME_GET_VL. The vector length reported in this value is the new
+ current vector length for this thread if PR_SME_SET_VL_ONEXEC was not
+ present in arg; otherwise, the reported vector length is the deferred
+ vector length that will be applied at the next execve() by the calling
+ thread.
+
+ * Changing the vector length causes all of ZA, P0..P15, FFR and all bits of
+ Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
+ unspecified, including both streaming and non-streaming SVE state.
+ Calling PR_SME_SET_VL with vl equal to the thread's current vector
+ length, or calling PR_SME_SET_VL with the PR_SME_SET_VL_ONEXEC flag,
+ does not constitute a change to the vector length for this purpose.
+
+ * Changing the vector length causes PSTATE.ZA and PSTATE.SM to be cleared.
+ Calling PR_SME_SET_VL with vl equal to the thread's current vector
+ length, or calling PR_SME_SET_VL with the PR_SME_SET_VL_ONEXEC flag,
+ does not constitute a change to the vector length for this purpose.
+
+
+prctl(PR_SME_GET_VL)
+
+ Gets the vector length of the calling thread.
+
+ The following flag may be OR-ed into the result:
+
+ PR_SME_VL_INHERIT
+
+ Vector length will be inherited across execve().
+
+ There is no way to determine whether there is an outstanding deferred
+ vector length change (which would only normally be the case between a
+ fork() or vfork() and the corresponding execve() in typical use).
+
+ To extract the vector length from the result, bitwise and it with
+ PR_SME_VL_LEN_MASK.
+
+ Return value: a nonnegative value on success, or a negative value on error:
+ EINVAL: SME not supported.
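
Purely as an illustration (assuming <sys/prctl.h> and <linux/prctl.h> new
enough to define the PR_SME_* constants added by this series), a thread might
use these calls as follows::

  #include <stdio.h>
  #include <sys/prctl.h>
  #include <linux/prctl.h>

  int main(void)
  {
          /*
           * Request a 64-byte (512-bit) streaming vector length; the kernel
           * grants the largest supported VL less than or equal to the request.
           */
          int ret = prctl(PR_SME_SET_VL, 64);
          if (ret < 0) {
                  perror("PR_SME_SET_VL");   /* e.g. EINVAL if SME is absent */
                  return 1;
          }
          printf("granted streaming VL: %d bytes\n", ret & PR_SME_VL_LEN_MASK);

          ret = prctl(PR_SME_GET_VL);
          printf("current streaming VL: %d bytes, inherit on execve: %s\n",
                 ret & PR_SME_VL_LEN_MASK,
                 (ret & PR_SME_VL_INHERIT) ? "yes" : "no");
          return 0;
  }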
+
+
+8. ptrace extensions
+---------------------
+
+* A new regset NT_ARM_SSVE is defined for access to streaming mode SVE
+ state via PTRACE_GETREGSET and PTRACE_SETREGSET; this is documented in
+ sve.rst.
+
+* A new regset NT_ARM_ZA is defined for access to ZA state via
+ PTRACE_GETREGSET and PTRACE_SETREGSET.
+
+ Refer to [2] for definitions.
+
+The regset data starts with struct user_za_header, containing:
+
+ size
+
+ Size of the complete regset, in bytes.
+ This depends on vl and possibly on other things in the future.
+
+ If a call to PTRACE_GETREGSET requests less data than the value of
+ size, the caller can allocate a larger buffer and retry in order to
+ read the complete regset.
+
+ max_size
+
+ Maximum size in bytes that the regset can grow to for the target
+ thread. The regset won't grow bigger than this even if the target
+ thread changes its vector length etc.
+
+ vl
+
+ Target thread's current streaming vector length, in bytes.
+
+ max_vl
+
+ Maximum possible streaming vector length for the target thread.
+
+ flags
+
+ Zero or more of the following flags, which have the same
+ meaning and behaviour as the corresponding PR_SET_VL_* flags:
+
+ SME_PT_VL_INHERIT
+
+ SME_PT_VL_ONEXEC (SETREGSET only).
+
+* The effects of changing the vector length and/or flags are equivalent to
+ those documented for PR_SME_SET_VL.
+
+ The caller must make a further GETREGSET call if it needs to know what VL is
+ actually set by SETREGSET, unless it is known in advance that the requested
+ VL is supported.
+
+* The size and layout of the payload depends on the header fields. The
+ SME_PT_ZA_*() macros are provided to facilitate access to the data.
+
+* For SETREGSET it is permissible to omit the payload, in which
+ case the vector length and flags are changed and PSTATE.ZA is set to 0
+ (along with any consequences of those changes). If a payload is provided
+ then PSTATE.ZA will be set to 1.
+
+* For SETREGSET, if the requested VL is not supported, the effect will be the
+ same as if the payload were omitted, except that an EIO error is reported.
+ No attempt is made to translate the payload data to the correct layout
+ for the vector length actually set. It is up to the caller to translate the
+ payload layout for the actual VL and retry.
+
+* The effect of writing a partial, incomplete payload is unspecified.
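
As a sketch only (assuming <linux/elf.h> and <asm/ptrace.h> carrying the
NT_ARM_ZA and user_za_header definitions added by this series), a debugger
might read the header as follows, retrying with a buffer of hdr.size bytes if
it also wants the matrix payload::

  #include <stdio.h>
  #include <sys/types.h>
  #include <sys/ptrace.h>
  #include <sys/uio.h>
  #include <linux/elf.h>    /* NT_ARM_ZA */
  #include <asm/ptrace.h>   /* struct user_za_header */

  /* Fetch only the header describing the target thread's ZA state. */
  static int read_za_header(pid_t pid, struct user_za_header *hdr)
  {
          struct iovec iov = { .iov_base = hdr, .iov_len = sizeof(*hdr) };

          if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov) != 0)
                  return -1;

          printf("ZA: vl=%u max_vl=%u size=%u flags=%#x\n",
                 (unsigned int)hdr->vl, (unsigned int)hdr->max_vl,
                 (unsigned int)hdr->size, (unsigned int)hdr->flags);
          return 0;
  }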
+
+
+9. ELF coredump extensions
+---------------------------
+
+* NT_ARM_SSVE notes will be added to each coredump for
+ each thread of the dumped process. The contents will be equivalent to the
+ data that would have been read if a PTRACE_GETREGSET of the corresponding
+ type were executed for each thread when the coredump was generated.
+
+* A NT_ARM_ZA note will be added to each coredump for each thread of the
+ dumped process. The contents will be equivalent to the data that would have
+ been read if a PTRACE_GETREGSET of NT_ARM_ZA were executed for each thread
+ when the coredump was generated.
+
+
+10. System runtime configuration
+---------------------------------
+
+* To mitigate the ABI impact of expansion of the signal frame, a policy
+ mechanism is provided for administrators, distro maintainers and developers
+ to set the default vector length for userspace processes:
+
+/proc/sys/abi/sme_default_vector_length
+
+ Writing the text representation of an integer to this file sets the system
+ default vector length to the specified value, unless the value is greater
+ than the maximum vector length supported by the system in which case the
+ default vector length is set to that maximum.
+
+ The result can be determined by reopening the file and reading its
+ contents.
+
+ At boot, the default vector length is initially set to 32 or the maximum
+ supported vector length, whichever is smaller and supported. This
+ determines the initial vector length of the init process (PID 1).
+
+ Reading this file returns the current system default vector length.
+
+* At every execve() call, the new vector length of the new process is set to
+ the system default vector length, unless
+
+ * PR_SME_VL_INHERIT (or equivalently SME_PT_VL_INHERIT) is set for the
+ calling thread, or
+
+ * a deferred vector length change is pending, established via the
+ PR_SME_SET_VL_ONEXEC flag (or SME_PT_VL_ONEXEC).
+
+* Modifying the system default vector length does not affect the vector length
+ of any existing process or thread that does not make an execve() call.
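
For illustration, the default could equally be programmed from C (privileged,
and relying only on the /proc path documented above)::

  #include <stdio.h>

  /* Set the system default streaming vector length, in bytes. */
  static int set_default_sme_vl(int vl_bytes)
  {
          FILE *f = fopen("/proc/sys/abi/sme_default_vector_length", "w");

          if (!f)
                  return -1;
          fprintf(f, "%d\n", vl_bytes);
          return fclose(f);   /* 0 on success */
  }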
+
+
+Appendix A. SME programmer's model (informative)
+=================================================
+
+This section provides a minimal description of the additions made by SME to the
+ARMv8-A programmer's model that are relevant to this document.
+
+Note: This section is for information only and not intended to be complete or
+to replace any architectural specification.
+
+A.1. Registers
+---------------
+
+In A64 state, SME adds the following:
+
+* A new mode, streaming mode, in which a subset of the normal FPSIMD and SVE
+ features are available. When supported, EL0 software may enter and leave
+ streaming mode at any time.
+
+ For best system performance it is strongly encouraged for software to enable
+ streaming mode only when it is actively being used.
+
+* A new vector length controlling the size of ZA and the Z registers when in
+ streaming mode, separately to the vector length used for SVE when not in
+ streaming mode. There is no requirement that either the currently selected
+ vector length or the set of vector lengths supported for the two modes in
+ a given system have any relationship. The streaming mode vector length
+ is referred to as SVL.
+
+* A new ZA matrix register. This is a square matrix of SVLxSVL bits. Most
+ operations on ZA require that streaming mode be enabled but ZA can be
+ enabled without streaming mode in order to load, save and retain data.
+
+ For best system performance it is strongly encouraged for software to enable
+ ZA only when it is actively being used.
+
+* Two new 1 bit fields in PSTATE which may be controlled via the SMSTART and
+ SMSTOP instructions or by access to the SVCR system register:
+
+ * PSTATE.ZA: if this is 1 then the ZA matrix is accessible and has valid
+ data, while if it is 0 then ZA cannot be accessed. When PSTATE.ZA is
+ changed from 0 to 1 all bits in ZA are cleared.
+
+ * PSTATE.SM: if this is 1 then the PE is in streaming mode. When the value
+ of PSTATE.SM is changed it is implementation defined whether the subset
+ of the floating point register bits valid in both modes is retained.
+ Any other bits will be cleared.
+
+
+References
+==========
+
+[1] arch/arm64/include/uapi/asm/sigcontext.h
+ AArch64 Linux signal ABI definitions
+
+[2] arch/arm64/include/uapi/asm/ptrace.h
+ AArch64 Linux ptrace ABI definitions
+
+[3] Documentation/arm64/cpu-feature-registers.rst
diff --git a/Documentation/arm64/sve.rst b/Documentation/arm64/sve.rst
index 9d9a4de5bc34..93c2c2990584 100644
--- a/Documentation/arm64/sve.rst
+++ b/Documentation/arm64/sve.rst
@@ -7,7 +7,9 @@ Author: Dave Martin <Dave.Martin@arm.com>
Date: 4 August 2017
This document outlines briefly the interface provided to userspace by Linux in
-order to support use of the ARM Scalable Vector Extension (SVE).
+order to support use of the ARM Scalable Vector Extension (SVE), including
+interactions with Streaming SVE mode added by the Scalable Matrix Extension
+(SME).
This is an outline of the most important features and issues only and not
intended to be exhaustive.
@@ -23,6 +25,10 @@ model features for SVE is included in Appendix A.
* SVE registers Z0..Z31, P0..P15 and FFR and the current vector length VL, are
tracked per-thread.
+* In streaming mode FFR is not accessible unless HWCAP2_SME_FA64 is present
+ in the system; when it is not supported and these interfaces are used to
+ access streaming mode, FFR is read and written as zero.
+
* The presence of SVE is reported to userspace via HWCAP_SVE in the aux vector
AT_HWCAP entry. Presence of this flag implies the presence of the SVE
instructions and registers, and the Linux-specific system interfaces
@@ -53,10 +59,19 @@ model features for SVE is included in Appendix A.
which userspace can read using an MRS instruction. See elf_hwcaps.txt and
cpu-feature-registers.txt for details.
+* On hardware that supports the SME extensions, HWCAP2_SME will also be
+ reported in the AT_HWCAP2 aux vector entry. Among other things SME adds
+ streaming mode which provides a subset of the SVE feature set using a
+ separate SME vector length and the same Z/V registers. See sme.rst
+ for more details.
+
* Debuggers should restrict themselves to interacting with the target via the
NT_ARM_SVE regset. The recommended way of detecting support for this regset
is to connect to a target process first and then attempt a
- ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov).
+ ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov). Note that when SME is
+ present and streaming SVE mode is in use the FPSIMD subset of registers
+ will be read via NT_ARM_SVE and NT_ARM_SVE writes will exit streaming mode
+ in the target.
* Whenever SVE scalable register values (Zn, Pn, FFR) are exchanged in memory
between userspace and the kernel, the register value is encoded in memory in
@@ -126,6 +141,11 @@ the SVE instruction set architecture.
are only present in fpsimd_context. For convenience, the content of V0..V31
is duplicated between sve_context and fpsimd_context.
+* The record contains a flag field which includes a flag SVE_SIG_FLAG_SM which
+ if set indicates that the thread is in streaming mode and the vector length
+ and register data (if present) describe the streaming SVE data and vector
+ length.
+
* The signal frame record for SVE always contains basic metadata, in particular
the thread's vector length (in sve_context.vl).
@@ -170,6 +190,11 @@ When returning from a signal handler:
the signal frame does not match the current vector length, the signal return
attempt is treated as illegal, resulting in a forced SIGSEGV.
+* It is permitted to enter or leave streaming mode by setting or clearing
+ the SVE_SIG_FLAG_SM flag but applications should take care to ensure that
+ when doing so sve_context.vl and any register data are appropriate for the
+ vector length in the new mode.
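
For example (an informal sketch relying on the updated <asm/sigcontext.h>),
code inspecting a signal frame can distinguish the two cases like this::

  #include <asm/sigcontext.h>   /* struct sve_context, SVE_SIG_FLAG_SM */

  /*
   * Nonzero if the record holds streaming-mode state, in which case
   * sve->vl is the streaming vector length rather than the normal SVE VL.
   */
  static int sve_record_is_streaming(const struct sve_context *sve)
  {
          return (sve->flags & SVE_SIG_FLAG_SM) != 0;
  }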
+
6. prctl extensions
--------------------
@@ -265,8 +290,14 @@ prctl(PR_SVE_GET_VL)
7. ptrace extensions
---------------------
-* A new regset NT_ARM_SVE is defined for use with PTRACE_GETREGSET and
- PTRACE_SETREGSET.
+* New regsets NT_ARM_SVE and NT_ARM_SSVE are defined for use with
+ PTRACE_GETREGSET and PTRACE_SETREGSET. NT_ARM_SSVE describes the
+ streaming mode SVE registers and NT_ARM_SVE describes the
+ non-streaming mode SVE registers.
+
+ In this description a register set is referred to as being "live" when
+ the target is in the appropriate streaming or non-streaming mode and is
+ using data beyond the subset shared with the FPSIMD Vn registers.
Refer to [2] for definitions.
@@ -297,7 +328,7 @@ The regset data starts with struct user_sve_header, containing:
flags
- either
+ at most one of
SVE_PT_REGS_FPSIMD
@@ -331,6 +362,10 @@ The regset data starts with struct user_sve_header, containing:
SVE_PT_VL_ONEXEC (SETREGSET only).
+ If neither FPSIMD nor SVE flags are provided then no register
+ payload is available; this is only possible when SME is implemented.
+
+
* The effects of changing the vector length and/or flags are equivalent to
those documented for PR_SVE_SET_VL.
@@ -346,6 +381,13 @@ The regset data starts with struct user_sve_header, containing:
case only the vector length and flags are changed (along with any
consequences of those changes).
+* In systems supporting SME, when in streaming mode a GETREGSET for
+ NT_ARM_SVE will return only the user_sve_header with no register data;
+ similarly, a GETREGSET for NT_ARM_SSVE will not return any register data
+ when not in streaming mode.
+
+* A GETREGSET for NT_ARM_SSVE will never return SVE_PT_REGS_FPSIMD.
+
* For SETREGSET, if an SVE_PT_REGS_SVE payload is present and the
requested VL is not supported, the effect will be the same as if the
payload were omitted, except that an EIO error is reported. No
@@ -355,17 +397,25 @@ The regset data starts with struct user_sve_header, containing:
unspecified. It is up to the caller to translate the payload layout
for the actual VL and retry.
+* Where SME is implemented it is not possible to GETREGSET the register
+ state for normal SVE when in streaming mode, nor the streaming mode
+ register state when in normal mode, regardless of the implementation defined
+ behaviour of the hardware for sharing data between the two modes.
+
+* Any SETREGSET of NT_ARM_SVE will exit streaming mode if the target was in
+ streaming mode and any SETREGSET of NT_ARM_SSVE will enter streaming mode
+ if the target was not in streaming mode.
+
* The effect of writing a partial, incomplete payload is unspecified.
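
One plausible way (sketch only, using the SVE_PT_* macros from
<asm/ptrace.h>) for a ptrace user to interpret what it got back::

  #include <asm/ptrace.h>   /* struct user_sve_header, SVE_PT_REGS_* */

  /*
   * Decode a header returned by PTRACE_GETREGSET for NT_ARM_SVE or
   * NT_ARM_SSVE: is a register payload present, and in which format?
   */
  static const char *sve_payload_kind(const struct user_sve_header *hdr)
  {
          if (hdr->size <= sizeof(*hdr))
                  return "header only, no register payload";

          if ((hdr->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE)
                  return "full Z/P/FFR register data";

          return "FPSIMD-only (V register) data";
  }
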
8. ELF coredump extensions
---------------------------
-* A NT_ARM_SVE note will be added to each coredump for each thread of the
- dumped process. The contents will be equivalent to the data that would have
- been read if a PTRACE_GETREGSET of NT_ARM_SVE were executed for each thread
- when the coredump was generated.
-
+* NT_ARM_SVE and NT_ARM_SSVE notes will be added to each coredump for
+ each thread of the dumped process. The contents will be equivalent to the
+ data that would have been read if a PTRACE_GETREGSET of the corresponding
+ type were executed for each thread when the coredump was generated.
9. System runtime configuration
--------------------------------
diff --git a/Documentation/devicetree/bindings/perf/arm,cmn.yaml b/Documentation/devicetree/bindings/perf/arm,cmn.yaml
index 2d4219ec7eda..2e51072e794a 100644
--- a/Documentation/devicetree/bindings/perf/arm,cmn.yaml
+++ b/Documentation/devicetree/bindings/perf/arm,cmn.yaml
@@ -14,6 +14,8 @@ properties:
compatible:
enum:
- arm,cmn-600
+ - arm,cmn-650
+ - arm,cmn-700
- arm,ci-700
reg:
diff --git a/arch/Kconfig b/arch/Kconfig
index 29b0167c088b..b34032279926 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -24,6 +24,13 @@ config KEXEC_ELF
config HAVE_IMA_KEXEC
bool
+config ARCH_HAS_SUBPAGE_FAULTS
+ bool
+ help
+ Select if the architecture can check permissions at sub-page
+ granularity (e.g. arm64 MTE). The probe_user_*() functions
+ must be implemented.
+
config HOTPLUG_SMT
bool
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2741e98d27fd..1b3e961ffb00 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1870,6 +1870,7 @@ config ARM64_MTE
depends on AS_HAS_LSE_ATOMICS
# Required for tag checking in the uaccess routines
depends on ARM64_PAN
+ select ARCH_HAS_SUBPAGE_FAULTS
select ARCH_USES_HIGH_VMA_FLAGS
help
Memory Tagging (part of the ARMv8.5 Extensions) provides
@@ -1938,6 +1939,17 @@ config ARM64_SVE
booting the kernel. If unsure and you are not observing these
symptoms, you should assume that it is safe to say Y.
+config ARM64_SME
+ bool "ARM Scalable Matrix Extension support"
+ default y
+ depends on ARM64_SVE
+ help
+ The Scalable Matrix Extension (SME) is an extension to the AArch64
+ execution state which utilises a substantial subset of the SVE
+ instruction set, together with the addition of new architectural
+ register state capable of holding two dimensional matrix tiles to
+ enable various matrix operations.
+
config ARM64_MODULE_PLTS
bool "Use PLTs to allow module memory to spill over into vmalloc area"
depends on MODULES
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index a58e366f0b07..115cdec1ae87 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -58,11 +58,15 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
u64 reg_id_aa64zfr0;
+ u64 reg_id_aa64smfr0;
struct cpuinfo_32bit aarch32;
/* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
u64 reg_zcr;
+
+ /* pseudo-SMCR for recording maximum SMCR_EL1 LEN value: */
+ u64 reg_smcr;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c62e7e5e2f0c..14a8f3d93add 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -622,6 +622,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
return val > 0;
}
+static inline bool id_aa64pfr1_sme(u64 pfr1)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_SME_SHIFT);
+
+ return val > 0;
+}
+
static inline bool id_aa64pfr1_mte(u64 pfr1)
{
u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
@@ -759,6 +766,23 @@ static __always_inline bool system_supports_sve(void)
cpus_have_const_cap(ARM64_SVE);
}
+static __always_inline bool system_supports_sme(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SME) &&
+ cpus_have_const_cap(ARM64_SME);
+}
+
+static __always_inline bool system_supports_fa64(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_SME) &&
+ cpus_have_const_cap(ARM64_SME_FA64);
+}
+
+static __always_inline bool system_supports_tpidr2(void)
+{
+ return system_supports_sme();
+}
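
A hypothetical in-kernel consumer (not part of this patch) would gate
SME-specific work on these helpers, for example:

	/*
	 * Illustrative only: FFR is only architecturally present in streaming
	 * mode when FEAT_SME_FA64 is implemented, which is reported as a
	 * separate capability from SME itself.
	 */
	static inline bool sme_ffr_usable_in_streaming_mode(void)
	{
		return system_supports_sme() && system_supports_fa64();
	}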
+
static __always_inline bool system_supports_cnp(void)
{
return IS_ENABLED(CONFIG_ARM64_CNP) &&
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index c31be7eda9df..fabdbde0fe02 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -143,6 +143,50 @@
.Lskip_sve_\@:
.endm
+/* SME register access and priority mapping */
+.macro __init_el2_nvhe_sme
+ mrs x1, id_aa64pfr1_el1
+ ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+ cbz x1, .Lskip_sme_\@
+
+ bic x0, x0, #CPTR_EL2_TSM // Also disable SME traps
+ msr cptr_el2, x0 // Disable copro. traps to EL2
+ isb
+
+ mrs x1, sctlr_el2
+ orr x1, x1, #SCTLR_ELx_ENTP2 // Disable TPIDR2 traps
+ msr sctlr_el2, x1
+ isb
+
+ mov x1, #0 // SMCR controls
+
+ mrs_s x2, SYS_ID_AA64SMFR0_EL1
+ ubfx x2, x2, #ID_AA64SMFR0_FA64_SHIFT, #1 // Full FP in SM?
+ cbz x2, .Lskip_sme_fa64_\@
+
+ orr x1, x1, SMCR_ELx_FA64_MASK
+.Lskip_sme_fa64_\@:
+
+ orr x1, x1, #SMCR_ELx_LEN_MASK // Enable full SME vector
+ msr_s SYS_SMCR_EL2, x1 // length for EL1.
+
+ mrs_s x1, SYS_SMIDR_EL1 // Priority mapping supported?
+ ubfx x1, x1, #SYS_SMIDR_EL1_SMPS_SHIFT, #1
+ cbz x1, .Lskip_sme_\@
+
+ msr_s SYS_SMPRIMAP_EL2, xzr // Make all priorities equal
+
+ mrs x1, id_aa64mmfr1_el1 // HCRX_EL2 present?
+ ubfx x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
+ cbz x1, .Lskip_sme_\@
+
+ mrs_s x1, SYS_HCRX_EL2
+ orr x1, x1, #HCRX_EL2_SMPME_MASK // Enable priority mapping
+ msr_s SYS_HCRX_EL2, x1
+
+.Lskip_sme_\@:
+.endm
+
/* Disable any fine grained traps */
.macro __init_el2_fgt
mrs x1, id_aa64mmfr0_el1
@@ -153,15 +197,26 @@
mrs x1, id_aa64dfr0_el1
ubfx x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
cmp x1, #3
- b.lt .Lset_fgt_\@
+ b.lt .Lset_debug_fgt_\@
/* Disable PMSNEVFR_EL1 read and write traps */
orr x0, x0, #(1 << 62)
-.Lset_fgt_\@:
+.Lset_debug_fgt_\@:
msr_s SYS_HDFGRTR_EL2, x0
msr_s SYS_HDFGWTR_EL2, x0
- msr_s SYS_HFGRTR_EL2, xzr
- msr_s SYS_HFGWTR_EL2, xzr
+
+ mov x0, xzr
+ mrs x1, id_aa64pfr1_el1
+ ubfx x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+ cbz x1, .Lset_fgt_\@
+
+ /* Disable nVHE traps of TPIDR2 and SMPRI */
+ orr x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
+ orr x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK
+
+.Lset_fgt_\@:
+ msr_s SYS_HFGRTR_EL2, x0
+ msr_s SYS_HFGWTR_EL2, x0
msr_s SYS_HFGITR_EL2, xzr
mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
@@ -196,6 +251,7 @@
__init_el2_nvhe_idregs
__init_el2_nvhe_cptr
__init_el2_nvhe_sve
+ __init_el2_nvhe_sme
__init_el2_fgt
__init_el2_nvhe_prepare_eret
.endm
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index d52a0b269ee8..0467837fd66b 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -37,7 +37,8 @@
#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
/* Unallocated EC: 0x1B */
#define ESR_ELx_EC_FPAC (0x1C) /* EL1 and above */
-/* Unallocated EC: 0x1D - 0x1E */
+#define ESR_ELx_EC_SME (0x1D)
+/* Unallocated EC: 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
@@ -75,6 +76,7 @@
#define ESR_ELx_IL_SHIFT (25)
#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
+#define ESR_ELx_ISS(esr) ((esr) & ESR_ELx_ISS_MASK)
/* ISS field definitions shared by different classes */
#define ESR_ELx_WNR_SHIFT (6)
@@ -327,6 +329,15 @@
#define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\
ESR_ELx_CP15_32_ISS_DIR_READ)
+/*
+ * ISS values for SME traps
+ */
+
+#define ESR_ELx_SME_ISS_SME_DISABLED 0
+#define ESR_ELx_SME_ISS_ILL 1
+#define ESR_ELx_SME_ISS_SM_DISABLED 2
+#define ESR_ELx_SME_ISS_ZA_DISABLED 3
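
A hypothetical consumer (not part of this patch) could decode these ISS codes
for diagnostics using the ESR_ELx_ISS() helper defined above, e.g.:

	/* Illustrative only: map an SME trap's ISS to a human readable cause. */
	static inline const char *esr_sme_trap_cause(unsigned int esr)
	{
		switch (ESR_ELx_ISS(esr)) {
		case ESR_ELx_SME_ISS_SME_DISABLED:
			return "SME access trapped";
		case ESR_ELx_SME_ISS_ILL:
			return "illegal SME instruction";
		case ESR_ELx_SME_ISS_SM_DISABLED:
			return "streaming mode disabled";
		case ESR_ELx_SME_ISS_ZA_DISABLED:
			return "ZA storage disabled";
		default:
			return "unknown SME trap";
		}
	}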
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 339477dca551..2add7f33b7c2 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -64,6 +64,7 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
struct pt_regs *regs);
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
void do_sve_acc(unsigned int esr, struct pt_regs *regs);
+void do_sme_acc(unsigned int esr, struct pt_regs *regs);
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs);
void do_sysinstr(unsigned int esr, struct pt_regs *regs);
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index cb24385e3632..75caa2098d5b 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -32,6 +32,18 @@
#define VFP_STATE_SIZE ((32 * 8) + 4)
#endif
+/*
+ * When we defined the maximum SVE vector length we defined the ABI so
+ * that the maximum vector length included all of the bits reserved in
+ * ZCR for future expansion rather than just those currently defined by
+ * the architecture. While SME follows a similar pattern, the fact that
+ * it includes a square matrix means that any allocations that attempt
+ * to cover the maximum potential vector length (such as happen with
+ * the regset used for ptrace) end up being extremely large. Define
+ * the much lower actual limit for use in such situations.
+ */
+#define SME_VQ_MAX 16
+
struct task_struct;
extern void fpsimd_save_state(struct user_fpsimd_state *state);
@@ -46,11 +58,23 @@ extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
- void *sve_state, unsigned int sve_vl);
+ void *sve_state, unsigned int sve_vl,
+ void *za_state, unsigned int sme_vl,
+ u64 *svcr);
extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
+static inline bool thread_sm_enabled(struct thread_struct *thread)
+{
+ return system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK);
+}
+
+static inline bool thread_za_enabled(struct thread_struct *thread)
+{
+ return system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_ZA_MASK);
+}
+
/* Maximum VL that SVE/SME VL-agnostic software can transparently support */
#define VL_ARCH_MAX 0x100
@@ -62,7 +86,14 @@ static inline size_t sve_ffr_offset(int vl)
static inline void *sve_pffr(struct thread_struct *thread)
{
- return (char *)thread->sve_state + sve_ffr_offset(thread_get_sve_vl(thread));
+ unsigned int vl;
+
+ if (system_supports_sme() && thread_sm_enabled(thread))
+ vl = thread_get_sme_vl(thread);
+ else
+ vl = thread_get_sve_vl(thread);
+
+ return (char *)thread->sve_state + sve_ffr_offset(vl);
}
extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
@@ -71,11 +102,17 @@ extern void sve_load_state(void const *state, u32 const *pfpsr,
extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
+extern void sme_set_vq(unsigned long vq_minus_1);
+extern void za_save_state(void *state);
+extern void za_load_state(void const *state);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
extern u64 read_zcr_features(void);
+extern u64 read_smcr_features(void);
/*
* Helpers to translate bit indices in sve_vq_map to VQ values (and
@@ -119,6 +156,7 @@ struct vl_info {
extern void sve_alloc(struct task_struct *task);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
+extern void fpsimd_force_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
@@ -171,6 +209,12 @@ static inline void write_vl(enum vec_type type, u64 val)
write_sysreg_s(tmp | val, SYS_ZCR_EL1);
break;
#endif
+#ifdef CONFIG_ARM64_SME
+ case ARM64_VEC_SME:
+ tmp = read_sysreg_s(SYS_SMCR_EL1) & ~SMCR_ELx_LEN_MASK;
+ write_sysreg_s(tmp | val, SYS_SMCR_EL1);
+ break;
+#endif
default:
WARN_ON_ONCE(1);
break;
@@ -208,6 +252,8 @@ static inline bool sve_vq_available(unsigned int vq)
return vq_available(ARM64_VEC_SVE, vq);
}
+size_t sve_state_size(struct task_struct const *task);
+
#else /* ! CONFIG_ARM64_SVE */
static inline void sve_alloc(struct task_struct *task) { }
@@ -247,8 +293,93 @@ static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }
+static inline size_t sve_state_size(struct task_struct const *task)
+{
+ return 0;
+}
+
#endif /* ! CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+static inline void sme_user_disable(void)
+{
+ sysreg_clear_set(cpacr_el1, CPACR_EL1_SMEN_EL0EN, 0);
+}
+
+static inline void sme_user_enable(void)
+{
+ sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_SMEN_EL0EN);
+}
+
+static inline void sme_smstart_sm(void)
+{
+ asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr"));
+}
+
+static inline void sme_smstop_sm(void)
+{
+ asm volatile(__msr_s(SYS_SVCR_SMSTOP_SM_EL0, "xzr"));
+}
+
+static inline void sme_smstop(void)
+{
+ asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr"));
+}
+
+extern void __init sme_setup(void);
+
+static inline int sme_max_vl(void)
+{
+ return vec_max_vl(ARM64_VEC_SME);
+}
+
+static inline int sme_max_virtualisable_vl(void)
+{
+ return vec_max_virtualisable_vl(ARM64_VEC_SME);
+}
+
+extern void sme_alloc(struct task_struct *task);
+extern unsigned int sme_get_vl(void);
+extern int sme_set_current_vl(unsigned long arg);
+extern int sme_get_current_vl(void);
+
+/*
+ * Return how many bytes of memory are required to store the full SME
+ * specific state (currently just ZA) for task, given task's currently
+ * configured vector length.
+ */
+static inline size_t za_state_size(struct task_struct const *task)
+{
+ unsigned int vl = task_get_sme_vl(task);
+
+ return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+}
+
+#else
+
+static inline void sme_user_disable(void) { BUILD_BUG(); }
+static inline void sme_user_enable(void) { BUILD_BUG(); }
+
+static inline void sme_smstart_sm(void) { }
+static inline void sme_smstop_sm(void) { }
+static inline void sme_smstop(void) { }
+
+static inline void sme_alloc(struct task_struct *task) { }
+static inline void sme_setup(void) { }
+static inline unsigned int sme_get_vl(void) { return 0; }
+static inline int sme_max_vl(void) { return 0; }
+static inline int sme_max_virtualisable_vl(void) { return 0; }
+static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
+static inline int sme_get_current_vl(void) { return -EINVAL; }
+
+static inline size_t za_state_size(struct task_struct const *task)
+{
+ return 0;
+}
+
+#endif /* ! CONFIG_ARM64_SME */
+
/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
extern void __efi_fpsimd_end(void);
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 2509d7dde55a..5e0910cf4832 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -93,6 +93,12 @@
.endif
.endm
+.macro _sme_check_wv v
+ .if (\v) < 12 || (\v) > 15
+ .error "Bad vector select register \v."
+ .endif
+.endm
+
/* SVE instruction encodings for non-SVE-capable assemblers */
/* (pre binutils 2.28, all kernel capable clang versions support SVE) */
@@ -174,6 +180,54 @@
| (\np)
.endm
+/* SME instruction encodings for non-SME-capable assemblers */
+/* (pre binutils 2.38/LLVM 13) */
+
+/* RDSVL X\nx, #\imm */
+.macro _sme_rdsvl nx, imm
+ _check_general_reg \nx
+ _check_num (\imm), -0x20, 0x1f
+ .inst 0x04bf5800 \
+ | (\nx) \
+ | (((\imm) & 0x3f) << 5)
+.endm
+
+/*
+ * STR (vector from ZA array):
+ * STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ */
+.macro _sme_str_zav nw, nxbase, offset=0
+ _sme_check_wv \nw
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0xe1200000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 7)
+.endm
+
+/*
+ * LDR (vector to ZA array):
+ * LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
+ */
+.macro _sme_ldr_zav nw, nxbase, offset=0
+ _sme_check_wv \nw
+ _check_general_reg \nxbase
+ _check_num (\offset), -0x100, 0xff
+ .inst 0xe1000000 \
+ | (((\nw) & 3) << 13) \
+ | ((\nxbase) << 5) \
+ | ((\offset) & 7)
+.endm
+
+/*
+ * Zero the entire ZA array
+ * ZERO ZA
+ */
+.macro zero_za
+ .inst 0xc00800ff
+.endm
+
.macro __for from:req, to:req
.if (\from) == (\to)
_for__body %\from
@@ -208,6 +262,17 @@
921:
.endm
+/* Update SMCR_EL1.LEN with the new VQ */
+.macro sme_load_vq xvqminus1, xtmp, xtmp2
+ mrs_s \xtmp, SYS_SMCR_EL1
+ bic \xtmp2, \xtmp, SMCR_ELx_LEN_MASK
+ orr \xtmp2, \xtmp2, \xvqminus1
+ cmp \xtmp2, \xtmp
+ b.eq 921f
+ msr_s SYS_SMCR_EL1, \xtmp2 //self-synchronising
+921:
+.endm
+
/* Preserve the first 128-bits of Znz and zero the rest. */
.macro _sve_flush_z nz
_sve_check_zreg \nz
@@ -254,3 +319,25 @@
ldr w\nxtmp, [\xpfpsr, #4]
msr fpcr, x\nxtmp
.endm
+
+.macro sme_save_za nxbase, xvl, nw
+ mov w\nw, #0
+
+423:
+ _sme_str_zav \nw, \nxbase
+ add x\nxbase, x\nxbase, \xvl
+ add x\nw, x\nw, #1
+ cmp \xvl, x\nw
+ bne 423b
+.endm
+
+.macro sme_load_za nxbase, xvl, nw
+ mov w\nw, #0
+
+423:
+ _sme_ldr_zav \nw, \nxbase
+ add x\nxbase, x\nxbase, \xvl
+ add x\nw, x\nw, #1
+ cmp \xvl, x\nw
+ bne 423b
+.endm
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 1494cfa8639b..dbc45a4157fa 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -80,8 +80,15 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
struct dyn_ftrace;
+struct ftrace_ops;
+struct ftrace_regs;
+
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop
+
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+#define ftrace_graph_func ftrace_graph_func
#endif
#define ftrace_return_address(n) return_address(n)
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 8db5ec0089db..9f0ce004fdbc 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -109,6 +109,14 @@
#define KERNEL_HWCAP_AFP __khwcap2_feature(AFP)
#define KERNEL_HWCAP_RPRES __khwcap2_feature(RPRES)
#define KERNEL_HWCAP_MTE3 __khwcap2_feature(MTE3)
+#define KERNEL_HWCAP_SME __khwcap2_feature(SME)
+#define KERNEL_HWCAP_SME_I16I64 __khwcap2_feature(SME_I16I64)
+#define KERNEL_HWCAP_SME_F64F64 __khwcap2_feature(SME_F64F64)
+#define KERNEL_HWCAP_SME_I8I32 __khwcap2_feature(SME_I8I32)
+#define KERNEL_HWCAP_SME_F16F32 __khwcap2_feature(SME_F16F32)
+#define KERNEL_HWCAP_SME_B16F32 __khwcap2_feature(SME_B16F32)
+#define KERNEL_HWCAP_SME_F32F32 __khwcap2_feature(SME_F32F32)
+#define KERNEL_HWCAP_SME_FA64 __khwcap2_feature(SME_FA64)
/*
* This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 1767ded83888..13ae232ec4a1 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -279,6 +279,7 @@
#define CPTR_EL2_TCPAC (1U << 31)
#define CPTR_EL2_TAM (1 << 30)
#define CPTR_EL2_TTA (1 << 20)
+#define CPTR_EL2_TSM (1 << 12)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ (1 << 8)
#define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 94a27a7520f4..f8f0d30dd1a2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -295,8 +295,11 @@ struct vcpu_reset_state {
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
+
+ /* Guest floating point state */
void *sve_state;
unsigned int sve_max_vl;
+ u64 svcr;
/* Stage 2 paging state used by the hardware on next switch */
struct kvm_s2_mmu *hw_mmu;
@@ -451,6 +454,7 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14)
#define KVM_ARM64_ON_UNSUPPORTED_CPU (1 << 15) /* Physical CPU not in supported_cpus */
+#define KVM_ARM64_HOST_SME_ENABLED (1 << 16) /* SME enabled for EL0 */
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index adcb937342f1..aa523591a44e 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -47,6 +47,7 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
+size_t mte_probe_user_range(const char __user *uaddr, size_t size);
#else /* CONFIG_ARM64_MTE */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 73e38d9a540c..1d2ca4870b84 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -118,6 +118,7 @@ struct debug_info {
enum vec_type {
ARM64_VEC_SVE = 0,
+ ARM64_VEC_SME,
ARM64_VEC_MAX,
};
@@ -153,6 +154,7 @@ struct thread_struct {
unsigned int fpsimd_cpu;
void *sve_state; /* SVE registers, if any */
+ void *za_state; /* ZA register, if any */
unsigned int vl[ARM64_VEC_MAX]; /* vector length */
unsigned int vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
unsigned long fault_address; /* fault info */
@@ -168,6 +170,8 @@ struct thread_struct {
u64 mte_ctrl;
#endif
u64 sctlr_user;
+ u64 svcr;
+ u64 tpidr2_el0;
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
@@ -181,6 +185,19 @@ static inline unsigned int thread_get_sve_vl(struct thread_struct *thread)
return thread_get_vl(thread, ARM64_VEC_SVE);
}
+static inline unsigned int thread_get_sme_vl(struct thread_struct *thread)
+{
+ return thread_get_vl(thread, ARM64_VEC_SME);
+}
+
+static inline unsigned int thread_get_cur_vl(struct thread_struct *thread)
+{
+ if (system_supports_sme() && (thread->svcr & SYS_SVCR_EL0_SM_MASK))
+ return thread_get_sme_vl(thread);
+ else
+ return thread_get_sve_vl(thread);
+}
+
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type);
void task_set_vl(struct task_struct *task, enum vec_type type,
unsigned long vl);
@@ -194,6 +211,11 @@ static inline unsigned int task_get_sve_vl(const struct task_struct *task)
return task_get_vl(task, ARM64_VEC_SVE);
}
+static inline unsigned int task_get_sme_vl(const struct task_struct *task)
+{
+ return task_get_vl(task, ARM64_VEC_SME);
+}
+
static inline void task_set_sve_vl(struct task_struct *task, unsigned long vl)
{
task_set_vl(task, ARM64_VEC_SVE, vl);
@@ -354,9 +376,11 @@ extern void __init minsigstksz_setup(void);
*/
#include <asm/fpsimd.h>
-/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
+/* Userspace interface for PR_S[MV]E_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL() sve_get_current_vl()
+#define SME_SET_VL(arg) sme_set_current_vl(arg)
+#define SME_GET_VL() sme_get_current_vl()
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index e77cdef9ca29..aec9315bf156 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -31,38 +31,6 @@ struct stack_info {
enum stack_type type;
};
-/*
- * A snapshot of a frame record or fp/lr register values, along with some
- * accounting information necessary for robust unwinding.
- *
- * @fp: The fp value in the frame record (or the real fp)
- * @pc: The lr value in the frame record (or the real lr)
- *
- * @stacks_done: Stacks which have been entirely unwound, for which it is no
- * longer valid to unwind to.
- *
- * @prev_fp: The fp that pointed to this frame record, or a synthetic value
- * of 0. This is used to ensure that within a stack, each
- * subsequent frame record is at an increasing address.
- * @prev_type: The type of stack this frame record was on, or a synthetic
- * value of STACK_TYPE_UNKNOWN. This is used to detect a
- * transition from one stack to another.
- *
- * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
- * associated with the most recently encountered replacement lr
- * value.
- */
-struct stackframe {
- unsigned long fp;
- unsigned long pc;
- DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
- unsigned long prev_fp;
- enum stack_type prev_type;
-#ifdef CONFIG_KRETPROBES
- struct llist_node *kr_cur;
-#endif
-};
-
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
const char *loglvl);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index fbf5f8bb9055..bebfdd27296a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -118,6 +118,10 @@
* System registers, organised loosely by encoding but grouped together
* where the architected name contains an index. e.g. ID_MMFR<n>_EL1.
*/
+#define SYS_SVCR_SMSTOP_SM_EL0 sys_reg(0, 3, 4, 2, 3)
+#define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3)
+#define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3)
+
#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
@@ -181,6 +185,7 @@
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
+#define SYS_ID_AA64SMFR0_EL1 sys_reg(3, 0, 0, 4, 5)
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
@@ -204,6 +209,8 @@
#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
+#define SYS_SMPRI_EL1 sys_reg(3, 0, 1, 2, 4)
+#define SYS_SMCR_EL1 sys_reg(3, 0, 1, 2, 6)
#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
@@ -396,6 +403,8 @@
#define TRBIDR_ALIGN_MASK GENMASK(3, 0)
#define TRBIDR_ALIGN_SHIFT 0
+#define SMPRI_EL1_PRIORITY_MASK 0xf
+
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
@@ -451,8 +460,13 @@
#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4)
+#define SYS_SMIDR_EL1 sys_reg(3, 1, 0, 0, 6)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
+#define SYS_SMIDR_EL1_IMPLEMENTER_SHIFT 24
+#define SYS_SMIDR_EL1_SMPS_SHIFT 15
+#define SYS_SMIDR_EL1_AFFINITY_SHIFT 0
+
#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
@@ -461,6 +475,10 @@
#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
+#define SYS_SVCR_EL0 sys_reg(3, 3, 4, 2, 2)
+#define SYS_SVCR_EL0_ZA_MASK 2
+#define SYS_SVCR_EL0_SM_MASK 1
+
#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
@@ -477,6 +495,7 @@
#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
+#define SYS_TPIDR2_EL0 sys_reg(3, 3, 13, 0, 5)
#define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7)
@@ -546,6 +565,9 @@
#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
+#define SYS_HCRX_EL2 sys_reg(3, 4, 1, 2, 2)
+#define SYS_SMPRIMAP_EL2 sys_reg(3, 4, 1, 2, 5)
+#define SYS_SMCR_EL2 sys_reg(3, 4, 1, 2, 6)
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
@@ -605,6 +627,7 @@
#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
+#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6)
#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
@@ -628,6 +651,7 @@
#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2)
/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_ENTP2 (BIT(60))
#define SCTLR_ELx_DSSBS (BIT(44))
#define SCTLR_ELx_ATA (BIT(43))
@@ -836,6 +860,7 @@
#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
/* id_aa64pfr1 */
+#define ID_AA64PFR1_SME_SHIFT 24
#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
#define ID_AA64PFR1_RASFRAC_SHIFT 12
#define ID_AA64PFR1_MTE_SHIFT 8
@@ -846,6 +871,7 @@
#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
#define ID_AA64PFR1_BT_BTI 0x1
+#define ID_AA64PFR1_SME 1
#define ID_AA64PFR1_MTE_NI 0x0
#define ID_AA64PFR1_MTE_EL0 0x1
@@ -874,6 +900,23 @@
#define ID_AA64ZFR0_AES_PMULL 0x2
#define ID_AA64ZFR0_SVEVER_SVE2 0x1
+/* id_aa64smfr0 */
+#define ID_AA64SMFR0_FA64_SHIFT 63
+#define ID_AA64SMFR0_I16I64_SHIFT 52
+#define ID_AA64SMFR0_F64F64_SHIFT 48
+#define ID_AA64SMFR0_I8I32_SHIFT 36
+#define ID_AA64SMFR0_F16F32_SHIFT 35
+#define ID_AA64SMFR0_B16F32_SHIFT 34
+#define ID_AA64SMFR0_F32F32_SHIFT 32
+
+#define ID_AA64SMFR0_FA64 0x1
+#define ID_AA64SMFR0_I16I64 0x4
+#define ID_AA64SMFR0_F64F64 0x1
+#define ID_AA64SMFR0_I8I32 0x4
+#define ID_AA64SMFR0_F16F32 0x1
+#define ID_AA64SMFR0_B16F32 0x1
+#define ID_AA64SMFR0_F32F32 0x1
+
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_ECV_SHIFT 60
#define ID_AA64MMFR0_FGT_SHIFT 56
@@ -926,6 +969,7 @@
/* id_aa64mmfr1 */
#define ID_AA64MMFR1_ECBHB_SHIFT 60
+#define ID_AA64MMFR1_HCX_SHIFT 40
#define ID_AA64MMFR1_AFP_SHIFT 44
#define ID_AA64MMFR1_ETS_SHIFT 36
#define ID_AA64MMFR1_TWED_SHIFT 32
@@ -1119,9 +1163,24 @@
#define ZCR_ELx_LEN_SIZE 9
#define ZCR_ELx_LEN_MASK 0x1ff
+#define SMCR_ELx_FA64_SHIFT 31
+#define SMCR_ELx_FA64_MASK (1 << SMCR_ELx_FA64_SHIFT)
+
+/*
+ * The SMCR_ELx_LEN_* definitions intentionally include bits [8:4] which
+ * are reserved by the SME architecture for future expansion of the LEN
+ * field, with compatible semantics.
+ */
+#define SMCR_ELx_LEN_SHIFT 0
+#define SMCR_ELx_LEN_SIZE 9
+#define SMCR_ELx_LEN_MASK 0x1ff
+
#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */
#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */
+#define CPACR_EL1_SMEN_EL1EN (BIT(24)) /* enable EL1 access */
+#define CPACR_EL1_SMEN_EL0EN (BIT(25)) /* enable EL0 access, if EL1EN set */
+
#define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */
#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
@@ -1170,6 +1229,8 @@
#define TRFCR_ELx_ExTRE BIT(1)
#define TRFCR_ELx_E0TRE BIT(0)
+/* HCRX_EL2 definitions */
+#define HCRX_EL2_SMPME_MASK (1 << 5)
/* GIC Hypervisor interface registers */
/* ICH_MISR_EL2 bit definitions */
@@ -1233,6 +1294,12 @@
#define ICH_VTR_TDS_SHIFT 19
#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
+/* HFG[WR]TR_EL2 bit definitions */
+#define HFGxTR_EL2_nTPIDR2_EL0_SHIFT 55
+#define HFGxTR_EL2_nTPIDR2_EL0_MASK BIT_MASK(HFGxTR_EL2_nTPIDR2_EL0_SHIFT)
+#define HFGxTR_EL2_nSMPRI_EL1_SHIFT 54
+#define HFGxTR_EL2_nSMPRI_EL1_MASK BIT_MASK(HFGxTR_EL2_nSMPRI_EL1_SHIFT)
+
#define ARM64_FEATURE_FIELD_BITS 4
/* Create a mask for the feature bits of the specified feature. */
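A quick worked example of the new SMCR_ELx LEN field (a sketch for illustration, not part of the patch): LEN selects the streaming vector length in units of 128 bits, so the requested VL in bytes follows directly from the register value; the hardware may grant a smaller supported VL.

/*
 * Sketch only: requested streaming vector length in bytes implied by an
 * SMCR_ELx value, i.e. vl = 16 * (LEN + 1).
 */
static inline unsigned int smcr_len_to_vl(u64 smcr)
{
	return 16 * ((smcr & SMCR_ELx_LEN_MASK) + 1);
}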
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index e1317b7c4525..848739c15de8 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -82,6 +82,8 @@ int arch_dup_task_struct(struct task_struct *dst,
#define TIF_SVE_VL_INHERIT 24 /* Inherit SVE vl_onexec across exec */
#define TIF_SSBD 25 /* Wants SSB mitigation */
#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
+#define TIF_SME 27 /* SME in use */
+#define TIF_SME_VL_INHERIT 28 /* Inherit SME vl_onexec across exec */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e8dce0cc5eaa..63f9c828f1a7 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -460,4 +460,19 @@ static inline int __copy_from_user_flushcache(void *dst, const void __user *src,
}
#endif
+#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
+
+/*
+ * Return 0 on success, the number of bytes not probed otherwise.
+ */
+static inline size_t probe_subpage_writeable(const char __user *uaddr,
+ size_t size)
+{
+ if (!system_supports_mte())
+ return 0;
+ return mte_probe_user_range(uaddr, size);
+}
+
+#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
+
#endif /* __ASM_UACCESS_H */
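A minimal usage sketch for probe_subpage_writeable() (an illustrative caller, not part of the patch): a non-zero return means part of the range would fault at MTE granule granularity, so a caller can reject the buffer before attempting an atomic user access.

/* Sketch only: bail out early if any granule in the user range would fault. */
static int check_user_range(const char __user *ubuf, size_t len)
{
	if (probe_subpage_writeable(ubuf, len))
		return -EFAULT;

	return 0;
}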
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 99cb5d383048..b0256cec63b5 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -79,5 +79,13 @@
#define HWCAP2_AFP (1 << 20)
#define HWCAP2_RPRES (1 << 21)
#define HWCAP2_MTE3 (1 << 22)
+#define HWCAP2_SME (1 << 23)
+#define HWCAP2_SME_I16I64 (1 << 24)
+#define HWCAP2_SME_F64F64 (1 << 25)
+#define HWCAP2_SME_I8I32 (1 << 26)
+#define HWCAP2_SME_F16F32 (1 << 27)
+#define HWCAP2_SME_B16F32 (1 << 28)
+#define HWCAP2_SME_F32F32 (1 << 29)
+#define HWCAP2_SME_FA64 (1 << 30)
#endif /* _UAPI__ASM_HWCAP_H */
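Userspace discovers these bits through the usual hwcap mechanism; a minimal sketch using getauxval() (nothing here is specific to the patch beyond the new HWCAP2_SME* flags):

#include <sys/auxv.h>
#include <asm/hwcap.h>

/* Sketch only: 0 = no SME, 1 = SME, 2 = SME with FA64. */
static int have_sme(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	if (!(hwcap2 & HWCAP2_SME))
		return 0;

	return (hwcap2 & HWCAP2_SME_FA64) ? 2 : 1;
}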
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 758ae984ff97..7fa2f7036aa7 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -109,7 +109,7 @@ struct user_hwdebug_state {
} dbg_regs[16];
};
-/* SVE/FP/SIMD state (NT_ARM_SVE) */
+/* SVE/FP/SIMD state (NT_ARM_SVE & NT_ARM_SSVE) */
struct user_sve_header {
__u32 size; /* total meaningful regset content in bytes */
@@ -220,6 +220,7 @@ struct user_sve_header {
(SVE_PT_SVE_PREG_OFFSET(vq, __SVE_NUM_PREGS) - \
SVE_PT_SVE_PREGS_OFFSET(vq))
+/* For streaming mode SVE (SSVE) FFR must be read and written as zero */
#define SVE_PT_SVE_FFR_OFFSET(vq) \
(SVE_PT_REGS_OFFSET + __SVE_FFR_OFFSET(vq))
@@ -240,10 +241,12 @@ struct user_sve_header {
- SVE_PT_SVE_OFFSET + (__SVE_VQ_BYTES - 1)) \
/ __SVE_VQ_BYTES * __SVE_VQ_BYTES)
-#define SVE_PT_SIZE(vq, flags) \
- (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
- SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
- : SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))
+#define SVE_PT_SIZE(vq, flags) \
+ (((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
+ SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
+ : ((((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD ? \
+ SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags) \
+ : SVE_PT_REGS_OFFSET)))
/* pointer authentication masks (NT_ARM_PAC_MASK) */
@@ -265,6 +268,62 @@ struct user_pac_generic_keys {
__uint128_t apgakey;
};
+/* ZA state (NT_ARM_ZA) */
+
+struct user_za_header {
+ __u32 size; /* total meaningful regset content in bytes */
+ __u32 max_size; /* maximum possible size for this thread */
+ __u16 vl; /* current vector length */
+ __u16 max_vl; /* maximum possible vector length */
+ __u16 flags;
+ __u16 __reserved;
+};
+
+/*
+ * Common ZA_PT_* flags:
+ * These must be kept in sync with prctl interface in <linux/prctl.h>
+ */
+#define ZA_PT_VL_INHERIT ((1 << 17) /* PR_SME_VL_INHERIT */ >> 16)
+#define ZA_PT_VL_ONEXEC ((1 << 18) /* PR_SME_SET_VL_ONEXEC */ >> 16)
+
+
+/*
+ * The remainder of the ZA state follows struct user_za_header. The
+ * total size of the ZA state (including header) depends on the
+ * metadata in the header: ZA_PT_SIZE(vq, flags) gives the total size
+ * of the state in bytes, including the header.
+ *
+ * Refer to <asm/sigcontext.h> for details of how to pass the correct
+ * "vq" argument to these macros.
+ */
+
+/* Offset from the start of struct user_za_header to the register data */
+#define ZA_PT_ZA_OFFSET \
+ ((sizeof(struct user_za_header) + (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
+
+/*
+ * The payload starts at offset ZA_PT_ZA_OFFSET, and is of size
+ * ZA_PT_ZA_SIZE(vq, flags).
+ *
+ * The ZA array is stored as a sequence of horizontal vectors ZAV of SVL/8
+ * bytes each, starting from vector 0.
+ *
+ * Additional data might be appended in the future.
+ *
+ * The ZA matrix is represented in memory in an endianness-invariant layout
+ * which differs from the layout used for the FPSIMD V-registers on big-endian
+ * systems: see sigcontext.h for more explanation.
+ */
+
+#define ZA_PT_ZAV_OFFSET(vq, n) \
+ (ZA_PT_ZA_OFFSET + ((vq * __SVE_VQ_BYTES) * n))
+
+#define ZA_PT_ZA_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
+
+#define ZA_PT_SIZE(vq) \
+ (ZA_PT_ZA_OFFSET + ZA_PT_ZA_SIZE(vq))
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI__ASM_PTRACE_H */
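A debugger-side sketch of reading the new ZA regset (assumes NT_ARM_ZA as added to <linux/elf.h> elsewhere in this series; error handling trimmed). The header is fetched first so the full read can be sized with ZA_PT_SIZE() for the tracee's current vector length.

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <linux/elf.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>	/* sve_vq_from_vl() */

/* Sketch only: fetch the ZA regset of a stopped tracee. */
static void *read_za_regset(pid_t pid)
{
	struct user_za_header hdr;
	struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };
	void *buf;

	if (ptrace(PTRACE_GETREGSET, pid, (void *)(uintptr_t)NT_ARM_ZA, &iov))
		return NULL;

	/* Size the full read from the reported vector length. */
	iov.iov_len = ZA_PT_SIZE(sve_vq_from_vl(hdr.vl));
	buf = malloc(iov.iov_len);
	if (!buf)
		return NULL;
	iov.iov_base = buf;

	if (ptrace(PTRACE_GETREGSET, pid, (void *)(uintptr_t)NT_ARM_ZA, &iov)) {
		free(buf);
		return NULL;
	}

	return buf;
}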
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 0c796c795dbe..4aaf31e3bf16 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -134,6 +134,17 @@ struct extra_context {
struct sve_context {
struct _aarch64_ctx head;
__u16 vl;
+ __u16 flags;
+ __u16 __reserved[2];
+};
+
+#define SVE_SIG_FLAG_SM 0x1 /* Context describes streaming mode */
+
+#define ZA_MAGIC 0x54366345
+
+struct za_context {
+ struct _aarch64_ctx head;
+ __u16 vl;
__u16 __reserved[3];
};
@@ -186,9 +197,16 @@ struct sve_context {
* sve_context.vl must equal the thread's current vector length when
* doing a sigreturn.
*
+ * On systems with support for SME the SVE register state may reflect either
+ * streaming or non-streaming mode. In streaming mode the streaming mode
+ * vector length will be used and the flag SVE_SIG_FLAG_SM will be set in
+ * the flags field. It is permitted to enter or leave streaming mode in
+ * a signal return; applications should take care to ensure that any difference
+ * in vector length between the two modes is handled, including any resizing
+ * and movement of context blocks.
*
- * Note: for all these macros, the "vq" argument denotes the SVE
- * vector length in quadwords (i.e., units of 128 bits).
+ * Note: for all these macros, the "vq" argument denotes the vector length
+ * in quadwords (i.e., units of 128 bits).
*
* The correct way to obtain vq is to use sve_vq_from_vl(vl). The
* result is valid if and only if sve_vl_valid(vl) is true. This is
@@ -249,4 +267,37 @@ struct sve_context {
#define SVE_SIG_CONTEXT_SIZE(vq) \
(SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
+/*
+ * If the ZA register is enabled for the thread at signal delivery then,
+ * za_context.head.size >= ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
+ * and the register data may be accessed using the ZA_SIG_*() macros.
+ *
+ * If za_context.head.size < ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za_context.vl))
+ * then the ZA register was not enabled for the thread, no register data
+ * was included, and the ZA_SIG_*() macros should not be used except for
+ * this check.
+ *
+ * The same convention applies when returning from a signal: a caller
+ * will need to remove or resize the za_context block if it wants to
+ * enable the ZA register when it was previously non-live or vice-versa.
+ * This may require the caller to allocate fresh memory and/or move other
+ * context blocks in the signal frame.
+ *
+ * Changing the vector length during signal return is not permitted:
+ * za_context.vl must equal the thread's current SME vector length when
+ * doing a sigreturn.
+ */
+
+#define ZA_SIG_REGS_OFFSET \
+ ((sizeof(struct za_context) + (__SVE_VQ_BYTES - 1)) \
+ / __SVE_VQ_BYTES * __SVE_VQ_BYTES)
+
+#define ZA_SIG_REGS_SIZE(vq) ((vq * __SVE_VQ_BYTES) * (vq * __SVE_VQ_BYTES))
+
+#define ZA_SIG_ZAV_OFFSET(vq, n) (ZA_SIG_REGS_OFFSET + \
+ (SVE_SIG_ZREG_SIZE(vq) * n))
+
+#define ZA_SIG_CONTEXT_SIZE(vq) \
+ (ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
+
#endif /* _UAPI__ASM_SIGCONTEXT_H */
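A signal-handler-side sketch of the check described in the comment above (assuming the handler already walks the _aarch64_ctx records in the frame's __reserved area, as for SVE): the za_context record only carries register data when head.size reaches ZA_SIG_CONTEXT_SIZE() for its vector length.

#include <stdbool.h>
#include <asm/sigcontext.h>

/* Sketch only: walk the context list and report whether ZA was live. */
static bool za_live_in_frame(struct _aarch64_ctx *head)
{
	while (head->magic) {
		if (head->magic == ZA_MAGIC) {
			struct za_context *za = (struct za_context *)head;
			unsigned int vq = sve_vq_from_vl(za->vl);

			return za->head.size >= ZA_SIG_CONTEXT_SIZE(vq);
		}
		head = (struct _aarch64_ctx *)((char *)head + head->size);
	}

	return false;
}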
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d72c4b4d389c..619324b8bcef 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -261,6 +261,8 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SME_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
@@ -293,6 +295,24 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_FA64_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_I16I64_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F64F64_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_I8I32_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F16F32_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_B16F32_SHIFT, 1, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
+ FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_F32F32_SHIFT, 1, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
@@ -561,6 +581,12 @@ static const struct arm64_ftr_bits ftr_zcr[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_smcr[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
+ SMCR_ELx_LEN_SHIFT, SMCR_ELx_LEN_SIZE, 0), /* LEN */
+ ARM64_FTR_END,
+};
+
/*
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
@@ -645,6 +671,7 @@ static const struct __ftr_reg_entry {
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
&id_aa64pfr1_override),
ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),
+ ARM64_FTR_REG(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0),
/* Op1 = 0, CRn = 0, CRm = 5 */
ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
@@ -666,6 +693,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 1, CRm = 2 */
ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
+ ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr),
/* Op1 = 1, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
@@ -960,6 +988,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
+ init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0);
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
init_32bit_cpu_features(&info->aarch32);
@@ -969,6 +998,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
vec_init_vq_map(ARM64_VEC_SVE);
}
+ if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
+ init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
+ if (IS_ENABLED(CONFIG_ARM64_SME))
+ vec_init_vq_map(ARM64_VEC_SME);
+ }
+
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
@@ -1195,6 +1230,9 @@ void update_cpu_features(int cpu,
taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
+ info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
+
if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
info->reg_zcr, boot->reg_zcr);
@@ -1205,6 +1243,16 @@ void update_cpu_features(int cpu,
vec_update_vq_map(ARM64_VEC_SVE);
}
+ if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
+ taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu,
+ info->reg_smcr, boot->reg_smcr);
+
+ /* Probe vector lengths, unless we already gave up on SME */
+ if (id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1)) &&
+ !system_capabilities_finalized())
+ vec_update_vq_map(ARM64_VEC_SME);
+ }
+
/*
* The kernel uses the LDGM/STGM instructions and the number of tags
* they read/write depends on the GMID_EL1.BS field. Check that the
@@ -1288,6 +1336,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
read_sysreg_case(SYS_ID_AA64PFR0_EL1);
read_sysreg_case(SYS_ID_AA64PFR1_EL1);
read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
+ read_sysreg_case(SYS_ID_AA64SMFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR1_EL1);
read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
@@ -2442,6 +2491,33 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
.min_field_value = 1,
},
+#ifdef CONFIG_ARM64_SME
+ {
+ .desc = "Scalable Matrix Extension",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_SME,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR1_SME_SHIFT,
+ .field_width = 4,
+ .min_field_value = ID_AA64PFR1_SME,
+ .matches = has_cpuid_feature,
+ .cpu_enable = sme_kernel_enable,
+ },
+ /* FA64 should be sorted after the base SME capability */
+ {
+ .desc = "FA64",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_SME_FA64,
+ .sys_reg = SYS_ID_AA64SMFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64SMFR0_FA64_SHIFT,
+ .field_width = 1,
+ .min_field_value = ID_AA64SMFR0_FA64,
+ .matches = has_cpuid_feature,
+ .cpu_enable = fa64_kernel_enable,
+ },
+#endif /* CONFIG_ARM64_SME */
{},
};
@@ -2575,6 +2651,16 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+#ifdef CONFIG_ARM64_SME
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_FA64, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_I16I64_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_I16I64, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F64F64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F64F64, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_I8I32_SHIFT, 4, FTR_UNSIGNED, ID_AA64SMFR0_I8I32, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F16F32, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_B16F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_B16F32, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
+ HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_F32F32_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_F32F32, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
+#endif /* CONFIG_ARM64_SME */
{},
};
@@ -2872,6 +2958,23 @@ static void verify_sve_features(void)
/* Add checks on other ZCR bits here if necessary */
}
+static void verify_sme_features(void)
+{
+ u64 safe_smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
+ u64 smcr = read_smcr_features();
+
+ unsigned int safe_len = safe_smcr & SMCR_ELx_LEN_MASK;
+ unsigned int len = smcr & SMCR_ELx_LEN_MASK;
+
+ if (len < safe_len || vec_verify_vq_map(ARM64_VEC_SME)) {
+ pr_crit("CPU%d: SME: vector length support mismatch\n",
+ smp_processor_id());
+ cpu_die_early();
+ }
+
+ /* Add checks on other SMCR bits here if necessary */
+}
+
static void verify_hyp_capabilities(void)
{
u64 safe_mmfr1, mmfr0, mmfr1;
@@ -2924,6 +3027,9 @@ static void verify_local_cpu_capabilities(void)
if (system_supports_sve())
verify_sve_features();
+ if (system_supports_sme())
+ verify_sme_features();
+
if (is_hyp_mode_available())
verify_hyp_capabilities();
}
@@ -3041,6 +3147,7 @@ void __init setup_cpu_features(void)
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
sve_setup();
+ sme_setup();
minsigstksz_setup();
/* Advertise that we have computed the system capabilities */
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 330b92ea863a..8a8136a096ac 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -98,6 +98,14 @@ static const char *const hwcap_str[] = {
[KERNEL_HWCAP_AFP] = "afp",
[KERNEL_HWCAP_RPRES] = "rpres",
[KERNEL_HWCAP_MTE3] = "mte3",
+ [KERNEL_HWCAP_SME] = "sme",
+ [KERNEL_HWCAP_SME_I16I64] = "smei16i64",
+ [KERNEL_HWCAP_SME_F64F64] = "smef64f64",
+ [KERNEL_HWCAP_SME_I8I32] = "smei8i32",
+ [KERNEL_HWCAP_SME_F16F32] = "smef16f32",
+ [KERNEL_HWCAP_SME_B16F32] = "smeb16f32",
+ [KERNEL_HWCAP_SME_F32F32] = "smef32f32",
+ [KERNEL_HWCAP_SME_FA64] = "smefa64",
};
#ifdef CONFIG_COMPAT
@@ -401,6 +409,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
+ info->reg_id_aa64smfr0 = read_cpuid(ID_AA64SMFR0_EL1);
if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
info->reg_gmid = read_cpuid(GMID_EL1);
@@ -412,6 +421,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
id_aa64pfr0_sve(info->reg_id_aa64pfr0))
info->reg_zcr = read_zcr_features();
+ if (IS_ENABLED(CONFIG_ARM64_SME) &&
+ id_aa64pfr1_sme(info->reg_id_aa64pfr1))
+ info->reg_smcr = read_smcr_features();
+
cpuinfo_detect_icache_policy(info);
}
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 878c65aa7206..29139e9a1517 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -537,6 +537,14 @@ static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
exit_to_user_mode(regs);
}
+static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
+{
+ enter_from_user_mode(regs);
+ local_daif_restore(DAIF_PROCCTX);
+ do_sme_acc(esr, regs);
+ exit_to_user_mode(regs);
+}
+
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
enter_from_user_mode(regs);
@@ -645,6 +653,9 @@ asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
case ESR_ELx_EC_SVE:
el0_sve_acc(regs, esr);
break;
+ case ESR_ELx_EC_SME:
+ el0_sme_acc(regs, esr);
+ break;
case ESR_ELx_EC_FP_EXC64:
el0_fpsimd_exc(regs, esr);
break;
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index dc242e269f9a..229436f33df5 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -86,3 +86,39 @@ SYM_FUNC_START(sve_flush_live)
SYM_FUNC_END(sve_flush_live)
#endif /* CONFIG_ARM64_SVE */
+
+#ifdef CONFIG_ARM64_SME
+
+SYM_FUNC_START(sme_get_vl)
+ _sme_rdsvl 0, 1
+ ret
+SYM_FUNC_END(sme_get_vl)
+
+SYM_FUNC_START(sme_set_vq)
+ sme_load_vq x0, x1, x2
+ ret
+SYM_FUNC_END(sme_set_vq)
+
+/*
+ * Save the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_save_state)
+ _sme_rdsvl 1, 1 // x1 = VL/8
+ sme_save_za 0, x1, 12
+ ret
+SYM_FUNC_END(za_save_state)
+
+/*
+ * Load the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_load_state)
+ _sme_rdsvl 1, 1 // x1 = VL/8
+ sme_load_za 0, x1, 12
+ ret
+SYM_FUNC_END(za_load_state)
+
+#endif /* CONFIG_ARM64_SME */
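For reference, the C-side declarations these new assembly helpers are expected to sit behind look roughly as follows (a sketch; the real prototypes are added to <asm/fpsimd.h> elsewhere in this series):

extern unsigned int sme_get_vl(void);
extern void sme_set_vq(unsigned long vq_minus_1);
extern void za_save_state(void *state);
extern void za_load_state(void const *state);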
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index e535480a4069..d42a205ef625 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -97,12 +97,6 @@ SYM_CODE_START(ftrace_common)
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
bl ftrace_stub
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
- nop // If enabled, this will be replaced
- // "b ftrace_graph_caller"
-#endif
-
/*
* At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
* x19-x29 per the AAPCS, and we created frame records upon entry, so we need
@@ -127,17 +121,6 @@ ftrace_common_return:
ret x9
SYM_CODE_END(ftrace_common)
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-SYM_CODE_START(ftrace_graph_caller)
- ldr x0, [sp, #S_PC]
- sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
- add x1, sp, #S_LR // parent_ip (callsite's LR)
- ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
- bl prepare_ftrace_return
- b ftrace_common_return
-SYM_CODE_END(ftrace_graph_caller)
-#endif
-
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
/*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 47af76e53221..a6eee3fa3448 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -121,7 +121,10 @@
struct fpsimd_last_state_struct {
struct user_fpsimd_state *st;
void *sve_state;
+ void *za_state;
+ u64 *svcr;
unsigned int sve_vl;
+ unsigned int sme_vl;
};
static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
@@ -136,6 +139,12 @@ __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX] = {
.max_virtualisable_vl = SVE_VL_MIN,
},
#endif
+#ifdef CONFIG_ARM64_SME
+ [ARM64_VEC_SME] = {
+ .type = ARM64_VEC_SME,
+ .name = "SME",
+ },
+#endif
};
static unsigned int vec_vl_inherit_flag(enum vec_type type)
@@ -143,6 +152,8 @@ static unsigned int vec_vl_inherit_flag(enum vec_type type)
switch (type) {
case ARM64_VEC_SVE:
return TIF_SVE_VL_INHERIT;
+ case ARM64_VEC_SME:
+ return TIF_SME_VL_INHERIT;
default:
WARN_ON_ONCE(1);
return 0;
@@ -186,6 +197,26 @@ extern void __percpu *efi_sve_state;
#endif /* ! CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+static int get_sme_default_vl(void)
+{
+ return get_default_vl(ARM64_VEC_SME);
+}
+
+static void set_sme_default_vl(int val)
+{
+ set_default_vl(ARM64_VEC_SME, val);
+}
+
+static void sme_free(struct task_struct *);
+
+#else
+
+static inline void sme_free(struct task_struct *t) { }
+
+#endif
+
DEFINE_PER_CPU(bool, fpsimd_context_busy);
EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
@@ -206,10 +237,19 @@ static void __get_cpu_fpsimd_context(void)
*
* The double-underscore version must only be called if you know the task
* can't be preempted.
+ *
+ * On RT kernels local_bh_disable() is not sufficient because it only
+ * serializes soft interrupt related sections via a local lock, but stays
+ * preemptible. Disabling preemption is the right choice here as bottom
+ * half processing is always in thread context on RT kernels so it
+ * implicitly prevents bottom half processing as well.
*/
static void get_cpu_fpsimd_context(void)
{
- local_bh_disable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_disable();
+ else
+ preempt_disable();
__get_cpu_fpsimd_context();
}
@@ -230,7 +270,10 @@ static void __put_cpu_fpsimd_context(void)
static void put_cpu_fpsimd_context(void)
{
__put_cpu_fpsimd_context();
- local_bh_enable();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_bh_enable();
+ else
+ preempt_enable();
}
static bool have_cpu_fpsimd_context(void)
@@ -238,23 +281,6 @@ static bool have_cpu_fpsimd_context(void)
return !preemptible() && __this_cpu_read(fpsimd_context_busy);
}
-/*
- * Call __sve_free() directly only if you know task can't be scheduled
- * or preempted.
- */
-static void __sve_free(struct task_struct *task)
-{
- kfree(task->thread.sve_state);
- task->thread.sve_state = NULL;
-}
-
-static void sve_free(struct task_struct *task)
-{
- WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
-
- __sve_free(task);
-}
-
unsigned int task_get_vl(const struct task_struct *task, enum vec_type type)
{
return task->thread.vl[type];
@@ -279,16 +305,27 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
}
/*
+ * TIF_SME controls whether a task can use SME without trapping while
+ * in userspace; when TIF_SME is set we must have storage
+ * allocated in sve_state and za_state to store the contents of both ZA
+ * and the SVE registers for both streaming and non-streaming modes.
+ *
+ * If both SVCR.ZA and SVCR.SM are disabled then at any point we
+ * may disable TIF_SME and reenable traps.
+ */
+
+
+/*
* TIF_SVE controls whether a task can use SVE without trapping while
- * in userspace, and also the way a task's FPSIMD/SVE state is stored
- * in thread_struct.
+ * in userspace, and also (together with TIF_SME) the way a task's
+ * FPSIMD/SVE state is stored in thread_struct.
*
* The kernel uses this flag to track whether a user task is actively
* using SVE, and therefore whether full SVE register state needs to
* be tracked. If not, the cheaper FPSIMD context handling code can
* be used instead of the more costly SVE equivalents.
*
- * * TIF_SVE set:
+ * * TIF_SVE or SVCR.SM set:
*
* The task can execute SVE instructions while in userspace without
* trapping to the kernel.
@@ -296,7 +333,8 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
* When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 * corresponding Zn), P0-P15 and FFR are encoded in
* task->thread.sve_state, formatted appropriately for vector
- * length task->thread.sve_vl.
+ * length task->thread.sve_vl or, if SVCR.SM is set,
+ * task->thread.sme_vl.
*
* task->thread.sve_state must point to a valid buffer at least
* sve_state_size(task) bytes in size.
@@ -334,16 +372,44 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type,
*/
static void task_fpsimd_load(void)
{
+ bool restore_sve_regs = false;
+ bool restore_ffr;
+
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
+ /* Check if we should restore SVE first */
if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE)) {
sve_set_vq(sve_vq_from_vl(task_get_sve_vl(current)) - 1);
+ restore_sve_regs = true;
+ restore_ffr = true;
+ }
+
+ /* Restore SME, override SVE register configuration if needed */
+ if (system_supports_sme()) {
+ unsigned long sme_vl = task_get_sme_vl(current);
+
+ /* Ensure VL is set up for restoring data */
+ if (test_thread_flag(TIF_SME))
+ sme_set_vq(sve_vq_from_vl(sme_vl) - 1);
+
+ write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
+
+ if (thread_za_enabled(&current->thread))
+ za_load_state(current->thread.za_state);
+
+ if (thread_sm_enabled(&current->thread)) {
+ restore_sve_regs = true;
+ restore_ffr = system_supports_fa64();
+ }
+ }
+
+ if (restore_sve_regs)
sve_load_state(sve_pffr(&current->thread),
- &current->thread.uw.fpsimd_state.fpsr, true);
- } else {
+ &current->thread.uw.fpsimd_state.fpsr,
+ restore_ffr);
+ else
fpsimd_load_state(&current->thread.uw.fpsimd_state);
- }
}
/*
@@ -361,6 +427,9 @@ static void fpsimd_save(void)
struct fpsimd_last_state_struct const *last =
this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
+ bool save_sve_regs = false;
+ bool save_ffr;
+ unsigned int vl;
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
@@ -368,9 +437,32 @@ static void fpsimd_save(void)
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
return;
- if (IS_ENABLED(CONFIG_ARM64_SVE) &&
- test_thread_flag(TIF_SVE)) {
- if (WARN_ON(sve_get_vl() != last->sve_vl)) {
+ if (test_thread_flag(TIF_SVE)) {
+ save_sve_regs = true;
+ save_ffr = true;
+ vl = last->sve_vl;
+ }
+
+ if (system_supports_sme()) {
+ u64 *svcr = last->svcr;
+ *svcr = read_sysreg_s(SYS_SVCR_EL0);
+
+ if (*svcr & SYS_SVCR_EL0_ZA_MASK)
+ za_save_state(last->za_state);
+
+ /* If we are in streaming mode override regular SVE. */
+ if (*svcr & SYS_SVCR_EL0_SM_MASK) {
+ save_sve_regs = true;
+ save_ffr = system_supports_fa64();
+ vl = last->sme_vl;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_SVE) && save_sve_regs) {
+ /* Get the configured VL from RDVL, will account for SM */
+ if (WARN_ON(sve_get_vl() != vl)) {
/*
* Can't save the user regs, so current would
* re-enter user with corrupt state.
@@ -381,8 +473,8 @@ static void fpsimd_save(void)
}
sve_save_state((char *)last->sve_state +
- sve_ffr_offset(last->sve_vl),
- &last->st->fpsr, true);
+ sve_ffr_offset(vl),
+ &last->st->fpsr, save_ffr);
} else {
fpsimd_save_state(last->st);
}
@@ -409,6 +501,8 @@ static unsigned int find_supported_vector_length(enum vec_type type,
if (vl > max_vl)
vl = max_vl;
+ if (vl < info->min_vl)
+ vl = info->min_vl;
bit = find_next_bit(info->vq_map, SVE_VQ_MAX,
__vq_to_bit(sve_vq_from_vl(vl)));
@@ -467,6 +561,30 @@ static int __init sve_sysctl_init(void)
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
+#if defined(CONFIG_ARM64_SME) && defined(CONFIG_SYSCTL)
+static struct ctl_table sme_default_vl_table[] = {
+ {
+ .procname = "sme_default_vector_length",
+ .mode = 0644,
+ .proc_handler = vec_proc_do_default_vl,
+ .extra1 = &vl_info[ARM64_VEC_SME],
+ },
+ { }
+};
+
+static int __init sme_sysctl_init(void)
+{
+ if (system_supports_sme())
+ if (!register_sysctl("abi", sme_default_vl_table))
+ return -EINVAL;
+
+ return 0;
+}
+
+#else /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
+static int __init sme_sysctl_init(void) { return 0; }
+#endif /* ! (CONFIG_ARM64_SME && CONFIG_SYSCTL) */
+
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
@@ -520,7 +638,7 @@ static void fpsimd_to_sve(struct task_struct *task)
if (!system_supports_sve())
return;
- vq = sve_vq_from_vl(task_get_sve_vl(task));
+ vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
__fpsimd_to_sve(sst, fst, vq);
}
@@ -537,7 +655,7 @@ static void fpsimd_to_sve(struct task_struct *task)
*/
static void sve_to_fpsimd(struct task_struct *task)
{
- unsigned int vq;
+ unsigned int vq, vl;
void const *sst = task->thread.sve_state;
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
@@ -546,7 +664,8 @@ static void sve_to_fpsimd(struct task_struct *task)
if (!system_supports_sve())
return;
- vq = sve_vq_from_vl(task_get_sve_vl(task));
+ vl = thread_get_cur_vl(&task->thread);
+ vq = sve_vq_from_vl(vl);
for (i = 0; i < SVE_NUM_ZREGS; ++i) {
p = (__uint128_t const *)ZREG(sst, vq, i);
fst->vregs[i] = arm64_le128_to_cpu(*p);
@@ -554,14 +673,37 @@ static void sve_to_fpsimd(struct task_struct *task)
}
#ifdef CONFIG_ARM64_SVE
+/*
+ * Call __sve_free() directly only if you know task can't be scheduled
+ * or preempted.
+ */
+static void __sve_free(struct task_struct *task)
+{
+ kfree(task->thread.sve_state);
+ task->thread.sve_state = NULL;
+}
+
+static void sve_free(struct task_struct *task)
+{
+ WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+
+ __sve_free(task);
+}
/*
* Return how many bytes of memory are required to store the full SVE
* state for task, given task's currently configured vector length.
*/
-static size_t sve_state_size(struct task_struct const *task)
+size_t sve_state_size(struct task_struct const *task)
{
- return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task_get_sve_vl(task)));
+ unsigned int vl = 0;
+
+ if (system_supports_sve())
+ vl = task_get_sve_vl(task);
+ if (system_supports_sme())
+ vl = max(vl, task_get_sme_vl(task));
+
+ return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl));
}
/*
@@ -588,6 +730,19 @@ void sve_alloc(struct task_struct *task)
/*
+ * Force the FPSIMD state shared with SVE to be updated in the SVE state
+ * even if the SVE state is the current active state.
+ *
+ * This should only be called by ptrace. task must be non-runnable.
+ * task->thread.sve_state must point to at least sve_state_size(task)
+ * bytes of allocated kernel memory.
+ */
+void fpsimd_force_sync_to_sve(struct task_struct *task)
+{
+ fpsimd_to_sve(task);
+}
+
+/*
* Ensure that task->thread.sve_state is up to date with respect to
* the user task, irrespective of when SVE is in use or not.
*
@@ -597,7 +752,8 @@ void sve_alloc(struct task_struct *task)
*/
void fpsimd_sync_to_sve(struct task_struct *task)
{
- if (!test_tsk_thread_flag(task, TIF_SVE))
+ if (!test_tsk_thread_flag(task, TIF_SVE) &&
+ !thread_sm_enabled(&task->thread))
fpsimd_to_sve(task);
}
@@ -611,7 +767,8 @@ void fpsimd_sync_to_sve(struct task_struct *task)
*/
void sve_sync_to_fpsimd(struct task_struct *task)
{
- if (test_tsk_thread_flag(task, TIF_SVE))
+ if (test_tsk_thread_flag(task, TIF_SVE) ||
+ thread_sm_enabled(&task->thread))
sve_to_fpsimd(task);
}
@@ -636,7 +793,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
- vq = sve_vq_from_vl(task_get_sve_vl(task));
+ vq = sve_vq_from_vl(thread_get_cur_vl(&task->thread));
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
__fpsimd_to_sve(sst, fst, vq);
@@ -680,8 +837,7 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
/*
* To ensure the FPSIMD bits of the SVE vector registers are preserved,
* write any live register state back to task_struct, and convert to a
- * regular FPSIMD thread. Since the vector length can only be changed
- * with a syscall we can't be in streaming mode while reconfiguring.
+ * regular FPSIMD thread.
*/
if (task == current) {
get_cpu_fpsimd_context();
@@ -690,17 +846,26 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
}
fpsimd_flush_task_state(task);
- if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
+ if (test_and_clear_tsk_thread_flag(task, TIF_SVE) ||
+ thread_sm_enabled(&task->thread))
sve_to_fpsimd(task);
+ if (system_supports_sme() && type == ARM64_VEC_SME) {
+ task->thread.svcr &= ~(SYS_SVCR_EL0_SM_MASK |
+ SYS_SVCR_EL0_ZA_MASK);
+ clear_thread_flag(TIF_SME);
+ }
+
if (task == current)
put_cpu_fpsimd_context();
/*
- * Force reallocation of task SVE state to the correct size
- * on next use:
+ * Force reallocation of task SVE and SME state to the correct
+ * size on next use:
*/
sve_free(task);
+ if (system_supports_sme() && type == ARM64_VEC_SME)
+ sme_free(task);
task_set_vl(task, type, vl);
@@ -761,6 +926,36 @@ int sve_get_current_vl(void)
return vec_prctl_status(ARM64_VEC_SVE, 0);
}
+#ifdef CONFIG_ARM64_SME
+/* PR_SME_SET_VL */
+int sme_set_current_vl(unsigned long arg)
+{
+ unsigned long vl, flags;
+ int ret;
+
+ vl = arg & PR_SME_VL_LEN_MASK;
+ flags = arg & ~vl;
+
+ if (!system_supports_sme() || is_compat_task())
+ return -EINVAL;
+
+ ret = vec_set_vector_length(current, ARM64_VEC_SME, vl, flags);
+ if (ret)
+ return ret;
+
+ return vec_prctl_status(ARM64_VEC_SME, flags);
+}
+
+/* PR_SME_GET_VL */
+int sme_get_current_vl(void)
+{
+ if (!system_supports_sme() || is_compat_task())
+ return -EINVAL;
+
+ return vec_prctl_status(ARM64_VEC_SME, 0);
+}
+#endif /* CONFIG_ARM64_SME */
+
static void vec_probe_vqs(struct vl_info *info,
DECLARE_BITMAP(map, SVE_VQ_MAX))
{
@@ -770,7 +965,23 @@ static void vec_probe_vqs(struct vl_info *info,
for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
write_vl(info->type, vq - 1); /* self-syncing */
- vl = sve_get_vl();
+
+ switch (info->type) {
+ case ARM64_VEC_SVE:
+ vl = sve_get_vl();
+ break;
+ case ARM64_VEC_SME:
+ vl = sme_get_vl();
+ break;
+ default:
+ vl = 0;
+ break;
+ }
+
+ /* Minimum VL identified? */
+ if (sve_vq_from_vl(vl) > vq)
+ break;
+
vq = sve_vq_from_vl(vl); /* skip intervening lengths */
set_bit(__vq_to_bit(vq), map);
}
@@ -856,21 +1067,25 @@ int vec_verify_vq_map(enum vec_type type)
static void __init sve_efi_setup(void)
{
- struct vl_info *info = &vl_info[ARM64_VEC_SVE];
+ int max_vl = 0;
+ int i;
if (!IS_ENABLED(CONFIG_EFI))
return;
+ for (i = 0; i < ARRAY_SIZE(vl_info); i++)
+ max_vl = max(vl_info[i].max_vl, max_vl);
+
/*
* alloc_percpu() warns and prints a backtrace if this goes wrong.
* This is evidence of a crippled system and we are returning void,
* so no attempt is made to handle this situation here.
*/
- if (!sve_vl_valid(info->max_vl))
+ if (!sve_vl_valid(max_vl))
goto fail;
efi_sve_state = __alloc_percpu(
- SVE_SIG_REGS_SIZE(sve_vq_from_vl(info->max_vl)), SVE_VQ_BYTES);
+ SVE_SIG_REGS_SIZE(sve_vq_from_vl(max_vl)), SVE_VQ_BYTES);
if (!efi_sve_state)
goto fail;
@@ -989,10 +1204,172 @@ void __init sve_setup(void)
void fpsimd_release_task(struct task_struct *dead_task)
{
__sve_free(dead_task);
+ sme_free(dead_task);
}
#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+/*
+ * Ensure that task->thread.za_state is allocated and sufficiently large.
+ *
+ * This function should be used only in preparation for replacing
+ * task->thread.za_state with new data. The memory is always zeroed
+ * here to prevent stale data from showing through: this is done in
+ * the interest of testability and predictability; the architecture
+ * guarantees that when ZA is enabled it will be zeroed.
+ */
+void sme_alloc(struct task_struct *task)
+{
+ if (task->thread.za_state) {
+ memset(task->thread.za_state, 0, za_state_size(task));
+ return;
+ }
+
+ /* This could potentially be up to 64K. */
+ task->thread.za_state =
+ kzalloc(za_state_size(task), GFP_KERNEL);
+}
+
+static void sme_free(struct task_struct *task)
+{
+ kfree(task->thread.za_state);
+ task->thread.za_state = NULL;
+}
+
+void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+{
+ /* Set priority for all PEs to architecturally defined minimum */
+ write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
+ SYS_SMPRI_EL1);
+
+ /* Allow SME in kernel */
+ write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_SMEN_EL1EN, CPACR_EL1);
+ isb();
+
+ /* Allow EL0 to access TPIDR2 */
+ write_sysreg(read_sysreg(SCTLR_EL1) | SCTLR_ELx_ENTP2, SCTLR_EL1);
+ isb();
+}
+
+/*
+ * This must be called after sme_kernel_enable(); we rely on the
+ * feature table being sorted to ensure this.
+ */
+void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+{
+ /* Allow use of FA64 */
+ write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
+ SYS_SMCR_EL1);
+}
+
+/*
+ * Read the pseudo-SMCR used by cpufeatures to identify the supported
+ * vector length.
+ *
+ * Use only if SME is present.
+ * This function clobbers the SME vector length.
+ */
+u64 read_smcr_features(void)
+{
+ u64 smcr;
+ unsigned int vq_max;
+
+ sme_kernel_enable(NULL);
+ sme_smstart_sm();
+
+ /*
+ * Set the maximum possible VL.
+ */
+ write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_LEN_MASK,
+ SYS_SMCR_EL1);
+
+ smcr = read_sysreg_s(SYS_SMCR_EL1);
+ smcr &= ~(u64)SMCR_ELx_LEN_MASK; /* Only the LEN field */
+ vq_max = sve_vq_from_vl(sve_get_vl());
+ smcr |= vq_max - 1; /* set LEN field to maximum effective value */
+
+ sme_smstop_sm();
+
+ return smcr;
+}
+
+void __init sme_setup(void)
+{
+ struct vl_info *info = &vl_info[ARM64_VEC_SME];
+ u64 smcr;
+ int min_bit;
+
+ if (!system_supports_sme())
+ return;
+
+ /*
+ * SME doesn't require any particular vector length be
+ * supported but it does require at least one. We should have
+ * disabled the feature entirely while bringing up CPUs but
+ * let's double check here.
+ */
+ WARN_ON(bitmap_empty(info->vq_map, SVE_VQ_MAX));
+
+ min_bit = find_last_bit(info->vq_map, SVE_VQ_MAX);
+ info->min_vl = sve_vl_from_vq(__bit_to_vq(min_bit));
+
+ smcr = read_sanitised_ftr_reg(SYS_SMCR_EL1);
+ info->max_vl = sve_vl_from_vq((smcr & SMCR_ELx_LEN_MASK) + 1);
+
+ /*
+ * Sanity-check that the max VL we determined through CPU features
+ * corresponds properly to sme_vq_map. If not, do our best:
+ */
+ if (WARN_ON(info->max_vl != find_supported_vector_length(ARM64_VEC_SME,
+ info->max_vl)))
+ info->max_vl = find_supported_vector_length(ARM64_VEC_SME,
+ info->max_vl);
+
+ WARN_ON(info->min_vl > info->max_vl);
+
+ /*
+ * For the default VL, pick the maximum supported value <= 32
+ * (256 bits) if there is one since this is guaranteed not to
+ * grow the signal frame when in streaming mode, otherwise the
+ * minimum available VL will be used.
+ */
+ set_sme_default_vl(find_supported_vector_length(ARM64_VEC_SME, 32));
+
+ pr_info("SME: minimum available vector length %u bytes per vector\n",
+ info->min_vl);
+ pr_info("SME: maximum available vector length %u bytes per vector\n",
+ info->max_vl);
+ pr_info("SME: default vector length %u bytes per vector\n",
+ get_sme_default_vl());
+}
+
+#endif /* CONFIG_ARM64_SME */
+
+static void sve_init_regs(void)
+{
+ /*
+ * Convert the FPSIMD state to SVE, zeroing all the state that
+ * is not shared with FPSIMD. If (as is likely) the current
+ * state is live in the registers then do this there and
+ * update our metadata for the current task including
+ * disabling the trap; otherwise update our in-memory copy.
+ * We are guaranteed not to be in streaming mode: we can only
+ * take an SVE trap when not in streaming mode and we can't be
+ * in streaming mode when taking an SME trap.
+ */
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ unsigned long vq_minus_one =
+ sve_vq_from_vl(task_get_sve_vl(current)) - 1;
+ sve_set_vq(vq_minus_one);
+ sve_flush_live(true, vq_minus_one);
+ fpsimd_bind_task_to_cpu();
+ } else {
+ fpsimd_to_sve(current);
+ }
+}
+
/*
* Trapped SVE access
*
@@ -1024,22 +1401,77 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
WARN_ON(1); /* SVE access shouldn't have trapped */
/*
- * Convert the FPSIMD state to SVE, zeroing all the state that
- * is not shared with FPSIMD. If (as is likely) the current
- * state is live in the registers then do this there and
- * update our metadata for the current task including
- * disabling the trap, otherwise update our in-memory copy.
+ * Even if the task has used streaming mode we can only
+ * generate SVE access traps in normal SVE mode, and
+ * transitioning out of streaming mode may discard any
+ * streaming mode state. Always clear the high bits to avoid
+ * any potential errors tracking what is properly initialised.
+ */
+ sve_init_regs();
+
+ put_cpu_fpsimd_context();
+}
+
+/*
+ * Trapped SME access
+ *
+ * Storage is allocated for the full SVE and SME state, the current
+ * FPSIMD register contents are migrated to SVE if SVE is not already
+ * active, and the access trap is disabled.
+ *
+ * TIF_SME should be clear on entry: otherwise, fpsimd_restore_current_state()
+ * would have disabled the SME access trap for userspace during
+ * ret_to_user, making an SME access trap impossible in that case.
+ */
+void do_sme_acc(unsigned int esr, struct pt_regs *regs)
+{
+ /* Even if we chose not to use SME, the hardware could still trap: */
+ if (unlikely(!system_supports_sme()) || WARN_ON(is_compat_task())) {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ return;
+ }
+
+ /*
+ * If this is not a trap due to SME being disabled then something
+ * is being used in the wrong mode; report it as SIGILL.
*/
+ if (ESR_ELx_ISS(esr) != ESR_ELx_SME_ISS_SME_DISABLED) {
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+ return;
+ }
+
+ sve_alloc(current);
+ sme_alloc(current);
+ if (!current->thread.sve_state || !current->thread.za_state) {
+ force_sig(SIGKILL);
+ return;
+ }
+
+ get_cpu_fpsimd_context();
+
+ /* With TIF_SME userspace shouldn't generate any traps */
+ if (test_and_set_thread_flag(TIF_SME))
+ WARN_ON(1);
+
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
unsigned long vq_minus_one =
- sve_vq_from_vl(task_get_sve_vl(current)) - 1;
- sve_set_vq(vq_minus_one);
- sve_flush_live(true, vq_minus_one);
+ sve_vq_from_vl(task_get_sme_vl(current)) - 1;
+ sme_set_vq(vq_minus_one);
+
fpsimd_bind_task_to_cpu();
- } else {
- fpsimd_to_sve(current);
}
+ /*
+ * If SVE was not already active, initialise the SVE registers;
+ * any non-shared state between the streaming and regular SVE
+ * registers is architecturally guaranteed to be zeroed when
+ * we enter streaming mode. We do not need to initialize ZA
+ * since ZA must be disabled at this point and enabling ZA is
+ * architecturally defined to zero ZA.
+ */
+ if (system_supports_sve() && !test_thread_flag(TIF_SVE))
+ sve_init_regs();
+
put_cpu_fpsimd_context();
}
@@ -1141,6 +1573,9 @@ static void fpsimd_flush_thread_vl(enum vec_type type)
void fpsimd_flush_thread(void)
{
+ void *sve_state = NULL;
+ void *za_state = NULL;
+
if (!system_supports_fpsimd())
return;
@@ -1152,11 +1587,28 @@ void fpsimd_flush_thread(void)
if (system_supports_sve()) {
clear_thread_flag(TIF_SVE);
- sve_free(current);
+
+ /* Defer kfree() while in atomic context */
+ sve_state = current->thread.sve_state;
+ current->thread.sve_state = NULL;
+
fpsimd_flush_thread_vl(ARM64_VEC_SVE);
}
+ if (system_supports_sme()) {
+ clear_thread_flag(TIF_SME);
+
+ /* Defer kfree() while in atomic context */
+ za_state = current->thread.za_state;
+ current->thread.za_state = NULL;
+
+ fpsimd_flush_thread_vl(ARM64_VEC_SME);
+ current->thread.svcr = 0;
+ }
+
put_cpu_fpsimd_context();
+ kfree(sve_state);
+ kfree(za_state);
}
/*
@@ -1198,22 +1650,34 @@ static void fpsimd_bind_task_to_cpu(void)
WARN_ON(!system_supports_fpsimd());
last->st = &current->thread.uw.fpsimd_state;
last->sve_state = current->thread.sve_state;
+ last->za_state = current->thread.za_state;
last->sve_vl = task_get_sve_vl(current);
+ last->sme_vl = task_get_sme_vl(current);
+ last->svcr = &current->thread.svcr;
current->thread.fpsimd_cpu = smp_processor_id();
+ /*
+ * Toggle SVE and SME trapping for userspace if needed; these
+ * are serialised by ret_to_user().
+ */
+ if (system_supports_sme()) {
+ if (test_thread_flag(TIF_SME))
+ sme_user_enable();
+ else
+ sme_user_disable();
+ }
+
if (system_supports_sve()) {
- /* Toggle SVE trapping for userspace if needed */
if (test_thread_flag(TIF_SVE))
sve_user_enable();
else
sve_user_disable();
-
- /* Serialised by exception return to user */
}
}
void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
- unsigned int sve_vl)
+ unsigned int sve_vl, void *za_state,
+ unsigned int sme_vl, u64 *svcr)
{
struct fpsimd_last_state_struct *last =
this_cpu_ptr(&fpsimd_last_state);
@@ -1222,8 +1686,11 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
WARN_ON(!in_softirq() && !irqs_disabled());
last->st = st;
+ last->svcr = svcr;
last->sve_state = sve_state;
+ last->za_state = za_state;
last->sve_vl = sve_vl;
+ last->sme_vl = sme_vl;
}
/*
@@ -1320,6 +1787,15 @@ static void fpsimd_flush_cpu_state(void)
{
WARN_ON(!system_supports_fpsimd());
__this_cpu_write(fpsimd_last_state.st, NULL);
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON usage, and leaving streaming mode or ZA enabled may increase
+ * power consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
@@ -1397,6 +1873,7 @@ EXPORT_SYMBOL(kernel_neon_end);
static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);
+static DEFINE_PER_CPU(bool, efi_sm_state);
/*
* EFI runtime services support functions
@@ -1431,12 +1908,28 @@ void __efi_fpsimd_begin(void)
*/
if (system_supports_sve() && likely(efi_sve_state)) {
char *sve_state = this_cpu_ptr(efi_sve_state);
+ bool ffr = true;
+ u64 svcr;
__this_cpu_write(efi_sve_state_used, true);
+ if (system_supports_sme()) {
+ svcr = read_sysreg_s(SYS_SVCR_EL0);
+
+ if (!system_supports_fa64())
+ ffr = svcr & SYS_SVCR_EL0_SM_MASK;
+
+ __this_cpu_write(efi_sm_state, ffr);
+ }
+
sve_save_state(sve_state + sve_ffr_offset(sve_max_vl()),
&this_cpu_ptr(&efi_fpsimd_state)->fpsr,
- true);
+ ffr);
+
+ if (system_supports_sme())
+ sysreg_clear_set_s(SYS_SVCR_EL0,
+ SYS_SVCR_EL0_SM_MASK, 0);
+
} else {
fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
}
@@ -1459,11 +1952,26 @@ void __efi_fpsimd_end(void)
if (system_supports_sve() &&
likely(__this_cpu_read(efi_sve_state_used))) {
char const *sve_state = this_cpu_ptr(efi_sve_state);
+ bool ffr = true;
+
+ /*
+ * Restore streaming mode; EFI calls are
+ * normal function calls so should not return in
+ * streaming mode.
+ */
+ if (system_supports_sme()) {
+ if (__this_cpu_read(efi_sm_state)) {
+ sysreg_clear_set_s(SYS_SVCR_EL0,
+ 0,
+ SYS_SVCR_EL0_SM_MASK);
+ if (!system_supports_fa64())
+ ffr = efi_sm_state;
+ }
+ }
- sve_set_vq(sve_vq_from_vl(sve_get_vl()) - 1);
sve_load_state(sve_state + sve_ffr_offset(sve_max_vl()),
&this_cpu_ptr(&efi_fpsimd_state)->fpsr,
- true);
+ ffr);
__this_cpu_write(efi_sve_state_used, false);
} else {
@@ -1538,6 +2046,13 @@ static int __init fpsimd_init(void)
if (!cpu_have_named_feature(ASIMD))
pr_notice("Advanced SIMD is not implemented\n");
- return sve_sysctl_init();
+
+ if (cpu_have_named_feature(SME) && !cpu_have_named_feature(SVE))
+ pr_notice("SME is implemented but not SVE\n");
+
+ sve_sysctl_init();
+ sme_sysctl_init();
+
+ return 0;
}
core_initcall(fpsimd_init);
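For completeness, a userspace sketch of the prctl() interface backed by sme_set_current_vl()/sme_get_current_vl() (PR_SME_SET_VL, PR_SME_GET_VL and PR_SME_VL_LEN_MASK are added to <linux/prctl.h> elsewhere in the series; the vector length argument is in bytes):

#include <sys/prctl.h>

/*
 * Sketch only: request a 256-bit (32-byte) streaming vector length and
 * return the VL actually granted, which may be smaller.
 */
static int set_sme_vl_256(void)
{
	int ret = prctl(PR_SME_SET_VL, 32);

	if (ret < 0)
		return ret;	/* fails with EINVAL if SME is unsupported */

	return ret & PR_SME_VL_LEN_MASK;
}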
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 4506c4a90ac1..f447c4a36f69 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -268,6 +268,22 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
}
#ifdef CONFIG_DYNAMIC_FTRACE
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+ /*
+ * When DYNAMIC_FTRACE_WITH_REGS is selected, `fregs` can never be NULL
+ * and arch_ftrace_get_regs(fregs) will always give a non-NULL pt_regs
+ * in which we can safely modify the LR.
+ */
+ struct pt_regs *regs = arch_ftrace_get_regs(fregs);
+ unsigned long *parent = (unsigned long *)&procedure_link_pointer(regs);
+
+ prepare_ftrace_return(ip, parent, frame_pointer(regs));
+}
+#else
/*
* Turn on/off the call to ftrace_graph_caller() in ftrace_caller()
* depending on @enable.
@@ -297,5 +313,6 @@ int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(false);
}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index e16b248699d5..19c2d487cb08 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -329,8 +329,13 @@ bool crash_is_nosave(unsigned long pfn)
/* in reserved memory? */
addr = __pfn_to_phys(pfn);
- if ((addr < crashk_res.start) || (crashk_res.end < addr))
- return false;
+ if ((addr < crashk_res.start) || (crashk_res.end < addr)) {
+ if (!crashk_low_res.end)
+ return false;
+
+ if ((addr < crashk_low_res.start) || (crashk_low_res.end < addr))
+ return false;
+ }
if (!kexec_crash_image)
return true;
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 59c648d51848..889951291cc0 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -65,10 +65,18 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
/* Exclude crashkernel region */
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+ if (ret)
+ goto out;
+
+ if (crashk_low_res.end) {
+ ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+ if (ret)
+ goto out;
+ }
- if (!ret)
- ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+out:
kfree(cmem);
return ret;
}
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 78b3e0f8e997..35697a09926f 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -15,6 +15,7 @@
#include <linux/swapops.h>
#include <linux/thread_info.h>
#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/barrier.h>
@@ -543,3 +544,32 @@ static int register_mte_tcf_preferred_sysctl(void)
return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);
+
+/*
+ * Return 0 on success, the number of bytes not probed otherwise.
+ */
+size_t mte_probe_user_range(const char __user *uaddr, size_t size)
+{
+ const char __user *end = uaddr + size;
+ int err = 0;
+ char val;
+
+ __raw_get_user(val, uaddr, err);
+ if (err)
+ return size;
+
+ uaddr = PTR_ALIGN(uaddr, MTE_GRANULE_SIZE);
+ while (uaddr < end) {
+ /*
+ * A read is sufficient for mte, the caller should have probed
+ * for the pte write permission if required.
+ */
+ __raw_get_user(val, uaddr, err);
+ if (err)
+ return end - uaddr;
+ uaddr += MTE_GRANULE_SIZE;
+ }
+ (void)val;
+
+ return 0;
+}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7fa97df55e3a..9734c9fb1a32 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -250,6 +250,8 @@ void show_regs(struct pt_regs *regs)
static void tls_thread_flush(void)
{
write_sysreg(0, tpidr_el0);
+ if (system_supports_tpidr2())
+ write_sysreg_s(0, SYS_TPIDR2_EL0);
if (is_compat_task()) {
current->thread.uw.tp_value = 0;
@@ -298,16 +300,42 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
/*
* Detach src's sve_state (if any) from dst so that it does not
- * get erroneously used or freed prematurely. dst's sve_state
+ * get erroneously used or freed prematurely. dst's copies
* will be allocated on demand later on if dst uses SVE.
* For consistency, also clear TIF_SVE here: this could be done
* later in copy_process(), but to avoid tripping up future
- * maintainers it is best not to leave TIF_SVE and sve_state in
+ * maintainers it is best not to leave TIF flags and buffers in
* an inconsistent state, even temporarily.
*/
dst->thread.sve_state = NULL;
clear_tsk_thread_flag(dst, TIF_SVE);
+ /*
+	 * In the unlikely event that we create a new thread with ZA
+	 * enabled we should retain the ZA state, so duplicate it here.
+	 * This may be freed again shortly if we exec() or if
+	 * CLONE_SETTLS is set, but it's simpler to do it here. To avoid
+	 * confusing the rest of the code, ensure that sve_state is
+	 * allocated whenever za_state is allocated.
+ */
+ if (thread_za_enabled(&src->thread)) {
+ dst->thread.sve_state = kzalloc(sve_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.sve_state)
+ return -ENOMEM;
+ dst->thread.za_state = kmemdup(src->thread.za_state,
+ za_state_size(src),
+ GFP_KERNEL);
+ if (!dst->thread.za_state) {
+ kfree(dst->thread.sve_state);
+ dst->thread.sve_state = NULL;
+ return -ENOMEM;
+ }
+ } else {
+ dst->thread.za_state = NULL;
+ clear_tsk_thread_flag(dst, TIF_SME);
+ }
+
/* clear any pending asynchronous tag fault raised by the parent */
clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
@@ -343,6 +371,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
* out-of-sync with the saved value.
*/
*task_user_tls(p) = read_sysreg(tpidr_el0);
+ if (system_supports_tpidr2())
+ p->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
if (stack_start) {
if (is_compat_thread(task_thread_info(p)))
@@ -353,10 +383,12 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
/*
* If a TLS pointer was passed to clone, use it for the new
- * thread.
+ * thread. We also reset TPIDR2 if it's in use.
*/
- if (clone_flags & CLONE_SETTLS)
+ if (clone_flags & CLONE_SETTLS) {
p->thread.uw.tp_value = tls;
+ p->thread.tpidr2_el0 = 0;
+ }
} else {
/*
* A kthread has no context to ERET to, so ensure any buggy
@@ -387,6 +419,8 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
void tls_preserve_current_state(void)
{
*task_user_tls(current) = read_sysreg(tpidr_el0);
+ if (system_supports_tpidr2() && !is_compat_task())
+ current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
}
static void tls_thread_switch(struct task_struct *next)
@@ -399,6 +433,8 @@ static void tls_thread_switch(struct task_struct *next)
write_sysreg(0, tpidrro_el0);
write_sysreg(*task_user_tls(next), tpidr_el0);
+ if (system_supports_tpidr2())
+ write_sysreg_s(next->thread.tpidr2_el0, SYS_TPIDR2_EL0);
}
/*
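
TPIDR2_EL0 is now handled like TPIDR_EL0: preserved and restored across context switch, cleared on exec, and zeroed in the child when CLONE_SETTLS supplies a new TLS pointer. A userspace sketch of reading it, assuming an SME-capable system and uapi headers new enough to define HWCAP2_SME; S3_3_C13_C0_5 is the generic encoding of TPIDR2_EL0, spelled that way for assemblers that do not yet know the named register:

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/auxv.h>
	#include <asm/hwcap.h>		/* HWCAP2_SME -- assumes updated headers */

	static uint64_t read_tpidr2(void)
	{
		uint64_t val;

		/* mrs Xt, TPIDR2_EL0, spelled generically for older assemblers */
		asm volatile("mrs %0, S3_3_C13_C0_5" : "=r" (val));
		return val;
	}

	int main(void)
	{
		if (!(getauxval(AT_HWCAP2) & HWCAP2_SME))
			return 1;	/* no SME, so no TPIDR2 */

		printf("TPIDR2_EL0 = %#llx\n", (unsigned long long)read_tpidr2());
		return 0;
	}
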
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 230a47b9189e..60ebc3060cf1 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -713,21 +713,51 @@ static int system_call_set(struct task_struct *target,
#ifdef CONFIG_ARM64_SVE
static void sve_init_header_from_task(struct user_sve_header *header,
- struct task_struct *target)
+ struct task_struct *target,
+ enum vec_type type)
{
unsigned int vq;
+ bool active;
+ bool fpsimd_only;
+ enum vec_type task_type;
memset(header, 0, sizeof(*header));
- header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
- SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
- if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
- header->flags |= SVE_PT_VL_INHERIT;
+ /* Check if the requested registers are active for the task */
+ if (thread_sm_enabled(&target->thread))
+ task_type = ARM64_VEC_SME;
+ else
+ task_type = ARM64_VEC_SVE;
+ active = (task_type == type);
+
+ switch (type) {
+ case ARM64_VEC_SVE:
+ if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+ fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
+ break;
+ case ARM64_VEC_SME:
+ if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
+ header->flags |= SVE_PT_VL_INHERIT;
+ fpsimd_only = false;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
- header->vl = task_get_sve_vl(target);
+ if (active) {
+ if (fpsimd_only) {
+ header->flags |= SVE_PT_REGS_FPSIMD;
+ } else {
+ header->flags |= SVE_PT_REGS_SVE;
+ }
+ }
+
+ header->vl = task_get_vl(target, type);
vq = sve_vq_from_vl(header->vl);
- header->max_vl = sve_max_vl();
+ header->max_vl = vec_max_vl(type);
header->size = SVE_PT_SIZE(vq, header->flags);
header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
SVE_PT_REGS_SVE);
@@ -738,19 +768,17 @@ static unsigned int sve_size_from_header(struct user_sve_header const *header)
return ALIGN(header->size, SVE_VQ_BYTES);
}
-static int sve_get(struct task_struct *target,
- const struct user_regset *regset,
- struct membuf to)
+static int sve_get_common(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to,
+ enum vec_type type)
{
struct user_sve_header header;
unsigned int vq;
unsigned long start, end;
- if (!system_supports_sve())
- return -EINVAL;
-
/* Header */
- sve_init_header_from_task(&header, target);
+ sve_init_header_from_task(&header, target, type);
vq = sve_vq_from_vl(header.vl);
membuf_write(&to, &header, sizeof(header));
@@ -758,49 +786,61 @@ static int sve_get(struct task_struct *target,
if (target == current)
fpsimd_preserve_current_state();
- /* Registers: FPSIMD-only case */
-
BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
- if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
+ BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
+
+ switch ((header.flags & SVE_PT_REGS_MASK)) {
+ case SVE_PT_REGS_FPSIMD:
return __fpr_get(target, regset, to);
- /* Otherwise: full SVE case */
+ case SVE_PT_REGS_SVE:
+ start = SVE_PT_SVE_OFFSET;
+ end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
+ membuf_write(&to, target->thread.sve_state, end - start);
- BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
- start = SVE_PT_SVE_OFFSET;
- end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
- membuf_write(&to, target->thread.sve_state, end - start);
+ start = end;
+ end = SVE_PT_SVE_FPSR_OFFSET(vq);
+ membuf_zero(&to, end - start);
- start = end;
- end = SVE_PT_SVE_FPSR_OFFSET(vq);
- membuf_zero(&to, end - start);
+ /*
+ * Copy fpsr, and fpcr which must follow contiguously in
+ * struct fpsimd_state:
+ */
+ start = end;
+ end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
+ membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr,
+ end - start);
- /*
- * Copy fpsr, and fpcr which must follow contiguously in
- * struct fpsimd_state:
- */
- start = end;
- end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
- membuf_write(&to, &target->thread.uw.fpsimd_state.fpsr, end - start);
+ start = end;
+ end = sve_size_from_header(&header);
+ return membuf_zero(&to, end - start);
- start = end;
- end = sve_size_from_header(&header);
- return membuf_zero(&to, end - start);
+ default:
+ return 0;
+ }
}
-static int sve_set(struct task_struct *target,
+static int sve_get(struct task_struct *target,
const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
+ struct membuf to)
+{
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ return sve_get_common(target, regset, to, ARM64_VEC_SVE);
+}
+
+static int sve_set_common(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ enum vec_type type)
{
int ret;
struct user_sve_header header;
unsigned int vq;
unsigned long start, end;
- if (!system_supports_sve())
- return -EINVAL;
-
/* Header */
if (count < sizeof(header))
return -EINVAL;
@@ -813,13 +853,37 @@ static int sve_set(struct task_struct *target,
* Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
* vec_set_vector_length(), which will also validate them for us:
*/
- ret = vec_set_vector_length(target, ARM64_VEC_SVE, header.vl,
+ ret = vec_set_vector_length(target, type, header.vl,
((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
if (ret)
goto out;
/* Actual VL set may be less than the user asked for: */
- vq = sve_vq_from_vl(task_get_sve_vl(target));
+ vq = sve_vq_from_vl(task_get_vl(target, type));
+
+ /* Enter/exit streaming mode */
+ if (system_supports_sme()) {
+ u64 old_svcr = target->thread.svcr;
+
+ switch (type) {
+ case ARM64_VEC_SVE:
+ target->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
+ break;
+ case ARM64_VEC_SME:
+ target->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ /*
+ * If we switched then invalidate any existing SVE
+ * state and ensure there's storage.
+ */
+ if (target->thread.svcr != old_svcr)
+ sve_alloc(target);
+ }
/* Registers: FPSIMD-only case */
@@ -828,10 +892,15 @@ static int sve_set(struct task_struct *target,
ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
SVE_PT_FPSIMD_OFFSET);
clear_tsk_thread_flag(target, TIF_SVE);
+ if (type == ARM64_VEC_SME)
+ fpsimd_force_sync_to_sve(target);
goto out;
}
- /* Otherwise: full SVE case */
+ /*
+ * Otherwise: no registers or full SVE case. For backwards
+ * compatibility reasons we treat empty flags as SVE registers.
+ */
/*
* If setting a different VL from the requested VL and there is
@@ -852,8 +921,9 @@ static int sve_set(struct task_struct *target,
/*
* Ensure target->thread.sve_state is up to date with target's
- * FPSIMD regs, so that a short copyin leaves trailing registers
- * unmodified.
+ * FPSIMD regs, so that a short copyin leaves trailing
+ * registers unmodified. Always enable SVE even if going into
+ * streaming mode.
*/
fpsimd_sync_to_sve(target);
set_tsk_thread_flag(target, TIF_SVE);
@@ -889,8 +959,181 @@ out:
return ret;
}
+static int sve_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ if (!system_supports_sve())
+ return -EINVAL;
+
+ return sve_set_common(target, regset, pos, count, kbuf, ubuf,
+ ARM64_VEC_SVE);
+}
+
#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+static int ssve_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ return sve_get_common(target, regset, to, ARM64_VEC_SME);
+}
+
+static int ssve_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ return sve_set_common(target, regset, pos, count, kbuf, ubuf,
+ ARM64_VEC_SME);
+}
+
+static int za_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct user_za_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ /* Header */
+ memset(&header, 0, sizeof(header));
+
+ if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
+ header.flags |= ZA_PT_VL_INHERIT;
+
+ header.vl = task_get_sme_vl(target);
+ vq = sve_vq_from_vl(header.vl);
+ header.max_vl = sme_max_vl();
+ header.max_size = ZA_PT_SIZE(vq);
+
+ /* If ZA is not active there is only the header */
+ if (thread_za_enabled(&target->thread))
+ header.size = ZA_PT_SIZE(vq);
+ else
+ header.size = ZA_PT_ZA_OFFSET;
+
+ membuf_write(&to, &header, sizeof(header));
+
+ BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
+ end = ZA_PT_ZA_OFFSET;
+
+ if (target == current)
+ fpsimd_preserve_current_state();
+
+ /* Any register data to include? */
+ if (thread_za_enabled(&target->thread)) {
+ start = end;
+ end = ZA_PT_SIZE(vq);
+ membuf_write(&to, target->thread.za_state, end - start);
+ }
+
+ /* Zero any trailing padding */
+ start = end;
+ end = ALIGN(header.size, SVE_VQ_BYTES);
+ return membuf_zero(&to, end - start);
+}
+
+static int za_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct user_za_header header;
+ unsigned int vq;
+ unsigned long start, end;
+
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ /* Header */
+ if (count < sizeof(header))
+ return -EINVAL;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
+ 0, sizeof(header));
+ if (ret)
+ goto out;
+
+ /*
+ * All current ZA_PT_* flags are consumed by
+ * vec_set_vector_length(), which will also validate them for
+ * us:
+ */
+ ret = vec_set_vector_length(target, ARM64_VEC_SME, header.vl,
+ ((unsigned long)header.flags) << 16);
+ if (ret)
+ goto out;
+
+ /* Actual VL set may be less than the user asked for: */
+ vq = sve_vq_from_vl(task_get_sme_vl(target));
+
+ /* Ensure there is some SVE storage for streaming mode */
+ if (!target->thread.sve_state) {
+ sve_alloc(target);
+ if (!target->thread.sve_state) {
+			clear_tsk_thread_flag(target, TIF_SME);
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /* Allocate/reinit ZA storage */
+ sme_alloc(target);
+ if (!target->thread.za_state) {
+ ret = -ENOMEM;
+ clear_tsk_thread_flag(target, TIF_SME);
+ goto out;
+ }
+
+ /* If there is no data then disable ZA */
+ if (!count) {
+ target->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+ goto out;
+ }
+
+ /*
+ * If setting a different VL from the requested VL and there is
+ * register data, the data layout will be wrong: don't even
+ * try to set the registers in this case.
+ */
+ if (vq != sve_vq_from_vl(header.vl)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ BUILD_BUG_ON(ZA_PT_ZA_OFFSET != sizeof(header));
+ start = ZA_PT_ZA_OFFSET;
+ end = ZA_PT_SIZE(vq);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.za_state,
+ start, end);
+ if (ret)
+ goto out;
+
+ /* Mark ZA as active and let userspace use it */
+ set_tsk_thread_flag(target, TIF_SME);
+ target->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
+
+out:
+ fpsimd_flush_task_state(target);
+ return ret;
+}
+
+#endif /* CONFIG_ARM64_SME */
+
#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
const struct user_regset *regset,
@@ -1108,6 +1351,10 @@ enum aarch64_regset {
#ifdef CONFIG_ARM64_SVE
REGSET_SVE,
#endif
+#ifdef CONFIG_ARM64_SME
+ REGSET_SSVE,
+ REGSET_ZA,
+#endif
#ifdef CONFIG_ARM64_PTR_AUTH
REGSET_PAC_MASK,
REGSET_PAC_ENABLED_KEYS,
@@ -1188,6 +1435,33 @@ static const struct user_regset aarch64_regsets[] = {
.set = sve_set,
},
#endif
+#ifdef CONFIG_ARM64_SME
+ [REGSET_SSVE] = { /* Streaming mode SVE */
+ .core_note_type = NT_ARM_SSVE,
+ .n = DIV_ROUND_UP(SVE_PT_SIZE(SME_VQ_MAX, SVE_PT_REGS_SVE),
+ SVE_VQ_BYTES),
+ .size = SVE_VQ_BYTES,
+ .align = SVE_VQ_BYTES,
+ .regset_get = ssve_get,
+ .set = ssve_set,
+ },
+ [REGSET_ZA] = { /* SME ZA */
+ .core_note_type = NT_ARM_ZA,
+ /*
+ * ZA is a single register but it's variably sized and
+ * the ptrace core requires that the size of any data
+ * be an exact multiple of the configured register
+ * size so report as though we had SVE_VQ_BYTES
+		 * size, so report as though we had a set of
+		 * SVE_VQ_BYTES-sized registers. These values aren't
+		 * exposed to userspace.
+ .n = DIV_ROUND_UP(ZA_PT_SIZE(SME_VQ_MAX), SVE_VQ_BYTES),
+ .size = SVE_VQ_BYTES,
+ .align = SVE_VQ_BYTES,
+ .regset_get = za_get,
+ .set = za_set,
+ },
+#endif
#ifdef CONFIG_ARM64_PTR_AUTH
[REGSET_PAC_MASK] = {
.core_note_type = NT_ARM_PAC_MASK,
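
For debuggers, the new regsets are reached through the usual PTRACE_GETREGSET/PTRACE_SETREGSET interface: NT_ARM_SSVE mirrors the NT_ARM_SVE layout for streaming-mode registers, while NT_ARM_ZA carries a user_za_header optionally followed by the ZA data. A minimal tracer-side sketch, assuming uapi headers new enough to provide NT_ARM_ZA, struct user_za_header and ZA_PT_ZA_OFFSET (all added by this series):

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>
	#include <linux/elf.h>		/* NT_ARM_ZA -- assumes updated headers */
	#include <asm/ptrace.h>		/* struct user_za_header, ZA_PT_ZA_OFFSET */

	/* Read the ZA header of a stopped tracee. A size larger than
	 * ZA_PT_ZA_OFFSET means ZA is live and register data follows. */
	static int show_za_header(pid_t pid)
	{
		struct user_za_header hdr;
		struct iovec iov = { .iov_base = &hdr, .iov_len = sizeof(hdr) };

		if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_ZA, &iov) != 0)
			return -1;

		printf("SME VL %u (max %u), dump size %u, ZA %s\n",
		       (unsigned int)hdr.vl, (unsigned int)hdr.max_vl, hdr.size,
		       hdr.size > ZA_PT_ZA_OFFSET ? "live" : "off");
		return 0;
	}
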
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 3505789cf4bd..fea3223704b6 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -225,6 +225,8 @@ static void __init request_standard_resources(void)
kernel_code.end = __pa_symbol(__init_begin - 1);
kernel_data.start = __pa_symbol(_sdata);
kernel_data.end = __pa_symbol(_end - 1);
+ insert_resource(&iomem_resource, &kernel_code);
+ insert_resource(&iomem_resource, &kernel_data);
num_standard_resources = memblock.memory.cnt;
res_size = num_standard_resources * sizeof(*standard_resources);
@@ -246,20 +248,7 @@ static void __init request_standard_resources(void)
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
}
- request_resource(&iomem_resource, res);
-
- if (kernel_code.start >= res->start &&
- kernel_code.end <= res->end)
- request_resource(res, &kernel_code);
- if (kernel_data.start >= res->start &&
- kernel_data.end <= res->end)
- request_resource(res, &kernel_data);
-#ifdef CONFIG_KEXEC_CORE
- /* Userspace will find "Crash kernel" region in /proc/iomem. */
- if (crashk_res.end && crashk_res.start >= res->start &&
- crashk_res.end <= res->end)
- request_resource(res, &crashk_res);
-#endif
+ insert_resource(&iomem_resource, res);
}
}
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 4a4122ef6f39..2295948d97fd 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -56,6 +56,7 @@ struct rt_sigframe_user_layout {
unsigned long fpsimd_offset;
unsigned long esr_offset;
unsigned long sve_offset;
+ unsigned long za_offset;
unsigned long extra_offset;
unsigned long end_offset;
};
@@ -218,6 +219,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
struct user_ctxs {
struct fpsimd_context __user *fpsimd;
struct sve_context __user *sve;
+ struct za_context __user *za;
};
#ifdef CONFIG_ARM64_SVE
@@ -226,11 +228,17 @@ static int preserve_sve_context(struct sve_context __user *ctx)
{
int err = 0;
u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+ u16 flags = 0;
unsigned int vl = task_get_sve_vl(current);
unsigned int vq = 0;
- if (test_thread_flag(TIF_SVE))
+ if (thread_sm_enabled(&current->thread)) {
+ vl = task_get_sme_vl(current);
vq = sve_vq_from_vl(vl);
+ flags |= SVE_SIG_FLAG_SM;
+ } else if (test_thread_flag(TIF_SVE)) {
+ vq = sve_vq_from_vl(vl);
+ }
memset(reserved, 0, sizeof(reserved));
@@ -238,6 +246,7 @@ static int preserve_sve_context(struct sve_context __user *ctx)
__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
&ctx->head.size, err);
__put_user_error(vl, &ctx->vl, err);
+ __put_user_error(flags, &ctx->flags, err);
BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
@@ -258,18 +267,28 @@ static int preserve_sve_context(struct sve_context __user *ctx)
static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
int err;
- unsigned int vq;
+ unsigned int vl, vq;
struct user_fpsimd_state fpsimd;
struct sve_context sve;
if (__copy_from_user(&sve, user->sve, sizeof(sve)))
return -EFAULT;
- if (sve.vl != task_get_sve_vl(current))
+ if (sve.flags & SVE_SIG_FLAG_SM) {
+ if (!system_supports_sme())
+ return -EINVAL;
+
+ vl = task_get_sme_vl(current);
+ } else {
+ vl = task_get_sve_vl(current);
+ }
+
+ if (sve.vl != vl)
return -EINVAL;
if (sve.head.size <= sizeof(*user->sve)) {
clear_thread_flag(TIF_SVE);
+ current->thread.svcr &= ~SYS_SVCR_EL0_SM_MASK;
goto fpsimd_only;
}
@@ -301,7 +320,10 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
if (err)
return -EFAULT;
- set_thread_flag(TIF_SVE);
+ if (sve.flags & SVE_SIG_FLAG_SM)
+ current->thread.svcr |= SYS_SVCR_EL0_SM_MASK;
+ else
+ set_thread_flag(TIF_SVE);
fpsimd_only:
/* copy the FP and status/control registers */
@@ -326,6 +348,101 @@ extern int restore_sve_fpsimd_context(struct user_ctxs *user);
#endif /* ! CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_SME
+
+static int preserve_za_context(struct za_context __user *ctx)
+{
+ int err = 0;
+ u16 reserved[ARRAY_SIZE(ctx->__reserved)];
+ unsigned int vl = task_get_sme_vl(current);
+ unsigned int vq;
+
+ if (thread_za_enabled(&current->thread))
+ vq = sve_vq_from_vl(vl);
+ else
+ vq = 0;
+
+ memset(reserved, 0, sizeof(reserved));
+
+ __put_user_error(ZA_MAGIC, &ctx->head.magic, err);
+ __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
+ &ctx->head.size, err);
+ __put_user_error(vl, &ctx->vl, err);
+ BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
+ err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
+
+ if (vq) {
+ /*
+ * This assumes that the ZA state has already been saved to
+ * the task struct by calling the function
+ * fpsimd_signal_preserve_current_state().
+ */
+ err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
+ current->thread.za_state,
+ ZA_SIG_REGS_SIZE(vq));
+ }
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_za_context(struct user_ctxs *user)
+{
+ int err;
+ unsigned int vq;
+ struct za_context za;
+
+ if (__copy_from_user(&za, user->za, sizeof(za)))
+ return -EFAULT;
+
+ if (za.vl != task_get_sme_vl(current))
+ return -EINVAL;
+
+ if (za.head.size <= sizeof(*user->za)) {
+ current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+ return 0;
+ }
+
+ vq = sve_vq_from_vl(za.vl);
+
+ if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
+ return -EINVAL;
+
+ /*
+	 * Careful: we are about to __copy_from_user() directly into
+ * thread.za_state with preemption enabled, so protection is
+ * needed to prevent a racing context switch from writing stale
+ * registers back over the new data.
+ */
+
+ fpsimd_flush_task_state(current);
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+
+ sme_alloc(current);
+ if (!current->thread.za_state) {
+ current->thread.svcr &= ~SYS_SVCR_EL0_ZA_MASK;
+ clear_thread_flag(TIF_SME);
+ return -ENOMEM;
+ }
+
+ err = __copy_from_user(current->thread.za_state,
+ (char __user const *)user->za +
+ ZA_SIG_REGS_OFFSET,
+ ZA_SIG_REGS_SIZE(vq));
+ if (err)
+ return -EFAULT;
+
+ set_thread_flag(TIF_SME);
+ current->thread.svcr |= SYS_SVCR_EL0_ZA_MASK;
+
+ return 0;
+}
+#else /* ! CONFIG_ARM64_SME */
+
+/* Turn any non-optimised out attempts to use these into a link error: */
+extern int preserve_za_context(void __user *ctx);
+extern int restore_za_context(struct user_ctxs *user);
+
+#endif /* ! CONFIG_ARM64_SME */
static int parse_user_sigframe(struct user_ctxs *user,
struct rt_sigframe __user *sf)
@@ -340,6 +457,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->fpsimd = NULL;
user->sve = NULL;
+ user->za = NULL;
if (!IS_ALIGNED((unsigned long)base, 16))
goto invalid;
@@ -393,7 +511,7 @@ static int parse_user_sigframe(struct user_ctxs *user,
break;
case SVE_MAGIC:
- if (!system_supports_sve())
+ if (!system_supports_sve() && !system_supports_sme())
goto invalid;
if (user->sve)
@@ -405,6 +523,19 @@ static int parse_user_sigframe(struct user_ctxs *user,
user->sve = (struct sve_context __user *)head;
break;
+ case ZA_MAGIC:
+ if (!system_supports_sme())
+ goto invalid;
+
+ if (user->za)
+ goto invalid;
+
+ if (size < sizeof(*user->za))
+ goto invalid;
+
+ user->za = (struct za_context __user *)head;
+ break;
+
case EXTRA_MAGIC:
if (have_extra_context)
goto invalid;
@@ -528,6 +659,9 @@ static int restore_sigframe(struct pt_regs *regs,
}
}
+ if (err == 0 && system_supports_sme() && user.za)
+ err = restore_za_context(&user);
+
return err;
}
@@ -594,11 +728,12 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
if (system_supports_sve()) {
unsigned int vq = 0;
- if (add_all || test_thread_flag(TIF_SVE)) {
- int vl = sve_max_vl();
+ if (add_all || test_thread_flag(TIF_SVE) ||
+ thread_sm_enabled(&current->thread)) {
+ int vl = max(sve_max_vl(), sme_max_vl());
if (!add_all)
- vl = task_get_sve_vl(current);
+ vl = thread_get_cur_vl(&current->thread);
vq = sve_vq_from_vl(vl);
}
@@ -609,6 +744,24 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
return err;
}
+ if (system_supports_sme()) {
+ unsigned int vl;
+ unsigned int vq = 0;
+
+ if (add_all)
+ vl = sme_max_vl();
+ else
+ vl = task_get_sme_vl(current);
+
+ if (thread_za_enabled(&current->thread))
+ vq = sve_vq_from_vl(vl);
+
+ err = sigframe_alloc(user, &user->za_offset,
+ ZA_SIG_CONTEXT_SIZE(vq));
+ if (err)
+ return err;
+ }
+
return sigframe_alloc_end(user);
}
@@ -649,13 +802,21 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
}
- /* Scalable Vector Extension state, if present */
- if (system_supports_sve() && err == 0 && user->sve_offset) {
+ /* Scalable Vector Extension state (including streaming), if present */
+ if ((system_supports_sve() || system_supports_sme()) &&
+ err == 0 && user->sve_offset) {
struct sve_context __user *sve_ctx =
apply_user_offset(user, user->sve_offset);
err |= preserve_sve_context(sve_ctx);
}
+ /* ZA state if present */
+ if (system_supports_sme() && err == 0 && user->za_offset) {
+ struct za_context __user *za_ctx =
+ apply_user_offset(user, user->za_offset);
+ err |= preserve_za_context(za_ctx);
+ }
+
if (err == 0 && user->extra_offset) {
char __user *sfp = (char __user *)user->sigframe;
char __user *userp =
@@ -759,6 +920,13 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* TCO (Tag Check Override) always cleared for signal handlers */
regs->pstate &= ~PSR_TCO_BIT;
+ /* Signal handlers are invoked with ZA and streaming mode disabled */
+ if (system_supports_sme()) {
+ current->thread.svcr &= ~(SYS_SVCR_EL0_ZA_MASK |
+ SYS_SVCR_EL0_SM_MASK);
+ sme_smstop();
+ }
+
if (ka->sa.sa_flags & SA_RESTORER)
sigtramp = ka->sa.sa_restorer;
else
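
A signal handler, or anything else inspecting a saved sigcontext, can locate the new state by walking the records in the mcontext's __reserved area: streaming-mode state reuses the sve_context record with SVE_SIG_FLAG_SM set, and ZA gets its own za_context record tagged ZA_MAGIC. A hedged sketch, assuming <asm/sigcontext.h> is new enough to define these (EXTRA_MAGIC indirection is not followed here):

	#include <stddef.h>
	#include <asm/sigcontext.h>	/* ZA_MAGIC, struct za_context -- new headers */

	/* Walk the signal context records (uc_mcontext.__reserved in a
	 * handler) looking for the ZA record. */
	static struct za_context *find_za_context(struct _aarch64_ctx *head)
	{
		while (head->magic && head->size) {
			if (head->magic == ZA_MAGIC)
				return (struct za_context *)head;
			head = (struct _aarch64_ctx *)((char *)head + head->size);
		}

		return NULL;	/* no ZA record was written */
	}

A returned record that is no bigger than its header means ZA was off when the signal was delivered; otherwise ZA_SIG_REGS_SIZE(vq) bytes of ZA data follow the header.
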
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index e4103e085681..0467cb79f080 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -19,43 +19,60 @@
#include <asm/stacktrace.h>
/*
- * AArch64 PCS assigns the frame pointer to x29.
+ * A snapshot of a frame record or fp/lr register values, along with some
+ * accounting information necessary for robust unwinding.
*
- * A simple function prologue looks like this:
- * sub sp, sp, #0x10
- * stp x29, x30, [sp]
- * mov x29, sp
+ * @fp: The fp value in the frame record (or the real fp)
+ * @pc: The lr value in the frame record (or the real lr)
*
- * A simple function epilogue looks like this:
- * mov sp, x29
- * ldp x29, x30, [sp]
- * add sp, sp, #0x10
+ * @stacks_done: Stacks which have been entirely unwound and which it is no
+ *		  longer valid to unwind to.
+ *
+ * @prev_fp: The fp that pointed to this frame record, or a synthetic value
+ * of 0. This is used to ensure that within a stack, each
+ * subsequent frame record is at an increasing address.
+ * @prev_type: The type of stack this frame record was on, or a synthetic
+ * value of STACK_TYPE_UNKNOWN. This is used to detect a
+ * transition from one stack to another.
+ *
+ * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
+ * associated with the most recently encountered replacement lr
+ * value.
*/
+struct unwind_state {
+ unsigned long fp;
+ unsigned long pc;
+ DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
+ unsigned long prev_fp;
+ enum stack_type prev_type;
+#ifdef CONFIG_KRETPROBES
+ struct llist_node *kr_cur;
+#endif
+};
-
-static notrace void start_backtrace(struct stackframe *frame, unsigned long fp,
- unsigned long pc)
+static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
+ unsigned long pc)
{
- frame->fp = fp;
- frame->pc = pc;
+ state->fp = fp;
+ state->pc = pc;
#ifdef CONFIG_KRETPROBES
- frame->kr_cur = NULL;
+ state->kr_cur = NULL;
#endif
/*
* Prime the first unwind.
*
- * In unwind_frame() we'll check that the FP points to a valid stack,
+ * In unwind_next() we'll check that the FP points to a valid stack,
* which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
* treated as a transition to whichever stack that happens to be. The
* prev_fp value won't be used, but we set it to 0 such that it is
* definitely not an accessible stack address.
*/
- bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
- frame->prev_fp = 0;
- frame->prev_type = STACK_TYPE_UNKNOWN;
+ bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
+ state->prev_fp = 0;
+ state->prev_type = STACK_TYPE_UNKNOWN;
}
-NOKPROBE_SYMBOL(start_backtrace);
+NOKPROBE_SYMBOL(unwind_init);
/*
* Unwind from one frame record (A) to the next frame record (B).
@@ -64,15 +81,12 @@ NOKPROBE_SYMBOL(start_backtrace);
* records (e.g. a cycle), determined based on the location and fp value of A
* and the location (but not the fp value) of B.
*/
-static int notrace unwind_frame(struct task_struct *tsk,
- struct stackframe *frame)
+static int notrace unwind_next(struct task_struct *tsk,
+ struct unwind_state *state)
{
- unsigned long fp = frame->fp;
+ unsigned long fp = state->fp;
struct stack_info info;
- if (!tsk)
- tsk = current;
-
/* Final frame; nothing to unwind */
if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
return -ENOENT;
@@ -83,7 +97,7 @@ static int notrace unwind_frame(struct task_struct *tsk,
if (!on_accessible_stack(tsk, fp, 16, &info))
return -EINVAL;
- if (test_bit(info.type, frame->stacks_done))
+ if (test_bit(info.type, state->stacks_done))
return -EINVAL;
/*
@@ -99,27 +113,27 @@ static int notrace unwind_frame(struct task_struct *tsk,
* stack to another, it's never valid to unwind back to that first
* stack.
*/
- if (info.type == frame->prev_type) {
- if (fp <= frame->prev_fp)
+ if (info.type == state->prev_type) {
+ if (fp <= state->prev_fp)
return -EINVAL;
} else {
- set_bit(frame->prev_type, frame->stacks_done);
+ set_bit(state->prev_type, state->stacks_done);
}
/*
* Record this frame record's values and location. The prev_fp and
- * prev_type are only meaningful to the next unwind_frame() invocation.
+ * prev_type are only meaningful to the next unwind_next() invocation.
*/
- frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
- frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
- frame->prev_fp = fp;
- frame->prev_type = info.type;
+ state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+ state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+ state->prev_fp = fp;
+ state->prev_type = info.type;
- frame->pc = ptrauth_strip_insn_pac(frame->pc);
+ state->pc = ptrauth_strip_insn_pac(state->pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
- (frame->pc == (unsigned long)return_to_handler)) {
+ (state->pc == (unsigned long)return_to_handler)) {
unsigned long orig_pc;
/*
* This is a case where function graph tracer has
@@ -127,37 +141,37 @@ static int notrace unwind_frame(struct task_struct *tsk,
* to hook a function return.
* So replace it to an original value.
*/
- orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
- (void *)frame->fp);
- if (WARN_ON_ONCE(frame->pc == orig_pc))
+ orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
+ (void *)state->fp);
+ if (WARN_ON_ONCE(state->pc == orig_pc))
return -EINVAL;
- frame->pc = orig_pc;
+ state->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
- if (is_kretprobe_trampoline(frame->pc))
- frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
+ if (is_kretprobe_trampoline(state->pc))
+ state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif
return 0;
}
-NOKPROBE_SYMBOL(unwind_frame);
+NOKPROBE_SYMBOL(unwind_next);
-static void notrace walk_stackframe(struct task_struct *tsk,
- struct stackframe *frame,
- bool (*fn)(void *, unsigned long), void *data)
+static void notrace unwind(struct task_struct *tsk,
+ struct unwind_state *state,
+ stack_trace_consume_fn consume_entry, void *cookie)
{
while (1) {
int ret;
- if (!fn(data, frame->pc))
+ if (!consume_entry(cookie, state->pc))
break;
- ret = unwind_frame(tsk, frame);
+ ret = unwind_next(tsk, state);
if (ret < 0)
break;
}
}
-NOKPROBE_SYMBOL(walk_stackframe);
+NOKPROBE_SYMBOL(unwind);
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
@@ -196,17 +210,17 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task,
struct pt_regs *regs)
{
- struct stackframe frame;
+ struct unwind_state state;
if (regs)
- start_backtrace(&frame, regs->regs[29], regs->pc);
+ unwind_init(&state, regs->regs[29], regs->pc);
else if (task == current)
- start_backtrace(&frame,
+ unwind_init(&state,
(unsigned long)__builtin_frame_address(1),
(unsigned long)__builtin_return_address(0));
else
- start_backtrace(&frame, thread_saved_fp(task),
+ unwind_init(&state, thread_saved_fp(task),
thread_saved_pc(task));
- walk_stackframe(task, &frame, consume_entry, cookie);
+ unwind(task, &state, consume_entry, cookie);
}
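
The rename from struct stackframe/start_backtrace/unwind_frame/walk_stackframe to unwind_state/unwind_init/unwind_next/unwind is purely internal: arch_stack_walk() keeps its interface, so users of the generic stacktrace API are unaffected. As a reminder of how a consumer plugs in, here is a generic-kernel sketch (not part of this patch) that ends up in arch_stack_walk()/unwind() on arm64:

	#include <linux/kernel.h>
	#include <linux/printk.h>
	#include <linux/stacktrace.h>

	/* Sketch: collect and print up to 16 return addresses of the current task. */
	static void dump_current_stack(void)
	{
		unsigned long entries[16];
		unsigned int i, nr;

		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
		for (i = 0; i < nr; i++)
			pr_info("  %pS\n", (void *)entries[i]);
	}
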
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index c938603b3ba0..92c69e5ac269 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -158,11 +158,36 @@ trace_exit:
syscall_trace_exit(regs);
}
-static inline void sve_user_discard(void)
+/*
+ * As per the ABI exit SME streaming mode and clear the SVE state not
+ * shared with FPSIMD on syscall entry.
+ */
+static inline void fp_user_discard(void)
{
+ /*
+ * If SME is active then exit streaming mode. If ZA is active
+ * then flush the SVE registers but leave userspace access to
+ * both SVE and SME enabled, otherwise disable SME for the
+ * task and fall through to disabling SVE too. This means
+ * that after a syscall we never have any streaming mode
+	 * register state to track; if this changes, the KVM code will
+ * need updating.
+ */
+ if (system_supports_sme() && test_thread_flag(TIF_SME)) {
+ u64 svcr = read_sysreg_s(SYS_SVCR_EL0);
+
+ if (svcr & SYS_SVCR_EL0_SM_MASK)
+ sme_smstop_sm();
+ }
+
if (!system_supports_sve())
return;
+ /*
+ * If SME is not active then disable SVE, the registers will
+ * be cleared when userspace next attempts to access them and
+ * we do not need to track the SVE register state until then.
+ */
clear_thread_flag(TIF_SVE);
/*
@@ -177,7 +202,7 @@ static inline void sve_user_discard(void)
void do_el0_svc(struct pt_regs *regs)
{
- sve_user_discard();
+ fp_user_discard();
el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 0529fd57567e..6751621e5bea 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -821,6 +821,7 @@ static const char *esr_class_str[] = {
[ESR_ELx_EC_SVE] = "SVE",
[ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
[ESR_ELx_EC_FPAC] = "FPAC",
+ [ESR_ELx_EC_SME] = "SME",
[ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
[ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
[ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 397fdac75cb1..441edb9c398c 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -82,6 +82,26 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
+
+ /*
+	 * We don't currently support SME guests, but if we leave things
+	 * in streaming mode then the guest may generate SME traps as
+	 * soon as it starts running FPSIMD or SVE code. As a special
+	 * case, if we are in streaming mode we force the host state to
+	 * be saved now and exit streaming mode, so that we don't have
+	 * to handle any SME traps for valid guest operations. Do this
+	 * for ZA as well for now, for simplicity.
+ */
+ if (system_supports_sme()) {
+ if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
+ vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED;
+
+ if (read_sysreg_s(SYS_SVCR_EL0) &
+ (SYS_SVCR_EL0_SM_MASK | SYS_SVCR_EL0_ZA_MASK)) {
+ vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
+ fpsimd_save_and_flush_cpu_state();
+ }
+ }
}
/*
@@ -109,9 +129,14 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(!irqs_disabled());
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
+ /*
+ * Currently we do not support SME guests so SVCR is
+ * always 0 and we just need a variable to point to.
+ */
fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
vcpu->arch.sve_state,
- vcpu->arch.sve_max_vl);
+ vcpu->arch.sve_max_vl,
+ NULL, 0, &vcpu->arch.svcr);
clear_thread_flag(TIF_FOREIGN_FPSTATE);
update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));
@@ -130,6 +155,22 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
local_irq_save(flags);
+ /*
+ * If we have VHE then the Hyp code will reset CPACR_EL1 to
+ * CPACR_EL1_DEFAULT and we need to reenable SME.
+ */
+ if (has_vhe() && system_supports_sme()) {
+ /* Also restore EL0 state seen on entry */
+ if (vcpu->arch.flags & KVM_ARM64_HOST_SME_ENABLED)
+ sysreg_clear_set(CPACR_EL1, 0,
+ CPACR_EL1_SMEN_EL0EN |
+ CPACR_EL1_SMEN_EL1EN);
+ else
+ sysreg_clear_set(CPACR_EL1,
+ CPACR_EL1_SMEN_EL0EN,
+ CPACR_EL1_SMEN_EL1EN);
+ }
+
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6410d21d8695..caace61ea459 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -47,10 +47,24 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
__activate_traps_fpsimd32(vcpu);
}
+ if (cpus_have_final_cap(ARM64_SME))
+ val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
+ if (cpus_have_final_cap(ARM64_SME)) {
+ val = read_sysreg_s(SYS_HFGRTR_EL2);
+ val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK);
+ write_sysreg_s(val, SYS_HFGRTR_EL2);
+
+ val = read_sysreg_s(SYS_HFGWTR_EL2);
+ val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK);
+ write_sysreg_s(val, SYS_HFGWTR_EL2);
+ }
+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -94,9 +108,25 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
+ if (cpus_have_final_cap(ARM64_SME)) {
+ u64 val;
+
+ val = read_sysreg_s(SYS_HFGRTR_EL2);
+ val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK;
+ write_sysreg_s(val, SYS_HFGRTR_EL2);
+
+ val = read_sysreg_s(SYS_HFGWTR_EL2);
+ val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
+ HFGxTR_EL2_nSMPRI_EL1_MASK;
+ write_sysreg_s(val, SYS_HFGWTR_EL2);
+ }
+
cptr = CPTR_EL2_DEFAULT;
if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
cptr |= CPTR_EL2_TZ;
+ if (cpus_have_final_cap(ARM64_SME))
+ cptr &= ~CPTR_EL2_TSM;
write_sysreg(cptr, cptr_el2);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 262dfe03134d..969f20daf97a 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -41,7 +41,8 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
- val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+ val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
+ CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);
/*
* With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
@@ -62,6 +63,10 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_fpsimd32(vcpu);
}
+ if (cpus_have_final_cap(ARM64_SME))
+ write_sysreg(read_sysreg(sctlr_el2) & ~SCTLR_ELx_ENTP2,
+ sctlr_el2);
+
write_sysreg(val, cpacr_el1);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
@@ -83,6 +88,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
*/
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
+ if (cpus_have_final_cap(ARM64_SME))
+ write_sysreg(read_sysreg(sctlr_el2) | SCTLR_ELx_ENTP2,
+ sctlr_el2);
+
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
if (!arm64_kernel_unmapped_at_el0())
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 7b45c040cc27..689e53dd4cb1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1132,6 +1132,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
case SYS_ID_AA64PFR1_EL1:
if (!kvm_has_mte(vcpu->kvm))
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_SME);
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1553,7 +1555,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_UNALLOCATED(4,2),
ID_UNALLOCATED(4,3),
ID_SANITISED(ID_AA64ZFR0_EL1),
- ID_UNALLOCATED(4,5),
+ ID_HIDDEN(ID_AA64SMFR0_EL1),
ID_UNALLOCATED(4,6),
ID_UNALLOCATED(4,7),
@@ -1596,6 +1598,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
+ { SYS_DESC(SYS_SMPRI_EL1), undef_access },
+ { SYS_DESC(SYS_SMCR_EL1), undef_access },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
@@ -1678,8 +1682,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
+ { SYS_DESC(SYS_SMIDR_EL1), undef_access },
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
{ SYS_DESC(SYS_CTR_EL0), access_ctr },
+ { SYS_DESC(SYS_SVCR_EL0), undef_access },
{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
.reset = reset_pmcr, .reg = PMCR_EL0 },
@@ -1719,6 +1725,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
+ { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index fb07e94242bb..a1410143ea62 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -90,6 +90,32 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif
+/* Current arm64 boot protocol requires 2MB alignment */
+#define CRASH_ALIGN SZ_2M
+
+#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
+#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
+
+static int __init reserve_crashkernel_low(unsigned long long low_size)
+{
+ unsigned long long low_base;
+
+ low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
+ if (!low_base) {
+ pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
+ return -ENOMEM;
+ }
+
+ pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
+ low_base, low_base + low_size, low_size >> 20);
+
+ crashk_low_res.start = low_base;
+ crashk_low_res.end = low_base + low_size - 1;
+ insert_resource(&iomem_resource, &crashk_low_res);
+
+ return 0;
+}
+
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
@@ -100,17 +126,35 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
static void __init reserve_crashkernel(void)
{
unsigned long long crash_base, crash_size;
- unsigned long long crash_max = arm64_dma_phys_limit;
+ unsigned long long crash_low_size = 0;
+ unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
+ char *cmdline = boot_command_line;
int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ /* crashkernel=X[@offset] */
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
&crash_size, &crash_base);
- /* no crashkernel= or invalid value specified */
- if (ret || !crash_size)
+ if (ret == -ENOENT) {
+ ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
+ if (ret || !crash_size)
+ return;
+
+ /*
+	 * crashkernel=Y,low may be omitted, but an invalid value is
+	 * not allowed.
+ */
+ ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
+ if (ret && (ret != -ENOENT))
+ return;
+
+ crash_max = CRASH_ADDR_HIGH_MAX;
+ } else if (ret || !crash_size) {
+ /* The specified value is invalid */
return;
+ }
crash_size = PAGE_ALIGN(crash_size);
@@ -118,8 +162,7 @@ static void __init reserve_crashkernel(void)
if (crash_base)
crash_max = crash_base + crash_size;
- /* Current arm64 boot protocol requires 2MB alignment */
- crash_base = memblock_phys_alloc_range(crash_size, SZ_2M,
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
crash_base, crash_max);
if (!crash_base) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -127,6 +170,12 @@ static void __init reserve_crashkernel(void)
return;
}
+ if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
+ crash_low_size && reserve_crashkernel_low(crash_low_size)) {
+ memblock_phys_free(crash_base, crash_size);
+ return;
+ }
+
pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
@@ -135,8 +184,12 @@ static void __init reserve_crashkernel(void)
* map. Inform kmemleak so that it won't try to access it.
*/
kmemleak_ignore_phys(crash_base);
+ if (crashk_low_res.end)
+ kmemleak_ignore_phys(crashk_low_res.start);
+
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
+ insert_resource(&iomem_resource, &crashk_res);
}
/*
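
With this change the crashkernel=size,high and crashkernel=size,low forms work on arm64: the main reservation may come from anywhere in the physical address space, with an optional DMA-capable low reservation alongside it, for example (sizes are illustrative):

	crashkernel=2G,high crashkernel=256M,low

A plain crashkernel=256M continues to behave as before, confining the whole reservation below arm64_dma_phys_limit (CRASH_ADDR_LOW_MAX above).
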
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 3ed418f70e3b..e52b289a27c2 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -43,6 +43,8 @@ KVM_PROTECTED_MODE
MISMATCHED_CACHE_TYPE
MTE
MTE_ASYMM
+SME
+SME_FA64
SPECTRE_V2
SPECTRE_V3A
SPECTRE_V4
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 1e31c7d21597..b09d73c2ba89 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -579,9 +579,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
@@ -610,18 +608,7 @@ int ftrace_disable_ftrace_graph_caller(void)
return ftrace_mod_jmp(ip, &ftrace_stub);
}
-#else /* !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
-int ftrace_enable_ftrace_graph_caller(void)
-{
- return 0;
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
-#endif /* !CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
/*
* Hook the return address and push it in the stack of return addrs
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index ec315b060cd5..2f248d0acc04 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -973,16 +973,24 @@ static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
+/*
+ * The main user of linux,usable-memory-range is the crash dump kernel.
+ * Originally there was a single usable-memory region; now there may be
+ * two, a low region and a high region.
+ * For compatibility with existing user space and older kdump tools, the
+ * low region, if it exists, is always the last range of
+ * linux,usable-memory-range.
+ */
+#define MAX_USABLE_RANGES 2
+
/**
* early_init_dt_check_for_usable_mem_range - Decode usable memory range
* location from flat tree
*/
void __init early_init_dt_check_for_usable_mem_range(void)
{
- const __be32 *prop;
- int len;
- phys_addr_t cap_mem_addr;
- phys_addr_t cap_mem_size;
+ struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
+ const __be32 *prop, *endp;
+ int len, i;
unsigned long node = chosen_node_offset;
if ((long)node < 0)
@@ -991,16 +999,21 @@ void __init early_init_dt_check_for_usable_mem_range(void)
pr_debug("Looking for usable-memory-range property... ");
prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
- if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
+ if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
return;
- cap_mem_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
- cap_mem_size = dt_mem_next_cell(dt_root_size_cells, &prop);
+ endp = prop + (len / sizeof(__be32));
+ for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
+ rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
+ rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
- pr_debug("cap_mem_start=%pa cap_mem_size=%pa\n", &cap_mem_addr,
- &cap_mem_size);
+ pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
+ i, &rgn[i].base, &rgn[i].size);
+ }
- memblock_cap_memory_range(cap_mem_addr, cap_mem_size);
+ memblock_cap_memory_range(rgn[0].base, rgn[0].size);
+ for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
+ memblock_add(rgn[i].base, rgn[i].size);
}
#ifdef CONFIG_SERIAL_EARLYCON
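
For kdump, the kexec path appends the low reservation as a second <base, size> pair, and the parser above accepts up to two such pairs, capping memory to the first and adding the second back. An illustrative /chosen fragment with #address-cells = <2> and #size-cells = <2> (the addresses are placeholders, not taken from the patch):

	chosen {
		linux,usable-memory-range = <0x8 0x80000000 0x0 0x80000000>,	/* high region */
					    <0x0 0xc0000000 0x0 0x10000000>;	/* low region */
	};
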
diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c
index b9bd1cff1793..8d374cc552be 100644
--- a/drivers/of/kexec.c
+++ b/drivers/of/kexec.c
@@ -386,6 +386,15 @@ void *of_kexec_alloc_and_setup_fdt(const struct kimage *image,
crashk_res.end - crashk_res.start + 1);
if (ret)
goto out;
+
+ if (crashk_low_res.end) {
+ ret = fdt_appendprop_addrrange(fdt, 0, chosen_node,
+ "linux,usable-memory-range",
+ crashk_low_res.start,
+ crashk_low_res.end - crashk_low_res.start + 1);
+ if (ret)
+ goto out;
+ }
}
/* add bootargs */
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 9c1d82be7a2f..80d8309652a4 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -39,7 +39,7 @@
#define CMN_CHILD_NODE_ADDR GENMASK(27, 0)
#define CMN_CHILD_NODE_EXTERNAL BIT(31)
-#define CMN_MAX_DIMENSION 8
+#define CMN_MAX_DIMENSION 12
#define CMN_MAX_XPS (CMN_MAX_DIMENSION * CMN_MAX_DIMENSION)
#define CMN_MAX_DTMS (CMN_MAX_XPS + (CMN_MAX_DIMENSION - 1) * 4)
@@ -52,6 +52,10 @@
#define CMN_INFO_RSP_VC_NUM GENMASK_ULL(53, 52)
#define CMN_INFO_DAT_VC_NUM GENMASK_ULL(51, 50)
+#define CMN_CFGM_INFO_GLOBAL_1 0x908
+#define CMN_INFO_SNP_VC_NUM GENMASK_ULL(3, 2)
+#define CMN_INFO_REQ_VC_NUM GENMASK_ULL(1, 0)
+
/* XPs also have some local topology info which has uses too */
#define CMN_MXP__CONNECT_INFO_P0 0x0008
#define CMN_MXP__CONNECT_INFO_P1 0x0010
@@ -59,18 +63,26 @@
#define CMN_MXP__CONNECT_INFO_P3 0x0030
#define CMN_MXP__CONNECT_INFO_P4 0x0038
#define CMN_MXP__CONNECT_INFO_P5 0x0040
+#define CMN__CONNECT_INFO_DEVICE_TYPE GENMASK_ULL(4, 0)
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
/* For most nodes, this is all there is */
#define CMN_PMU_EVENT_SEL 0x000
-#define CMN_PMU_EVENTn_ID_SHIFT(n) ((n) * 8)
+#define CMN__PMU_CBUSY_SNTHROTTLE_SEL GENMASK_ULL(44, 42)
+#define CMN__PMU_CLASS_OCCUP_ID GENMASK_ULL(36, 35)
+/* Technically this is 4 bits wide on DNs, but we only use 2 there anyway */
+#define CMN__PMU_OCCUP1_ID GENMASK_ULL(34, 32)
+
+/* HN-Ps are weird... */
+#define CMN_HNP_PMU_EVENT_SEL 0x008
/* DTMs live in the PMU space of XP registers */
#define CMN_DTM_WPn(n) (0x1A0 + (n) * 0x18)
#define CMN_DTM_WPn_CONFIG(n) (CMN_DTM_WPn(n) + 0x00)
-#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18,17)
+#define CMN_DTM_WPn_CONFIG_WP_CHN_NUM GENMASK_ULL(20, 19)
+#define CMN_DTM_WPn_CONFIG_WP_DEV_SEL2 GENMASK_ULL(18, 17)
#define CMN_DTM_WPn_CONFIG_WP_COMBINE BIT(9)
#define CMN_DTM_WPn_CONFIG_WP_EXCLUSIVE BIT(8)
#define CMN600_WPn_CONFIG_WP_COMBINE BIT(6)
@@ -143,8 +155,8 @@
/* Event attributes */
#define CMN_CONFIG_TYPE GENMASK_ULL(15, 0)
-#define CMN_CONFIG_EVENTID GENMASK_ULL(23, 16)
-#define CMN_CONFIG_OCCUPID GENMASK_ULL(27, 24)
+#define CMN_CONFIG_EVENTID GENMASK_ULL(26, 16)
+#define CMN_CONFIG_OCCUPID GENMASK_ULL(30, 27)
#define CMN_CONFIG_BYNODEID BIT_ULL(31)
#define CMN_CONFIG_NODEID GENMASK_ULL(47, 32)
@@ -177,9 +189,14 @@
enum cmn_model {
- CMN_ANY = -1,
CMN600 = 1,
- CI700 = 2,
+ CMN650 = 2,
+ CMN700 = 4,
+ CI700 = 8,
+ /* ...and then we can use bitmap tricks for commonality */
+ CMN_ANY = -1,
+ NOT_CMN600 = -2,
+ CMN_650ON = CMN650 | CMN700,
};
/* CMN-600 r0px shouldn't exist in silicon, thankfully */
@@ -191,6 +208,14 @@ enum cmn_revision {
CMN600_R2P0,
CMN600_R3P0,
CMN600_R3P1,
+ CMN650_R0P0 = 0,
+ CMN650_R1P0,
+ CMN650_R1P1,
+ CMN650_R2P0,
+ CMN650_R1P2,
+ CMN700_R0P0 = 0,
+ CMN700_R1P0,
+ CMN700_R2P0,
CI700_R0P0 = 0,
CI700_R1P0,
CI700_R2P0,
@@ -211,13 +236,26 @@ enum cmn_node_type {
CMN_TYPE_RND = 0xd,
CMN_TYPE_RNSAM = 0xf,
CMN_TYPE_MTSX,
+ CMN_TYPE_HNP,
CMN_TYPE_CXRA = 0x100,
- CMN_TYPE_CXHA = 0x101,
- CMN_TYPE_CXLA = 0x102,
+ CMN_TYPE_CXHA,
+ CMN_TYPE_CXLA,
+ CMN_TYPE_CCRA,
+ CMN_TYPE_CCHA,
+ CMN_TYPE_CCLA,
+ CMN_TYPE_CCLA_RNI,
/* Not a real node type */
CMN_TYPE_WP = 0x7770
};
+enum cmn_filter_select {
+ SEL_NONE = -1,
+ SEL_OCCUP1ID,
+ SEL_CLASS_OCCUP_ID,
+ SEL_CBUSY_SNTHROTTLE_SEL,
+ SEL_MAX
+};
+
struct arm_cmn_node {
void __iomem *pmu_base;
u16 id, logid;
@@ -227,15 +265,17 @@ struct arm_cmn_node {
union {
/* DN/HN-F/CXHA */
struct {
- u8 occupid_val;
- u8 occupid_count;
- };
+ u8 val : 4;
+ u8 count : 4;
+ } occupid[SEL_MAX];
/* XP */
u8 dtc;
};
union {
u8 event[4];
__le32 event_sel;
+ u16 event_w[4];
+ __le64 event_sel_w;
};
};
@@ -278,6 +318,8 @@ struct arm_cmn {
struct {
unsigned int rsp_vc_num : 2;
unsigned int dat_vc_num : 2;
+ unsigned int snp_vc_num : 2;
+ unsigned int req_vc_num : 2;
};
struct arm_cmn_node *xps;
@@ -307,9 +349,7 @@ struct arm_cmn_nodeid {
static int arm_cmn_xyidbits(const struct arm_cmn *cmn)
{
- int dim = max(cmn->mesh_x, cmn->mesh_y);
-
- return dim > 4 ? 3 : 2;
+ return fls((cmn->mesh_x - 1) | (cmn->mesh_y - 1) | 2);
}
static struct arm_cmn_nodeid arm_cmn_nid(const struct arm_cmn *cmn, u16 id)
@@ -361,7 +401,8 @@ static struct dentry *arm_cmn_debugfs;
#ifdef CONFIG_DEBUG_FS
static const char *arm_cmn_device_type(u8 type)
{
- switch(type) {
+ switch(FIELD_GET(CMN__CONNECT_INFO_DEVICE_TYPE, type)) {
+ case 0x00: return " |";
case 0x01: return " RN-I |";
case 0x02: return " RN-D |";
case 0x04: return " RN-F_B |";
@@ -371,6 +412,7 @@ static const char *arm_cmn_device_type(u8 type)
case 0x08: return " HN-T |";
case 0x09: return " HN-I |";
case 0x0a: return " HN-D |";
+ case 0x0b: return " HN-P |";
case 0x0c: return " SN-F |";
case 0x0d: return " SBSX |";
case 0x0e: return " HN-F |";
@@ -383,8 +425,12 @@ static const char *arm_cmn_device_type(u8 type)
case 0x15: return "RN-F_D_E|";
case 0x16: return " RN-F_C |";
case 0x17: return "RN-F_C_E|";
+ case 0x18: return " RN-F_E |";
+ case 0x19: return "RN-F_E_E|";
case 0x1c: return " MTSX |";
- default: return " |";
+ case 0x1d: return " HN-V |";
+ case 0x1e: return " CCG |";
+ default: return " ???? |";
}
}
@@ -492,11 +538,13 @@ static void arm_cmn_debugfs_init(struct arm_cmn *cmn, int id) {}
struct arm_cmn_hw_event {
struct arm_cmn_node *dn;
- u64 dtm_idx[2];
+ u64 dtm_idx[4];
unsigned int dtc_idx;
u8 dtcs_used;
u8 num_dns;
u8 dtm_offset;
+ bool wide_sel;
+ enum cmn_filter_select filter_sel;
};
#define for_each_hw_dn(hw, dn, i) \
@@ -522,7 +570,8 @@ struct arm_cmn_event_attr {
struct device_attribute attr;
enum cmn_model model;
enum cmn_node_type type;
- u8 eventid;
+ enum cmn_filter_select fsel;
+ u16 eventid;
u8 occupid;
};
@@ -532,23 +581,17 @@ struct arm_cmn_format_attr {
int config;
};
-#define CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid) \
+#define _CMN_EVENT_ATTR(_model, _name, _type, _eventid, _occupid, _fsel)\
(&((struct arm_cmn_event_attr[]) {{ \
.attr = __ATTR(_name, 0444, arm_cmn_event_show, NULL), \
.model = _model, \
.type = _type, \
.eventid = _eventid, \
.occupid = _occupid, \
+ .fsel = _fsel, \
}})[0].attr.attr)
-
-static bool arm_cmn_is_occup_event(enum cmn_model model,
- enum cmn_node_type type, unsigned int id)
-{
- if (type == CMN_TYPE_DVM)
- return (model == CMN600 && id == 0x05) ||
- (model == CI700 && id == 0x0c);
- return type == CMN_TYPE_HNF && id == 0x0f;
-}
+#define CMN_EVENT_ATTR(_model, _name, _type, _eventid) \
+ _CMN_EVENT_ATTR(_model, _name, _type, _eventid, 0, SEL_NONE)
static ssize_t arm_cmn_event_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -565,7 +608,7 @@ static ssize_t arm_cmn_event_show(struct device *dev,
"type=0x%x,eventid=0x%x,wp_dev_sel=?,wp_chn_sel=?,wp_grp=?,wp_val=?,wp_mask=?\n",
eattr->type, eattr->eventid);
- if (arm_cmn_is_occup_event(eattr->model, eattr->type, eattr->eventid))
+ if (eattr->fsel > SEL_NONE)
return sysfs_emit(buf, "type=0x%x,eventid=0x%x,occupid=0x%x\n",
eattr->type, eattr->eventid, eattr->occupid);
@@ -580,20 +623,25 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct arm_cmn *cmn = to_cmn(dev_get_drvdata(dev));
struct arm_cmn_event_attr *eattr;
+ enum cmn_node_type type;
+ u16 eventid;
eattr = container_of(attr, typeof(*eattr), attr.attr);
if (!(eattr->model & cmn->model))
return 0;
+ type = eattr->type;
+ eventid = eattr->eventid;
+
/* Watchpoints aren't nodes, so avoid confusion */
- if (eattr->type == CMN_TYPE_WP)
+ if (type == CMN_TYPE_WP)
return attr->mode;
/* Hide XP events for unused interfaces/channels */
- if (eattr->type == CMN_TYPE_XP) {
- unsigned int intf = (eattr->eventid >> 2) & 7;
- unsigned int chan = eattr->eventid >> 5;
+ if (type == CMN_TYPE_XP) {
+ unsigned int intf = (eventid >> 2) & 7;
+ unsigned int chan = eventid >> 5;
if ((intf & 4) && !(cmn->ports_used & BIT(intf & 3)))
return 0;
@@ -602,43 +650,107 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
return 0;
if ((chan == 5 && cmn->rsp_vc_num < 2) ||
- (chan == 6 && cmn->dat_vc_num < 2))
+ (chan == 6 && cmn->dat_vc_num < 2) ||
+ (chan == 7 && cmn->snp_vc_num < 2) ||
+ (chan == 8 && cmn->req_vc_num < 2))
return 0;
}
/* Revision-specific differences */
- if (cmn->model == CMN600 && cmn->rev < CMN600_R1P2) {
- if (eattr->type == CMN_TYPE_HNF && eattr->eventid == 0x1b)
- return 0;
+ if (cmn->model == CMN600) {
+ if (cmn->rev < CMN600_R1P3) {
+ if (type == CMN_TYPE_CXRA && eventid > 0x10)
+ return 0;
+ }
+ if (cmn->rev < CMN600_R1P2) {
+ if (type == CMN_TYPE_HNF && eventid == 0x1b)
+ return 0;
+ if (type == CMN_TYPE_CXRA || type == CMN_TYPE_CXHA)
+ return 0;
+ }
+ } else if (cmn->model == CMN650) {
+ if (cmn->rev < CMN650_R2P0 || cmn->rev == CMN650_R1P2) {
+ if (type == CMN_TYPE_HNF && eventid > 0x22)
+ return 0;
+ if (type == CMN_TYPE_SBSX && eventid == 0x17)
+ return 0;
+ if (type == CMN_TYPE_RNI && eventid > 0x10)
+ return 0;
+ }
+ } else if (cmn->model == CMN700) {
+ if (cmn->rev < CMN700_R2P0) {
+ if (type == CMN_TYPE_HNF && eventid > 0x2c)
+ return 0;
+ if (type == CMN_TYPE_CCHA && eventid > 0x74)
+ return 0;
+ if (type == CMN_TYPE_CCLA && eventid > 0x27)
+ return 0;
+ }
+ if (cmn->rev < CMN700_R1P0) {
+ if (type == CMN_TYPE_HNF && eventid > 0x2b)
+ return 0;
+ }
}
- if (!arm_cmn_node(cmn, eattr->type))
+ if (!arm_cmn_node(cmn, type))
return 0;
return attr->mode;
}
-#define _CMN_EVENT_DVM(_model, _name, _event, _occup) \
- CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup)
+#define _CMN_EVENT_DVM(_model, _name, _event, _occup, _fsel) \
+ _CMN_EVENT_ATTR(_model, dn_##_name, CMN_TYPE_DVM, _event, _occup, _fsel)
#define CMN_EVENT_DTC(_name) \
- CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0, 0)
-#define _CMN_EVENT_HNF(_model, _name, _event, _occup) \
- CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup)
+ CMN_EVENT_ATTR(CMN_ANY, dtc_##_name, CMN_TYPE_DTC, 0)
+#define _CMN_EVENT_HNF(_model, _name, _event, _occup, _fsel) \
+ _CMN_EVENT_ATTR(_model, hnf_##_name, CMN_TYPE_HNF, _event, _occup, _fsel)
#define CMN_EVENT_HNI(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event, 0)
+ CMN_EVENT_ATTR(CMN_ANY, hni_##_name, CMN_TYPE_HNI, _event)
+#define CMN_EVENT_HNP(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, hnp_##_name, CMN_TYPE_HNP, _event)
#define __CMN_EVENT_XP(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event, 0)
+ CMN_EVENT_ATTR(CMN_ANY, mxp_##_name, CMN_TYPE_XP, _event)
#define CMN_EVENT_SBSX(_model, _name, _event) \
- CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event, 0)
+ CMN_EVENT_ATTR(_model, sbsx_##_name, CMN_TYPE_SBSX, _event)
#define CMN_EVENT_RNID(_model, _name, _event) \
- CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event, 0)
+ CMN_EVENT_ATTR(_model, rnid_##_name, CMN_TYPE_RNI, _event)
#define CMN_EVENT_MTSX(_name, _event) \
- CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event, 0)
+ CMN_EVENT_ATTR(CMN_ANY, mtsx_##_name, CMN_TYPE_MTSX, _event)
+#define CMN_EVENT_CXRA(_model, _name, _event) \
+ CMN_EVENT_ATTR(_model, cxra_##_name, CMN_TYPE_CXRA, _event)
+#define CMN_EVENT_CXHA(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, cxha_##_name, CMN_TYPE_CXHA, _event)
+#define CMN_EVENT_CCRA(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, ccra_##_name, CMN_TYPE_CCRA, _event)
+#define CMN_EVENT_CCHA(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, ccha_##_name, CMN_TYPE_CCHA, _event)
+#define CMN_EVENT_CCLA(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, ccla_##_name, CMN_TYPE_CCLA, _event)
+#define CMN_EVENT_CCLA_RNI(_name, _event) \
+ CMN_EVENT_ATTR(CMN_ANY, ccla_rni_##_name, CMN_TYPE_CCLA_RNI, _event)
#define CMN_EVENT_DVM(_model, _name, _event) \
- _CMN_EVENT_DVM(_model, _name, _event, 0)
+ _CMN_EVENT_DVM(_model, _name, _event, 0, SEL_NONE)
+#define CMN_EVENT_DVM_OCC(_model, _name, _event) \
+ _CMN_EVENT_DVM(_model, _name##_all, _event, 0, SEL_OCCUP1ID), \
+ _CMN_EVENT_DVM(_model, _name##_dvmop, _event, 1, SEL_OCCUP1ID), \
+ _CMN_EVENT_DVM(_model, _name##_dvmsync, _event, 2, SEL_OCCUP1ID)
#define CMN_EVENT_HNF(_model, _name, _event) \
- _CMN_EVENT_HNF(_model, _name, _event, 0)
+ _CMN_EVENT_HNF(_model, _name, _event, 0, SEL_NONE)
+#define CMN_EVENT_HNF_CLS(_model, _name, _event) \
+ _CMN_EVENT_HNF(_model, _name##_class0, _event, 0, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_HNF(_model, _name##_class1, _event, 1, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_HNF(_model, _name##_class2, _event, 2, SEL_CLASS_OCCUP_ID), \
+ _CMN_EVENT_HNF(_model, _name##_class3, _event, 3, SEL_CLASS_OCCUP_ID)
+#define CMN_EVENT_HNF_SNT(_model, _name, _event) \
+ _CMN_EVENT_HNF(_model, _name##_all, _event, 0, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_group0_read, _event, 1, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_group0_write, _event, 2, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_group1_read, _event, 3, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_group1_write, _event, 4, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_read, _event, 5, SEL_CBUSY_SNTHROTTLE_SEL), \
+ _CMN_EVENT_HNF(_model, _name##_write, _event, 6, SEL_CBUSY_SNTHROTTLE_SEL)
+
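For illustration, the compound macros above fan a single hardware event code out into one sysfs event per filter value. Taking CMN_EVENT_HNF_SNT(CMN700, sn_throttle, 0x2a) from the attribute list below, the expansion (read straight off the macro definitions above) is:

	_CMN_EVENT_HNF(CMN700, sn_throttle_all, 0x2a, 0, SEL_CBUSY_SNTHROTTLE_SEL),
	_CMN_EVENT_HNF(CMN700, sn_throttle_group0_read, 0x2a, 1, SEL_CBUSY_SNTHROTTLE_SEL),
	_CMN_EVENT_HNF(CMN700, sn_throttle_group0_write, 0x2a, 2, SEL_CBUSY_SNTHROTTLE_SEL),
	_CMN_EVENT_HNF(CMN700, sn_throttle_group1_read, 0x2a, 3, SEL_CBUSY_SNTHROTTLE_SEL),
	_CMN_EVENT_HNF(CMN700, sn_throttle_group1_write, 0x2a, 4, SEL_CBUSY_SNTHROTTLE_SEL),
	_CMN_EVENT_HNF(CMN700, sn_throttle_read, 0x2a, 5, SEL_CBUSY_SNTHROTTLE_SEL),
	_CMN_EVENT_HNF(CMN700, sn_throttle_write, 0x2a, 6, SEL_CBUSY_SNTHROTTLE_SEL),

i.e. seven hnf_sn_throttle_* events which all program event 0x2a on HN-F nodes but select different buckets via the CBUSY/SN-throttle filter field (SEL_CBUSY_SNTHROTTLE_SEL).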
#define _CMN_EVENT_XP(_name, _event) \
__CMN_EVENT_XP(e_##_name, (_event) | (0 << 2)), \
__CMN_EVENT_XP(w_##_name, (_event) | (1 << 2)), \
@@ -657,7 +769,9 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj,
_CMN_EVENT_XP(dat_##_name, (_event) | (3 << 5)), \
_CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \
_CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \
- _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5))
+ _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \
+ _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \
+ _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5))
static struct attribute *arm_cmn_event_attrs[] = {
@@ -672,23 +786,27 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_DVM(CMN600, rxreq_dvmsync, 0x02),
CMN_EVENT_DVM(CMN600, rxreq_dvmop_vmid_filtered, 0x03),
CMN_EVENT_DVM(CMN600, rxreq_retried, 0x04),
- _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_all, 0x05, 0),
- _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmop, 0x05, 1),
- _CMN_EVENT_DVM(CMN600, rxreq_trk_occupancy_dvmsync, 0x05, 2),
- CMN_EVENT_DVM(CI700, dvmop_tlbi, 0x01),
- CMN_EVENT_DVM(CI700, dvmop_bpi, 0x02),
- CMN_EVENT_DVM(CI700, dvmop_pici, 0x03),
- CMN_EVENT_DVM(CI700, dvmop_vici, 0x04),
- CMN_EVENT_DVM(CI700, dvmsync, 0x05),
- CMN_EVENT_DVM(CI700, vmid_filtered, 0x06),
- CMN_EVENT_DVM(CI700, rndop_filtered, 0x07),
- CMN_EVENT_DVM(CI700, retry, 0x08),
- CMN_EVENT_DVM(CI700, txsnp_flitv, 0x09),
- CMN_EVENT_DVM(CI700, txsnp_stall, 0x0a),
- CMN_EVENT_DVM(CI700, trkfull, 0x0b),
- _CMN_EVENT_DVM(CI700, trk_occupancy_all, 0x0c, 0),
- _CMN_EVENT_DVM(CI700, trk_occupancy_dvmop, 0x0c, 1),
- _CMN_EVENT_DVM(CI700, trk_occupancy_dvmsync, 0x0c, 2),
+ CMN_EVENT_DVM_OCC(CMN600, rxreq_trk_occupancy, 0x05),
+ CMN_EVENT_DVM(NOT_CMN600, dvmop_tlbi, 0x01),
+ CMN_EVENT_DVM(NOT_CMN600, dvmop_bpi, 0x02),
+ CMN_EVENT_DVM(NOT_CMN600, dvmop_pici, 0x03),
+ CMN_EVENT_DVM(NOT_CMN600, dvmop_vici, 0x04),
+ CMN_EVENT_DVM(NOT_CMN600, dvmsync, 0x05),
+ CMN_EVENT_DVM(NOT_CMN600, vmid_filtered, 0x06),
+ CMN_EVENT_DVM(NOT_CMN600, rndop_filtered, 0x07),
+ CMN_EVENT_DVM(NOT_CMN600, retry, 0x08),
+ CMN_EVENT_DVM(NOT_CMN600, txsnp_flitv, 0x09),
+ CMN_EVENT_DVM(NOT_CMN600, txsnp_stall, 0x0a),
+ CMN_EVENT_DVM(NOT_CMN600, trkfull, 0x0b),
+ CMN_EVENT_DVM_OCC(NOT_CMN600, trk_occupancy, 0x0c),
+ CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_cxha, 0x0d),
+ CMN_EVENT_DVM_OCC(CMN700, trk_occupancy_pdn, 0x0e),
+ CMN_EVENT_DVM(CMN700, trk_alloc, 0x0f),
+ CMN_EVENT_DVM(CMN700, trk_cxha_alloc, 0x10),
+ CMN_EVENT_DVM(CMN700, trk_pdn_alloc, 0x11),
+ CMN_EVENT_DVM(CMN700, txsnp_stall_limit, 0x12),
+ CMN_EVENT_DVM(CMN700, rxsnp_stall_starv, 0x13),
+ CMN_EVENT_DVM(CMN700, txsnp_sync_stall_op, 0x14),
CMN_EVENT_HNF(CMN_ANY, cache_miss, 0x01),
CMN_EVENT_HNF(CMN_ANY, slc_sf_cache_access, 0x02),
@@ -704,11 +822,11 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_HNF(CMN_ANY, mc_retries, 0x0c),
CMN_EVENT_HNF(CMN_ANY, mc_reqs, 0x0d),
CMN_EVENT_HNF(CMN_ANY, qos_hh_retry, 0x0e),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3),
- _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_all, 0x0f, 0, SEL_OCCUP1ID),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_read, 0x0f, 1, SEL_OCCUP1ID),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_write, 0x0f, 2, SEL_OCCUP1ID),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_atomic, 0x0f, 3, SEL_OCCUP1ID),
+ _CMN_EVENT_HNF(CMN_ANY, qos_pocq_occupancy_stash, 0x0f, 4, SEL_OCCUP1ID),
CMN_EVENT_HNF(CMN_ANY, pocq_addrhaz, 0x10),
CMN_EVENT_HNF(CMN_ANY, pocq_atomic_addrhaz, 0x11),
CMN_EVENT_HNF(CMN_ANY, ld_st_swp_adq_full, 0x12),
@@ -725,9 +843,22 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_HNF(CMN_ANY, stash_snp_sent, 0x1d),
CMN_EVENT_HNF(CMN_ANY, stash_data_pull, 0x1e),
CMN_EVENT_HNF(CMN_ANY, snp_fwded, 0x1f),
- CMN_EVENT_HNF(CI700, atomic_fwd, 0x20),
- CMN_EVENT_HNF(CI700, mpam_hardlim, 0x21),
- CMN_EVENT_HNF(CI700, mpam_softlim, 0x22),
+ CMN_EVENT_HNF(NOT_CMN600, atomic_fwd, 0x20),
+ CMN_EVENT_HNF(NOT_CMN600, mpam_hardlim, 0x21),
+ CMN_EVENT_HNF(NOT_CMN600, mpam_softlim, 0x22),
+ CMN_EVENT_HNF(CMN_650ON, snp_sent_cluster, 0x23),
+ CMN_EVENT_HNF(CMN_650ON, sf_imprecise_evict, 0x24),
+ CMN_EVENT_HNF(CMN_650ON, sf_evict_shared_line, 0x25),
+ CMN_EVENT_HNF_CLS(CMN700, pocq_class_occup, 0x26),
+ CMN_EVENT_HNF_CLS(CMN700, pocq_class_retry, 0x27),
+ CMN_EVENT_HNF_CLS(CMN700, class_mc_reqs, 0x28),
+ CMN_EVENT_HNF_CLS(CMN700, class_cgnt_cmin, 0x29),
+ CMN_EVENT_HNF_SNT(CMN700, sn_throttle, 0x2a),
+ CMN_EVENT_HNF_SNT(CMN700, sn_throttle_min, 0x2b),
+ CMN_EVENT_HNF(CMN700, sf_precise_to_imprecise, 0x2c),
+ CMN_EVENT_HNF(CMN700, snp_intv_cln, 0x2d),
+ CMN_EVENT_HNF(CMN700, nc_excl, 0x2e),
+ CMN_EVENT_HNF(CMN700, excl_mon_ovfl, 0x2f),
CMN_EVENT_HNI(rrt_rd_occ_cnt_ovfl, 0x20),
CMN_EVENT_HNI(rrt_wr_occ_cnt_ovfl, 0x21),
@@ -749,12 +880,33 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_HNI(nonpcie_serialization, 0x31),
CMN_EVENT_HNI(pcie_serialization, 0x32),
+ /*
+ * HN-P events squat on top of the HN-I similarly to DVM events, except
+ * for being crammed into the same physical node as well. And of course
+ * where would the fun be if the same events were in the same order...
+ */
+ CMN_EVENT_HNP(rrt_wr_occ_cnt_ovfl, 0x01),
+ CMN_EVENT_HNP(rdt_wr_occ_cnt_ovfl, 0x02),
+ CMN_EVENT_HNP(wdb_occ_cnt_ovfl, 0x03),
+ CMN_EVENT_HNP(rrt_wr_alloc, 0x04),
+ CMN_EVENT_HNP(rdt_wr_alloc, 0x05),
+ CMN_EVENT_HNP(wdb_alloc, 0x06),
+ CMN_EVENT_HNP(awvalid_no_awready, 0x07),
+ CMN_EVENT_HNP(awready_no_awvalid, 0x08),
+ CMN_EVENT_HNP(wvalid_no_wready, 0x09),
+ CMN_EVENT_HNP(rrt_rd_occ_cnt_ovfl, 0x11),
+ CMN_EVENT_HNP(rdt_rd_occ_cnt_ovfl, 0x12),
+ CMN_EVENT_HNP(rrt_rd_alloc, 0x13),
+ CMN_EVENT_HNP(rdt_rd_alloc, 0x14),
+ CMN_EVENT_HNP(arvalid_no_arready, 0x15),
+ CMN_EVENT_HNP(arready_no_arvalid, 0x16),
+
CMN_EVENT_XP(txflit_valid, 0x01),
CMN_EVENT_XP(txflit_stall, 0x02),
CMN_EVENT_XP(partial_dat_flit, 0x03),
/* We treat watchpoints as a special made-up class of XP events */
- CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP, 0),
- CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN, 0),
+ CMN_EVENT_ATTR(CMN_ANY, watchpoint_up, CMN_TYPE_WP, CMN_WP_UP),
+ CMN_EVENT_ATTR(CMN_ANY, watchpoint_down, CMN_TYPE_WP, CMN_WP_DOWN),
CMN_EVENT_SBSX(CMN_ANY, rd_req, 0x01),
CMN_EVENT_SBSX(CMN_ANY, wr_req, 0x02),
@@ -768,7 +920,7 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_SBSX(CMN_ANY, wdb_occ_cnt_ovfl, 0x14),
CMN_EVENT_SBSX(CMN_ANY, rd_axi_trkr_occ_cnt_ovfl, 0x15),
CMN_EVENT_SBSX(CMN_ANY, cmo_axi_trkr_occ_cnt_ovfl, 0x16),
- CMN_EVENT_SBSX(CI700, rdb_occ_cnt_ovfl, 0x17),
+ CMN_EVENT_SBSX(NOT_CMN600, rdb_occ_cnt_ovfl, 0x17),
CMN_EVENT_SBSX(CMN_ANY, arvalid_no_arready, 0x21),
CMN_EVENT_SBSX(CMN_ANY, awvalid_no_awready, 0x22),
CMN_EVENT_SBSX(CMN_ANY, wvalid_no_wready, 0x23),
@@ -795,12 +947,25 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_RNID(CMN600, rdb_replay, 0x12),
CMN_EVENT_RNID(CMN600, rdb_hybrid, 0x13),
CMN_EVENT_RNID(CMN600, rdb_ord, 0x14),
- CMN_EVENT_RNID(CI700, padb_occ_ovfl, 0x11),
- CMN_EVENT_RNID(CI700, rpdb_occ_ovfl, 0x12),
- CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice1, 0x13),
- CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice2, 0x14),
- CMN_EVENT_RNID(CI700, rrt_occup_ovfl_slice3, 0x15),
- CMN_EVENT_RNID(CI700, wrt_throttled, 0x16),
+ CMN_EVENT_RNID(NOT_CMN600, padb_occ_ovfl, 0x11),
+ CMN_EVENT_RNID(NOT_CMN600, rpdb_occ_ovfl, 0x12),
+ CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice1, 0x13),
+ CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice2, 0x14),
+ CMN_EVENT_RNID(NOT_CMN600, rrt_occup_ovfl_slice3, 0x15),
+ CMN_EVENT_RNID(NOT_CMN600, wrt_throttled, 0x16),
+ CMN_EVENT_RNID(CMN700, ldb_full, 0x17),
+ CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice0, 0x18),
+ CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice1, 0x19),
+ CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice2, 0x1a),
+ CMN_EVENT_RNID(CMN700, rrt_rd_req_occup_ovfl_slice3, 0x1b),
+ CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice0, 0x1c),
+ CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice1, 0x1d),
+ CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice2, 0x1e),
+ CMN_EVENT_RNID(CMN700, rrt_burst_occup_ovfl_slice3, 0x1f),
+ CMN_EVENT_RNID(CMN700, rrt_burst_alloc, 0x20),
+ CMN_EVENT_RNID(CMN700, awid_hash, 0x21),
+ CMN_EVENT_RNID(CMN700, atomic_alloc, 0x22),
+ CMN_EVENT_RNID(CMN700, atomic_occ_ovfl, 0x23),
CMN_EVENT_MTSX(tc_lookup, 0x01),
CMN_EVENT_MTSX(tc_fill, 0x02),
@@ -815,6 +980,118 @@ static struct attribute *arm_cmn_event_attrs[] = {
CMN_EVENT_MTSX(tcq_occ_cnt_ovfl, 0x0b),
CMN_EVENT_MTSX(tdb_occ_cnt_ovfl, 0x0c),
+ CMN_EVENT_CXRA(CMN_ANY, rht_occ, 0x01),
+ CMN_EVENT_CXRA(CMN_ANY, sht_occ, 0x02),
+ CMN_EVENT_CXRA(CMN_ANY, rdb_occ, 0x03),
+ CMN_EVENT_CXRA(CMN_ANY, wdb_occ, 0x04),
+ CMN_EVENT_CXRA(CMN_ANY, ssb_occ, 0x05),
+ CMN_EVENT_CXRA(CMN_ANY, snp_bcasts, 0x06),
+ CMN_EVENT_CXRA(CMN_ANY, req_chains, 0x07),
+ CMN_EVENT_CXRA(CMN_ANY, req_chain_avglen, 0x08),
+ CMN_EVENT_CXRA(CMN_ANY, chirsp_stalls, 0x09),
+ CMN_EVENT_CXRA(CMN_ANY, chidat_stalls, 0x0a),
+ CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link0, 0x0b),
+ CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link1, 0x0c),
+ CMN_EVENT_CXRA(CMN_ANY, cxreq_pcrd_stalls_link2, 0x0d),
+ CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link0, 0x0e),
+ CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link1, 0x0f),
+ CMN_EVENT_CXRA(CMN_ANY, cxdat_pcrd_stalls_link2, 0x10),
+ CMN_EVENT_CXRA(CMN_ANY, external_chirsp_stalls, 0x11),
+ CMN_EVENT_CXRA(CMN_ANY, external_chidat_stalls, 0x12),
+ CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link0, 0x13),
+ CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link1, 0x14),
+ CMN_EVENT_CXRA(NOT_CMN600, cxmisc_pcrd_stalls_link2, 0x15),
+
+ CMN_EVENT_CXHA(rddatbyp, 0x21),
+ CMN_EVENT_CXHA(chirsp_up_stall, 0x22),
+ CMN_EVENT_CXHA(chidat_up_stall, 0x23),
+ CMN_EVENT_CXHA(snppcrd_link0_stall, 0x24),
+ CMN_EVENT_CXHA(snppcrd_link1_stall, 0x25),
+ CMN_EVENT_CXHA(snppcrd_link2_stall, 0x26),
+ CMN_EVENT_CXHA(reqtrk_occ, 0x27),
+ CMN_EVENT_CXHA(rdb_occ, 0x28),
+ CMN_EVENT_CXHA(rdbyp_occ, 0x29),
+ CMN_EVENT_CXHA(wdb_occ, 0x2a),
+ CMN_EVENT_CXHA(snptrk_occ, 0x2b),
+ CMN_EVENT_CXHA(sdb_occ, 0x2c),
+ CMN_EVENT_CXHA(snphaz_occ, 0x2d),
+
+ CMN_EVENT_CCRA(rht_occ, 0x41),
+ CMN_EVENT_CCRA(sht_occ, 0x42),
+ CMN_EVENT_CCRA(rdb_occ, 0x43),
+ CMN_EVENT_CCRA(wdb_occ, 0x44),
+ CMN_EVENT_CCRA(ssb_occ, 0x45),
+ CMN_EVENT_CCRA(snp_bcasts, 0x46),
+ CMN_EVENT_CCRA(req_chains, 0x47),
+ CMN_EVENT_CCRA(req_chain_avglen, 0x48),
+ CMN_EVENT_CCRA(chirsp_stalls, 0x49),
+ CMN_EVENT_CCRA(chidat_stalls, 0x4a),
+ CMN_EVENT_CCRA(cxreq_pcrd_stalls_link0, 0x4b),
+ CMN_EVENT_CCRA(cxreq_pcrd_stalls_link1, 0x4c),
+ CMN_EVENT_CCRA(cxreq_pcrd_stalls_link2, 0x4d),
+ CMN_EVENT_CCRA(cxdat_pcrd_stalls_link0, 0x4e),
+ CMN_EVENT_CCRA(cxdat_pcrd_stalls_link1, 0x4f),
+ CMN_EVENT_CCRA(cxdat_pcrd_stalls_link2, 0x50),
+ CMN_EVENT_CCRA(external_chirsp_stalls, 0x51),
+ CMN_EVENT_CCRA(external_chidat_stalls, 0x52),
+ CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link0, 0x53),
+ CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link1, 0x54),
+ CMN_EVENT_CCRA(cxmisc_pcrd_stalls_link2, 0x55),
+ CMN_EVENT_CCRA(rht_alloc, 0x56),
+ CMN_EVENT_CCRA(sht_alloc, 0x57),
+ CMN_EVENT_CCRA(rdb_alloc, 0x58),
+ CMN_EVENT_CCRA(wdb_alloc, 0x59),
+ CMN_EVENT_CCRA(ssb_alloc, 0x5a),
+
+ CMN_EVENT_CCHA(rddatbyp, 0x61),
+ CMN_EVENT_CCHA(chirsp_up_stall, 0x62),
+ CMN_EVENT_CCHA(chidat_up_stall, 0x63),
+ CMN_EVENT_CCHA(snppcrd_link0_stall, 0x64),
+ CMN_EVENT_CCHA(snppcrd_link1_stall, 0x65),
+ CMN_EVENT_CCHA(snppcrd_link2_stall, 0x66),
+ CMN_EVENT_CCHA(reqtrk_occ, 0x67),
+ CMN_EVENT_CCHA(rdb_occ, 0x68),
+ CMN_EVENT_CCHA(rdbyp_occ, 0x69),
+ CMN_EVENT_CCHA(wdb_occ, 0x6a),
+ CMN_EVENT_CCHA(snptrk_occ, 0x6b),
+ CMN_EVENT_CCHA(sdb_occ, 0x6c),
+ CMN_EVENT_CCHA(snphaz_occ, 0x6d),
+ CMN_EVENT_CCHA(reqtrk_alloc, 0x6e),
+ CMN_EVENT_CCHA(rdb_alloc, 0x6f),
+ CMN_EVENT_CCHA(rdbyp_alloc, 0x70),
+ CMN_EVENT_CCHA(wdb_alloc, 0x71),
+ CMN_EVENT_CCHA(snptrk_alloc, 0x72),
+ CMN_EVENT_CCHA(sdb_alloc, 0x73),
+ CMN_EVENT_CCHA(snphaz_alloc, 0x74),
+ CMN_EVENT_CCHA(pb_rhu_req_occ, 0x75),
+ CMN_EVENT_CCHA(pb_rhu_req_alloc, 0x76),
+ CMN_EVENT_CCHA(pb_rhu_pcie_req_occ, 0x77),
+ CMN_EVENT_CCHA(pb_rhu_pcie_req_alloc, 0x78),
+ CMN_EVENT_CCHA(pb_pcie_wr_req_occ, 0x79),
+ CMN_EVENT_CCHA(pb_pcie_wr_req_alloc, 0x7a),
+ CMN_EVENT_CCHA(pb_pcie_reg_req_occ, 0x7b),
+ CMN_EVENT_CCHA(pb_pcie_reg_req_alloc, 0x7c),
+ CMN_EVENT_CCHA(pb_pcie_rsvd_req_occ, 0x7d),
+ CMN_EVENT_CCHA(pb_pcie_rsvd_req_alloc, 0x7e),
+ CMN_EVENT_CCHA(pb_rhu_dat_occ, 0x7f),
+ CMN_EVENT_CCHA(pb_rhu_dat_alloc, 0x80),
+ CMN_EVENT_CCHA(pb_rhu_pcie_dat_occ, 0x81),
+ CMN_EVENT_CCHA(pb_rhu_pcie_dat_alloc, 0x82),
+ CMN_EVENT_CCHA(pb_pcie_wr_dat_occ, 0x83),
+ CMN_EVENT_CCHA(pb_pcie_wr_dat_alloc, 0x84),
+
+ CMN_EVENT_CCLA(rx_cxs, 0x21),
+ CMN_EVENT_CCLA(tx_cxs, 0x22),
+ CMN_EVENT_CCLA(rx_cxs_avg_size, 0x23),
+ CMN_EVENT_CCLA(tx_cxs_avg_size, 0x24),
+ CMN_EVENT_CCLA(tx_cxs_lcrd_backpressure, 0x25),
+ CMN_EVENT_CCLA(link_crdbuf_occ, 0x26),
+ CMN_EVENT_CCLA(link_crdbuf_alloc, 0x27),
+ CMN_EVENT_CCLA(pfwd_rcvr_cxs, 0x28),
+ CMN_EVENT_CCLA(pfwd_sndr_num_flits, 0x29),
+ CMN_EVENT_CCLA(pfwd_sndr_stalls_static_crd, 0x2a),
+ CMN_EVENT_CCLA(pfwd_sndr_stalls_dynmaic_crd, 0x2b),
+
NULL
};
@@ -1032,6 +1309,42 @@ static void arm_cmn_event_read(struct perf_event *event)
local64_add(delta, &event->count);
}
+static int arm_cmn_set_event_sel_hi(struct arm_cmn_node *dn,
+ enum cmn_filter_select fsel, u8 occupid)
+{
+ u64 reg;
+
+ if (fsel == SEL_NONE)
+ return 0;
+
+ if (!dn->occupid[fsel].count) {
+ dn->occupid[fsel].val = occupid;
+ reg = FIELD_PREP(CMN__PMU_CBUSY_SNTHROTTLE_SEL,
+ dn->occupid[SEL_CBUSY_SNTHROTTLE_SEL].val) |
+ FIELD_PREP(CMN__PMU_CLASS_OCCUP_ID,
+ dn->occupid[SEL_CLASS_OCCUP_ID].val) |
+ FIELD_PREP(CMN__PMU_OCCUP1_ID,
+ dn->occupid[SEL_OCCUP1ID].val);
+ writel_relaxed(reg >> 32, dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
+ } else if (dn->occupid[fsel].val != occupid) {
+ return -EBUSY;
+ }
+ dn->occupid[fsel].count++;
+ return 0;
+}
+
+static void arm_cmn_set_event_sel_lo(struct arm_cmn_node *dn, int dtm_idx,
+ int eventid, bool wide_sel)
+{
+ if (wide_sel) {
+ dn->event_w[dtm_idx] = eventid;
+ writeq_relaxed(le64_to_cpu(dn->event_sel_w), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ } else {
+ dn->event[dtm_idx] = eventid;
+ writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ }
+}
+
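One consequence of the tracking in arm_cmn_set_event_sel_hi() above is worth spelling out: the first active event on a node claims the relevant filter field and bumps its refcount, and any later event using the same filter class on that node must ask for the same occupid, otherwise the helper returns -EBUSY and the event cannot be scheduled (arm_cmn_validate_group() further down enforces the same rule when the group is built). For example, hnf_qos_pocq_occupancy_read (occupid 1) and hnf_qos_pocq_occupancy_write (occupid 2) both use SEL_OCCUP1ID, so they cannot be counted on the same HN-F nodes at the same time, while either can run alongside a SEL_CLASS_OCCUP_ID event such as hnf_pocq_class_occup_class0, which lives in a different field of pmu_event_sel.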
static void arm_cmn_event_start(struct perf_event *event, int flags)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
@@ -1058,8 +1371,8 @@ static void arm_cmn_event_start(struct perf_event *event, int flags)
} else for_each_hw_dn(hw, dn, i) {
int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
- dn->event[dtm_idx] = CMN_EVENT_EVENTID(event);
- writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ arm_cmn_set_event_sel_lo(dn, dtm_idx, CMN_EVENT_EVENTID(event),
+ hw->wide_sel);
}
}
@@ -1086,8 +1399,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
} else for_each_hw_dn(hw, dn, i) {
int dtm_idx = arm_cmn_get_index(hw->dtm_idx, i);
- dn->event[dtm_idx] = 0;
- writel_relaxed(le32_to_cpu(dn->event_sel), dn->pmu_base + CMN_PMU_EVENT_SEL);
+ arm_cmn_set_event_sel_lo(dn, dtm_idx, 0, hw->wide_sel);
}
arm_cmn_event_read(event);
@@ -1095,7 +1407,7 @@ static void arm_cmn_event_stop(struct perf_event *event, int flags)
struct arm_cmn_val {
u8 dtm_count[CMN_MAX_DTMS];
- u8 occupid[CMN_MAX_DTMS];
+ u8 occupid[CMN_MAX_DTMS][SEL_MAX];
u8 wp[CMN_MAX_DTMS][4];
int dtc_count;
bool cycles;
@@ -1108,7 +1420,6 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
struct arm_cmn_node *dn;
enum cmn_node_type type;
int i;
- u8 occupid;
if (is_software_event(event))
return;
@@ -1120,16 +1431,14 @@ static void arm_cmn_val_add_event(struct arm_cmn *cmn, struct arm_cmn_val *val,
}
val->dtc_count++;
- if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
- occupid = CMN_EVENT_OCCUPID(event) + 1;
- else
- occupid = 0;
for_each_hw_dn(hw, dn, i) {
- int wp_idx, dtm = dn->dtm;
+ int wp_idx, dtm = dn->dtm, sel = hw->filter_sel;
val->dtm_count[dtm]++;
- val->occupid[dtm] = occupid;
+
+ if (sel > SEL_NONE)
+ val->occupid[dtm][sel] = CMN_EVENT_OCCUPID(event) + 1;
if (type != CMN_TYPE_WP)
continue;
@@ -1147,7 +1456,6 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
enum cmn_node_type type;
struct arm_cmn_val *val;
int i, ret = -EINVAL;
- u8 occupid;
if (leader == event)
return 0;
@@ -1172,18 +1480,14 @@ static int arm_cmn_validate_group(struct arm_cmn *cmn, struct perf_event *event)
if (val->dtc_count == CMN_DT_NUM_COUNTERS)
goto done;
- if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
- occupid = CMN_EVENT_OCCUPID(event) + 1;
- else
- occupid = 0;
-
for_each_hw_dn(hw, dn, i) {
- int wp_idx, wp_cmb, dtm = dn->dtm;
+ int wp_idx, wp_cmb, dtm = dn->dtm, sel = hw->filter_sel;
if (val->dtm_count[dtm] == CMN_DTM_NUM_COUNTERS)
goto done;
- if (occupid && val->occupid[dtm] && occupid != val->occupid[dtm])
+ if (sel > SEL_NONE && val->occupid[dtm][sel] &&
+ val->occupid[dtm][sel] != CMN_EVENT_OCCUPID(event) + 1)
goto done;
if (type != CMN_TYPE_WP)
@@ -1204,6 +1508,22 @@ done:
return ret;
}
+static enum cmn_filter_select arm_cmn_filter_sel(enum cmn_model model,
+ enum cmn_node_type type,
+ unsigned int eventid)
+{
+ struct arm_cmn_event_attr *e;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arm_cmn_event_attrs) - 1; i++) {
+ e = container_of(arm_cmn_event_attrs[i], typeof(*e), attr.attr);
+ if (e->model & model && e->type == type && e->eventid == eventid)
+ return e->fsel;
+ }
+ return SEL_NONE;
+}
+
+
static int arm_cmn_event_init(struct perf_event *event)
{
struct arm_cmn *cmn = to_cmn(event->pmu);
@@ -1228,18 +1548,23 @@ static int arm_cmn_event_init(struct perf_event *event)
if (type == CMN_TYPE_DTC)
return 0;
+ eventid = CMN_EVENT_EVENTID(event);
/* For watchpoints we need the actual XP node here */
if (type == CMN_TYPE_WP) {
type = CMN_TYPE_XP;
/* ...and we need a "real" direction */
- eventid = CMN_EVENT_EVENTID(event);
if (eventid != CMN_WP_UP && eventid != CMN_WP_DOWN)
return -EINVAL;
/* ...but the DTM may depend on which port we're watching */
if (cmn->multi_dtm)
hw->dtm_offset = CMN_EVENT_WP_DEV_SEL(event) / 2;
+ } else if (type == CMN_TYPE_XP && cmn->model == CMN700) {
+ hw->wide_sel = true;
}
+ /* This is sufficiently annoying to recalculate, so cache it */
+ hw->filter_sel = arm_cmn_filter_sel(cmn->model, type, eventid);
+
bynodeid = CMN_EVENT_BYNODEID(event);
nodeid = CMN_EVENT_NODEID(event);
@@ -1281,8 +1606,8 @@ static void arm_cmn_event_clear(struct arm_cmn *cmn, struct perf_event *event,
if (type == CMN_TYPE_WP)
dtm->wp_event[arm_cmn_wp_idx(event)] = -1;
- if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event)))
- hw->dn[i].occupid_count--;
+ if (hw->filter_sel > SEL_NONE)
+ hw->dn[i].occupid[hw->filter_sel].count--;
dtm->pmu_config_low &= ~CMN__PMEVCNT_PAIRED(dtm_idx);
writel_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
@@ -1362,18 +1687,8 @@ static int arm_cmn_event_add(struct perf_event *event, int flags)
input_sel = CMN__PMEVCNT0_INPUT_SEL_DEV + dtm_idx +
(nid.port << 4) + (nid.dev << 2);
- if (arm_cmn_is_occup_event(cmn->model, type, CMN_EVENT_EVENTID(event))) {
- u8 occupid = CMN_EVENT_OCCUPID(event);
-
- if (dn->occupid_count == 0) {
- dn->occupid_val = occupid;
- writel_relaxed(occupid,
- dn->pmu_base + CMN_PMU_EVENT_SEL + 4);
- } else if (dn->occupid_val != occupid) {
- goto free_dtms;
- }
- dn->occupid_count++;
- }
+ if (arm_cmn_set_event_sel_hi(dn, hw->filter_sel, CMN_EVENT_OCCUPID(event)))
+ goto free_dtms;
}
arm_cmn_set_index(hw->dtm_idx, i, dtm_idx);
@@ -1622,6 +1937,10 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn)
/* To the PMU, RN-Ds don't add anything over RN-Is, so smoosh them together */
if (dn->type == CMN_TYPE_RND)
dn->type = CMN_TYPE_RNI;
+
+ /* We split the RN-I off already, so let the CCLA part match CCLA events */
+ if (dn->type == CMN_TYPE_CCLA_RNI)
+ dn->type = CMN_TYPE_CCLA;
}
writel_relaxed(CMN_DT_DTC_CTL_DT_EN, cmn->dtc[0].base + CMN_DT_DTC_CTL);
@@ -1652,6 +1971,18 @@ static void arm_cmn_init_node_info(struct arm_cmn *cmn, u32 offset, struct arm_c
node->type, node->logid, offset);
}
+static enum cmn_node_type arm_cmn_subtype(enum cmn_node_type type)
+{
+ switch (type) {
+ case CMN_TYPE_HNP:
+ return CMN_TYPE_HNI;
+ case CMN_TYPE_CCLA_RNI:
+ return CMN_TYPE_RNI;
+ default:
+ return CMN_TYPE_INVALID;
+ }
+}
+
static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
{
void __iomem *cfg_region;
@@ -1676,6 +2007,10 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->rsp_vc_num = FIELD_GET(CMN_INFO_RSP_VC_NUM, reg);
cmn->dat_vc_num = FIELD_GET(CMN_INFO_DAT_VC_NUM, reg);
+ reg = readq_relaxed(cfg_region + CMN_CFGM_INFO_GLOBAL_1);
+ cmn->snp_vc_num = FIELD_GET(CMN_INFO_SNP_VC_NUM, reg);
+ cmn->req_vc_num = FIELD_GET(CMN_INFO_REQ_VC_NUM, reg);
+
reg = readq_relaxed(cfg_region + CMN_CHILD_INFO);
child_count = FIELD_GET(CMN_CI_CHILD_COUNT, reg);
child_poff = FIELD_GET(CMN_CI_CHILD_PTR_OFFSET, reg);
@@ -1692,8 +2027,13 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
cmn->num_dns += FIELD_GET(CMN_CI_CHILD_COUNT, reg);
}
- /* Cheeky +1 to help terminate pointer-based iteration later */
- dn = devm_kcalloc(cmn->dev, cmn->num_dns + 1, sizeof(*dn), GFP_KERNEL);
+ /*
+ * Some nodes effectively have two separate types, which we'll handle
+ * by creating one of each internally. For a (very) safe initial upper
+ * bound, account for double the number of non-XP nodes.
+ */
+ dn = devm_kcalloc(cmn->dev, cmn->num_dns * 2 - cmn->num_xps,
+ sizeof(*dn), GFP_KERNEL);
if (!dn)
return -ENOMEM;
@@ -1794,6 +2134,9 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_MTSX:
case CMN_TYPE_CXRA:
case CMN_TYPE_CXHA:
+ case CMN_TYPE_CCRA:
+ case CMN_TYPE_CCHA:
+ case CMN_TYPE_CCLA:
dn++;
break;
/* Nothing to see here */
@@ -1802,6 +2145,19 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
case CMN_TYPE_RNSAM:
case CMN_TYPE_CXLA:
break;
+ /*
+ * Split "optimised" combination nodes into separate
+ * types for the different event sets. Offsetting the
+ * base address lets us handle the second pmu_event_sel
+ * register via the normal mechanism later.
+ */
+ case CMN_TYPE_HNP:
+ case CMN_TYPE_CCLA_RNI:
+ dn[1] = dn[0];
+ dn[0].pmu_base += CMN_HNP_PMU_EVENT_SEL;
+ dn[1].type = arm_cmn_subtype(dn->type);
+ dn += 2;
+ break;
/* Something has gone horribly wrong */
default:
dev_err(cmn->dev, "invalid device node type: 0x%x\n", dn->type);
@@ -1810,9 +2166,10 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
}
}
- /* Correct for any nodes we skipped */
+ /* Correct for any nodes we added or skipped */
cmn->num_dns = dn - cmn->dns;
+ /* Cheeky +1 to help terminate pointer-based iteration later */
sz = (void *)(dn + 1) - (void *)cmn->dns;
dn = devm_krealloc(cmn->dev, cmn->dns, sz, GFP_KERNEL);
if (dn)
@@ -1970,6 +2327,8 @@ static int arm_cmn_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id arm_cmn_of_match[] = {
{ .compatible = "arm,cmn-600", .data = (void *)CMN600 },
+ { .compatible = "arm,cmn-650", .data = (void *)CMN650 },
+ { .compatible = "arm,cmn-700", .data = (void *)CMN700 },
{ .compatible = "arm,ci-700", .data = (void *)CI700 },
{}
};
@@ -1979,6 +2338,8 @@ MODULE_DEVICE_TABLE(of, arm_cmn_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id arm_cmn_acpi_match[] = {
{ "ARMHC600", CMN600 },
+ { "ARMHC650", CMN650 },
+ { "ARMHC700", CMN700 },
{}
};
MODULE_DEVICE_TABLE(acpi, arm_cmn_acpi_match);
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index f5c7a845cd7b..96ffadd654ff 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -159,7 +159,9 @@ static int arm_pmu_acpi_parse_irqs(void)
* them with their PMUs.
*/
per_cpu(pmu_irqs, cpu) = irq;
- armpmu_request_irq(irq, cpu);
+ err = armpmu_request_irq(irq, cpu);
+ if (err)
+ goto out_err;
}
return 0;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d44bcc29d99c..db670b265897 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1035,6 +1035,9 @@ static void __arm_spe_pmu_dev_probe(void *info)
fallthrough;
case 2:
spe_pmu->counter_sz = 12;
+ break;
+ case 3:
+ spe_pmu->counter_sz = 16;
}
dev_info(dev,
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 506ed39e3266..6be83517acaa 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
- hisi_uncore_pa_pmu.o
+ hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o
obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
new file mode 100644
index 000000000000..a9bb73f76be4
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SoC CPA (Coherency Protocol Agent) hardware event counters support
+ *
+ * Copyright (C) 2022 HiSilicon Limited
+ * Author: Qi Liu <liuqi115@huawei.com>
+ *
+ * This code is based on the uncore PMUs like arm-cci and arm-ccn.
+ */
+
+#define pr_fmt(fmt) "cpa pmu: " fmt
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/smp.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* CPA register definition */
+#define CPA_PERF_CTRL 0x1c00
+#define CPA_EVENT_CTRL 0x1c04
+#define CPA_INT_MASK 0x1c70
+#define CPA_INT_STATUS 0x1c78
+#define CPA_INT_CLEAR 0x1c7c
+#define CPA_EVENT_TYPE0 0x1c80
+#define CPA_VERSION 0x1cf0
+#define CPA_CNT0_LOWER 0x1d00
+#define CPA_CFG_REG 0x0534
+
+/* CPA operation command */
+#define CPA_PERF_CTRL_EN BIT_ULL(0)
+#define CPA_EVTYPE_MASK 0xffUL
+#define CPA_PM_CTRL BIT_ULL(9)
+
+/* CPA has 8 counters */
+#define CPA_NR_COUNTERS 0x8
+#define CPA_COUNTER_BITS 64
+#define CPA_NR_EVENTS 0xff
+#define CPA_REG_OFFSET 0x8
+
+static u32 hisi_cpa_pmu_get_counter_offset(int idx)
+{
+ return (CPA_CNT0_LOWER + idx * CPA_REG_OFFSET);
+}
+
+static u64 hisi_cpa_pmu_read_counter(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ return readq(cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_cpa_pmu_write_counter(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ writeq(val, cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
+}
+
+static void hisi_cpa_pmu_write_evtype(struct hisi_pmu *cpa_pmu, int idx,
+ u32 type)
+{
+ u32 reg, reg_idx, shift, val;
+
+ /*
+	 * Select the appropriate event select register (CPA_EVENT_TYPE0/1).
+	 * There are 2 event select registers for the 8 hardware counters.
+	 * Each event code is 8 bits wide: the first 4 hardware counters use
+	 * CPA_EVENT_TYPE0 and the last 4 hardware counters use
+	 * CPA_EVENT_TYPE1.
+ */
+ reg = CPA_EVENT_TYPE0 + (idx / 4) * 4;
+ reg_idx = idx % 4;
+ shift = CPA_REG_OFFSET * reg_idx;
+
+ /* Write event code to CPA_EVENT_TYPEx Register */
+ val = readl(cpa_pmu->base + reg);
+ val &= ~(CPA_EVTYPE_MASK << shift);
+ val |= type << shift;
+ writel(val, cpa_pmu->base + reg);
+}
+
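A quick worked example of the register selection above, using the #defines from the top of this file: for counter idx = 6, reg = CPA_EVENT_TYPE0 + (6 / 4) * 4 = 0x1c80 + 0x4 = 0x1c84 (i.e. CPA_EVENT_TYPE1), reg_idx = 6 % 4 = 2 and shift = 0x8 * 2 = 16, so the 8-bit event code ends up in bits [23:16] of CPA_EVENT_TYPE1.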
+static void hisi_cpa_pmu_start_counters(struct hisi_pmu *cpa_pmu)
+{
+ u32 val;
+
+ val = readl(cpa_pmu->base + CPA_PERF_CTRL);
+ val |= CPA_PERF_CTRL_EN;
+ writel(val, cpa_pmu->base + CPA_PERF_CTRL);
+}
+
+static void hisi_cpa_pmu_stop_counters(struct hisi_pmu *cpa_pmu)
+{
+ u32 val;
+
+ val = readl(cpa_pmu->base + CPA_PERF_CTRL);
+ val &= ~(CPA_PERF_CTRL_EN);
+ writel(val, cpa_pmu->base + CPA_PERF_CTRL);
+}
+
+static void hisi_cpa_pmu_disable_pm(struct hisi_pmu *cpa_pmu)
+{
+ u32 val;
+
+ val = readl(cpa_pmu->base + CPA_CFG_REG);
+ val |= CPA_PM_CTRL;
+ writel(val, cpa_pmu->base + CPA_CFG_REG);
+}
+
+static void hisi_cpa_pmu_enable_pm(struct hisi_pmu *cpa_pmu)
+{
+ u32 val;
+
+ val = readl(cpa_pmu->base + CPA_CFG_REG);
+ val &= ~(CPA_PM_CTRL);
+ writel(val, cpa_pmu->base + CPA_CFG_REG);
+}
+
+static void hisi_cpa_pmu_enable_counter(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Enable counter index in CPA_EVENT_CTRL register */
+ val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
+ val |= 1 << hwc->idx;
+ writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
+}
+
+static void hisi_cpa_pmu_disable_counter(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Clear counter index in CPA_EVENT_CTRL register */
+ val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
+ val &= ~(1UL << hwc->idx);
+ writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
+}
+
+static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 0 to enable interrupt */
+ val = readl(cpa_pmu->base + CPA_INT_MASK);
+ val &= ~(1UL << hwc->idx);
+ writel(val, cpa_pmu->base + CPA_INT_MASK);
+}
+
+static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu *cpa_pmu,
+ struct hw_perf_event *hwc)
+{
+ u32 val;
+
+ /* Write 1 to mask interrupt */
+ val = readl(cpa_pmu->base + CPA_INT_MASK);
+ val |= 1 << hwc->idx;
+ writel(val, cpa_pmu->base + CPA_INT_MASK);
+}
+
+static u32 hisi_cpa_pmu_get_int_status(struct hisi_pmu *cpa_pmu)
+{
+ return readl(cpa_pmu->base + CPA_INT_STATUS);
+}
+
+static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu *cpa_pmu, int idx)
+{
+ writel(1 << idx, cpa_pmu->base + CPA_INT_CLEAR);
+}
+
+static const struct acpi_device_id hisi_cpa_pmu_acpi_match[] = {
+ { "HISI0281", },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);
+
+static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
+ struct hisi_pmu *cpa_pmu)
+{
+ if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
+ &cpa_pmu->sicl_id)) {
+ dev_err(&pdev->dev, "Can not read sicl-id\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
+ &cpa_pmu->index_id)) {
+ dev_err(&pdev->dev, "Cannot read idx-id\n");
+ return -EINVAL;
+ }
+
+ cpa_pmu->ccl_id = -1;
+ cpa_pmu->sccl_id = -1;
+ cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cpa_pmu->base))
+ return PTR_ERR(cpa_pmu->base);
+
+ cpa_pmu->identifier = readl(cpa_pmu->base + CPA_VERSION);
+
+ return 0;
+}
+
+static struct attribute *hisi_cpa_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-15"),
+ NULL
+};
+
+static const struct attribute_group hisi_cpa_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_cpa_pmu_format_attr,
+};
+
+static struct attribute *hisi_cpa_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(cpa_cycles, 0x00),
+ HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat, 0x61),
+ HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat, 0x62),
+ HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat, 0xE1),
+ HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat, 0xE2),
+ NULL
+};
+
+static const struct attribute_group hisi_cpa_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_cpa_pmu_events_attr,
+};
+
+static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);
+
+static struct attribute *hisi_cpa_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_cpa_pmu_cpumask_attr_group = {
+ .attrs = hisi_cpa_pmu_cpumask_attrs,
+};
+
+static struct device_attribute hisi_cpa_pmu_identifier_attr =
+ __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);
+
+static struct attribute *hisi_cpa_pmu_identifier_attrs[] = {
+ &hisi_cpa_pmu_identifier_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hisi_cpa_pmu_identifier_group = {
+ .attrs = hisi_cpa_pmu_identifier_attrs,
+};
+
+static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
+ &hisi_cpa_pmu_format_group,
+ &hisi_cpa_pmu_events_group,
+ &hisi_cpa_pmu_cpumask_attr_group,
+ &hisi_cpa_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops = {
+ .write_evtype = hisi_cpa_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_cpa_pmu_start_counters,
+ .stop_counters = hisi_cpa_pmu_stop_counters,
+ .enable_counter = hisi_cpa_pmu_enable_counter,
+ .disable_counter = hisi_cpa_pmu_disable_counter,
+ .enable_counter_int = hisi_cpa_pmu_enable_counter_int,
+ .disable_counter_int = hisi_cpa_pmu_disable_counter_int,
+ .write_counter = hisi_cpa_pmu_write_counter,
+ .read_counter = hisi_cpa_pmu_read_counter,
+ .get_int_status = hisi_cpa_pmu_get_int_status,
+ .clear_int_status = hisi_cpa_pmu_clear_int_status,
+};
+
+static int hisi_cpa_pmu_dev_probe(struct platform_device *pdev,
+ struct hisi_pmu *cpa_pmu)
+{
+ int ret;
+
+ ret = hisi_cpa_pmu_init_data(pdev, cpa_pmu);
+ if (ret)
+ return ret;
+
+ ret = hisi_uncore_pmu_init_irq(cpa_pmu, pdev);
+ if (ret)
+ return ret;
+
+ cpa_pmu->counter_bits = CPA_COUNTER_BITS;
+ cpa_pmu->check_event = CPA_NR_EVENTS;
+ cpa_pmu->pmu_events.attr_groups = hisi_cpa_pmu_attr_groups;
+ cpa_pmu->ops = &hisi_uncore_cpa_pmu_ops;
+ cpa_pmu->num_counters = CPA_NR_COUNTERS;
+ cpa_pmu->dev = &pdev->dev;
+ cpa_pmu->on_cpu = -1;
+
+ return 0;
+}
+
+static int hisi_cpa_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *cpa_pmu;
+ char *name;
+ int ret;
+
+ cpa_pmu = devm_kzalloc(&pdev->dev, sizeof(*cpa_pmu), GFP_KERNEL);
+ if (!cpa_pmu)
+ return -ENOMEM;
+
+ ret = hisi_cpa_pmu_dev_probe(pdev, cpa_pmu);
+ if (ret)
+ return ret;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%u",
+ cpa_pmu->sicl_id, cpa_pmu->index_id);
+ if (!name)
+ return -ENOMEM;
+
+ cpa_pmu->pmu = (struct pmu) {
+ .name = name,
+ .module = THIS_MODULE,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = hisi_uncore_pmu_event_init,
+ .pmu_enable = hisi_uncore_pmu_enable,
+ .pmu_disable = hisi_uncore_pmu_disable,
+ .add = hisi_uncore_pmu_add,
+ .del = hisi_uncore_pmu_del,
+ .start = hisi_uncore_pmu_start,
+ .stop = hisi_uncore_pmu_stop,
+ .read = hisi_uncore_pmu_read,
+ .attr_groups = cpa_pmu->pmu_events.attr_groups,
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ };
+
+ /* Power Management should be disabled before using CPA PMU. */
+ hisi_cpa_pmu_disable_pm(cpa_pmu);
+ ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
+ &cpa_pmu->node);
+ if (ret) {
+ dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+ hisi_cpa_pmu_enable_pm(cpa_pmu);
+ return ret;
+ }
+
+ ret = perf_pmu_register(&cpa_pmu->pmu, name, -1);
+ if (ret) {
+ dev_err(cpa_pmu->dev, "PMU register failed\n");
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node);
+ hisi_cpa_pmu_enable_pm(cpa_pmu);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, cpa_pmu);
+ return ret;
+}
+
+static int hisi_cpa_pmu_remove(struct platform_device *pdev)
+{
+ struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev);
+
+ perf_pmu_unregister(&cpa_pmu->pmu);
+ cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
+ &cpa_pmu->node);
+ hisi_cpa_pmu_enable_pm(cpa_pmu);
+ return 0;
+}
+
+static struct platform_driver hisi_cpa_pmu_driver = {
+ .driver = {
+ .name = "hisi_cpa_pmu",
+ .acpi_match_table = ACPI_PTR(hisi_cpa_pmu_acpi_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_cpa_pmu_probe,
+ .remove = hisi_cpa_pmu_remove,
+};
+
+static int __init hisi_cpa_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
+ "AP_PERF_ARM_HISI_CPA_ONLINE",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("setup hotplug failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&hisi_cpa_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);
+
+ return ret;
+}
+module_init(hisi_cpa_pmu_module_init);
+
+static void __exit hisi_cpa_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_cpa_pmu_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);
+}
+module_exit(hisi_cpa_pmu_module_exit);
+
+MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
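Once the driver binds, the PMU is registered under the name built in hisi_cpa_pmu_probe() above, e.g. hisi_sicl0_cpa0 for SICL 0 / index 0 (the instance numbers here are illustrative; they come from the firmware-provided hisilicon,scl-id and hisilicon,idx-id properties). The events listed in hisi_cpa_pmu_events_attr can then be counted system-wide with perf, for example:

	perf stat -a -e hisi_sicl0_cpa0/cpa_cycles/,hisi_sicl0_cpa0/cpa_p0_rd_dat/ sleep 1

Note that, as the probe/remove paths above show, the agent's power management stays disabled for as long as the driver is bound, not just while events are active.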
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index bad99d149172..a0ee84d97c41 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -258,13 +258,12 @@ static int hisi_pa_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *pa_pmu)
{
/*
- * Use the SCCL_ID and the index ID to identify the PA PMU,
- * while SCCL_ID is the nearst SCCL_ID from this SICL and
- * CPU core is chosen from this SCCL to manage this PMU.
+ * As PA PMU is in a SICL, use the SICL_ID and the index ID
+ * to identify the PA PMU.
*/
if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
- &pa_pmu->sccl_id)) {
- dev_err(&pdev->dev, "Cannot read sccl-id!\n");
+ &pa_pmu->sicl_id)) {
+ dev_err(&pdev->dev, "Cannot read sicl-id!\n");
return -EINVAL;
}
@@ -275,6 +274,7 @@ static int hisi_pa_pmu_init_data(struct platform_device *pdev,
}
pa_pmu->ccl_id = -1;
+ pa_pmu->sccl_id = -1;
pa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pa_pmu->base)) {
@@ -399,13 +399,9 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
ret = hisi_pa_pmu_dev_probe(pdev, pa_pmu);
if (ret)
return ret;
- /*
- * PA is attached in SICL and the CPU core is chosen to manage this
- * PMU which is the nearest SCCL, while its SCCL_ID is greater than
- * one with the SICL_ID.
- */
+
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%u_pa%u",
- pa_pmu->sccl_id - 1, pa_pmu->index_id);
+ pa_pmu->sicl_id, pa_pmu->index_id);
if (!name)
return -ENOMEM;
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index 358e4e284a62..980b9ee6eb14 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -458,6 +458,10 @@ static bool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{
int sccl_id, ccl_id;
+ /* If SCCL_ID is -1, the PMU is in a SICL and has no CPU affinity */
+ if (hisi_pmu->sccl_id == -1)
+ return true;
+
if (hisi_pmu->ccl_id == -1) {
/* If CCL_ID is -1, the PMU only shares the same SCCL */
hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 7f5841d6f592..96eeddad55ff 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -81,6 +81,7 @@ struct hisi_pmu {
struct device *dev;
struct hlist_node node;
int sccl_id;
+ int sicl_id;
int ccl_id;
void __iomem *base;
/* the ID of the PMU modules */
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index ee67305f822d..282d3a071a67 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -146,12 +146,12 @@ static int tad_pmu_event_init(struct perf_event *event)
{
struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
- if (!event->attr.disabled)
- return -EINVAL;
-
if (event->attr.type != event->pmu->type)
return -ENOENT;
+ if (!event->attr.disabled)
+ return -EINVAL;
+
if (event->state != PERF_EVENT_STATE_OFF)
return -EINVAL;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index a1317a483512..dca3537a8dcc 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -35,7 +35,7 @@ union sbi_pmu_ctr_info {
};
};
-/**
+/*
 * RISC-V doesn't have heterogeneous harts yet. This needs to be part of
* per_cpu in case of harts with different pmu counters
*/
@@ -477,7 +477,7 @@ static int pmu_sbi_get_ctrinfo(int nctr)
static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
{
- /**
+ /*
* No need to check the error because we are disabling all the counters
* which may include counters that are not enabled yet.
*/
@@ -494,7 +494,7 @@ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
cpu_hw_evt->used_hw_ctrs[0], 0, 0, 0, 0);
}
-/**
+/*
 * This function starts all the used counters in a two-step approach.
 * Any counter that did not overflow can be started in a single step
* while the overflowed counters need to be started with updated initialization
@@ -563,7 +563,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
	/* Overflow status register should only be read after the counters are stopped */
overflow = csr_read(CSR_SSCOUNTOVF);
- /**
+ /*
* Overflow interrupt pending bit should only be cleared after stopping
* all the counters to avoid any race condition.
*/
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index be6c24577dbe..9bf0616a3069 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2565,7 +2565,12 @@ static noinline int search_ioctl(struct inode *inode,
while (1) {
ret = -EFAULT;
- if (fault_in_writeable(ubuf + sk_offset, *buf_size - sk_offset))
+ /*
+ * Ensure that the whole user buffer is faulted in at sub-page
+ * granularity, otherwise the loop may live-lock.
+ */
+ if (fault_in_subpage_writeable(ubuf + sk_offset,
+ *buf_size - sk_offset))
break;
ret = btrfs_search_forward(root, &key, path, sk->min_transid);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 82e33137f917..b66c5f389159 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -222,6 +222,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_S390_SF_ONLINE,
CPUHP_AP_PERF_ARM_CCI_ONLINE,
CPUHP_AP_PERF_ARM_CCN_ONLINE,
+ CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE,
CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 993994cd943a..6165283bdb6f 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1046,6 +1046,7 @@ void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
* Fault in userspace address range.
*/
size_t fault_in_writeable(char __user *uaddr, size_t size);
+size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 546179418ffa..5a328cf02b75 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -231,6 +231,28 @@ static inline bool pagefault_disabled(void)
*/
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
+#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS
+
+/**
+ * probe_subpage_writeable: probe the user range for write faults at sub-page
+ * granularity (e.g. arm64 MTE)
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Returns 0 on success, the number of bytes not probed on fault.
+ *
+ * It is expected that the caller checked for the write permission of each
+ * page in the range either by put_user() or GUP. The architecture port can
+ * implement a more efficient get_user() probing if the same sub-page faults
+ * are triggered by either a read or a write.
+ */
+static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
+
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline __must_check unsigned long
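The static inline above is only the !CONFIG_ARCH_HAS_SUBPAGE_FAULTS fallback. A minimal sketch of what an architecture-side override could look like, assuming the arch selects ARCH_HAS_SUBPAGE_FAULTS and exposes the helper from its own asm/uaccess.h; the granule size and the arch_probe_granule_writeable() primitive are illustrative assumptions (alignment handling omitted), not the actual arm64 MTE implementation:

#define SUBPAGE_PROBE_GRANULE	16	/* hypothetical sub-page granule */

static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	char __user *p = uaddr;
	char __user *end = uaddr + size;

	while (p < end) {
		/* hypothetical per-granule probe; false on a sub-page fault */
		if (!arch_probe_granule_writeable(p))
			return end - p;	/* bytes not probed */
		p += SUBPAGE_PROBE_GRANULE;
	}
	return 0;
}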
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 787c657bfae8..97808f958903 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -431,6 +431,8 @@ typedef struct elf64_shdr {
#define NT_ARM_PACG_KEYS 0x408 /* ARM pointer authentication generic key */
#define NT_ARM_TAGGED_ADDR_CTRL 0x409 /* arm64 tagged address control (prctl()) */
#define NT_ARM_PAC_ENABLED_KEYS 0x40a /* arm64 ptr auth enabled keys (prctl()) */
+#define NT_ARM_SSVE 0x40b /* ARM Streaming SVE registers */
+#define NT_ARM_ZA 0x40c /* ARM SME ZA registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index e998764f0262..a5e06dcbba13 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -272,6 +272,15 @@ struct prctl_mm_map {
# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+/* arm64 Scalable Matrix Extension controls */
+/* Flag values must be in sync with SVE versions */
+#define PR_SME_SET_VL 63 /* set task vector length */
+# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SME_GET_VL 64 /* get task vector length */
+/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */
+# define PR_SME_VL_LEN_MASK 0xffff
+# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */
+
#define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
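A minimal userspace sketch of the new controls, assuming the SME prctls mirror the documented SVE ones (which the comment above about keeping flag values in sync implies); the requested vector length of 32 bytes is purely illustrative and the kernel may clamp it to a supported value:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SME_SET_VL
#define PR_SME_SET_VL		63
#define PR_SME_GET_VL		64
#define PR_SME_VL_LEN_MASK	0xffff
#endif

int main(void)
{
	long ret;

	/* Request a 32-byte streaming vector length for this task */
	if (prctl(PR_SME_SET_VL, 32, 0, 0, 0) < 0)
		perror("PR_SME_SET_VL");

	/* Read back the configured length; flag bits live above bit 15 */
	ret = prctl(PR_SME_GET_VL, 0, 0, 0, 0);
	if (ret < 0)
		perror("PR_SME_GET_VL");
	else
		printf("SME vector length: %ld bytes\n", ret & PR_SME_VL_LEN_MASK);

	return 0;
}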
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 256cf6db573c..4d57c03714f4 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -243,9 +243,8 @@ static int __init __parse_crashkernel(char *cmdline,
*crash_base = 0;
ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
-
if (!ck_cmdline)
- return -EINVAL;
+ return -ENOENT;
ck_cmdline += strlen(name);
diff --git a/kernel/sys.c b/kernel/sys.c
index 374f83e95239..b911fa6d81ab 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -117,6 +117,12 @@
#ifndef SVE_GET_VL
# define SVE_GET_VL() (-EINVAL)
#endif
+#ifndef SME_SET_VL
+# define SME_SET_VL(a) (-EINVAL)
+#endif
+#ifndef SME_GET_VL
+# define SME_GET_VL() (-EINVAL)
+#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b) (-EINVAL)
#endif
@@ -2541,6 +2547,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_SVE_GET_VL:
error = SVE_GET_VL();
break;
+ case PR_SME_SET_VL:
+ error = SME_SET_VL(arg2);
+ break;
+ case PR_SME_GET_VL:
+ error = SME_GET_VL();
+ break;
case PR_GET_SPECULATION_CTRL:
if (arg3 || arg4 || arg5)
return -EINVAL;
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 8f4fb328133a..289311680c29 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -30,6 +30,24 @@ int ftrace_graph_active;
 /* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
+/*
+ * archs can override this function if they must do something
+ * to enable the hook for the graph tracer.
+ */
+int __weak ftrace_enable_ftrace_graph_caller(void)
+{
+ return 0;
+}
+
+/*
+ * archs can override this function if they must do something
+ * to disable the hook for the graph tracer.
+ */
+int __weak ftrace_disable_ftrace_graph_caller(void)
+{
+ return 0;
+}
+
/**
* ftrace_graph_stop - set to permanently disable function graph tracing
*
diff --git a/mm/gup.c b/mm/gup.c
index f598a037eb04..501bc150792c 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1648,6 +1648,35 @@ out:
}
EXPORT_SYMBOL(fault_in_writeable);
+/**
+ * fault_in_subpage_writeable - fault in an address range for writing
+ * @uaddr: start of address range
+ * @size: size of address range
+ *
+ * Fault in a user address range for writing while checking for permissions at
+ * sub-page granularity (e.g. arm64 MTE). This function should be used when
+ * the caller cannot guarantee forward progress of a copy_to_user() loop.
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
+ */
+size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
+{
+ size_t faulted_in;
+
+ /*
+ * Attempt faulting in at page granularity first for page table
+ * permission checking. The arch-specific probe_subpage_writeable()
+ * functions may not check for this.
+ */
+ faulted_in = size - fault_in_writeable(uaddr, size);
+ if (faulted_in)
+ faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
+
+ return size - faulted_in;
+}
+EXPORT_SYMBOL(fault_in_subpage_writeable);
+
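A hedged sketch of the intended caller pattern, modelled on the btrfs search_ioctl() change earlier in this patch rather than on any existing helper: page-granularity fault-in can succeed while the copy keeps faulting on a sub-page check (e.g. an MTE tag mismatch), so a plain fault_in_writeable()/copy retry loop could spin forever; the sub-page variant lets the caller detect that and bail out.

/* illustrative kernel-side helper, not part of this patch;
 * fault_in_subpage_writeable() is declared in <linux/pagemap.h> */
static int copy_out_retry(void __user *ubuf, const void *kbuf, size_t len)
{
	while (copy_to_user(ubuf, kbuf, len)) {
		/* non-zero return: some bytes could not be faulted in */
		if (fault_in_subpage_writeable(ubuf, len))
			return -EFAULT;
	}
	return 0;
}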
/*
* fault_in_safe_writeable - fault in an address range for writing
* @uaddr: start of address range