-rw-r--r--Documentation/admin-guide/cgroup-v2.rst3
-rw-r--r--Documentation/scheduler/index.rst1
-rw-r--r--Documentation/scheduler/sched-util-clamp.rst741
-rw-r--r--arch/alpha/kernel/process.c1
-rw-r--r--arch/alpha/kernel/vmlinux.lds.S1
-rw-r--r--arch/arc/kernel/process.c3
-rw-r--r--arch/arc/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/include/asm/vmlinux.lds.h1
-rw-r--r--arch/arm/kernel/cpuidle.c4
-rw-r--r--arch/arm/kernel/process.c1
-rw-r--r--arch/arm/kernel/smp.c6
-rw-r--r--arch/arm/mach-davinci/cpuidle.c4
-rw-r--r--arch/arm/mach-gemini/board-dt.c3
-rw-r--r--arch/arm/mach-imx/cpuidle-imx5.c4
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c8
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sl.c4
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sx.c9
-rw-r--r--arch/arm/mach-imx/cpuidle-imx7ulp.c4
-rw-r--r--arch/arm/mach-omap2/common.h6
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c16
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c29
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c12
-rw-r--r--arch/arm/mach-omap2/pm.h2
-rw-r--r--arch/arm/mach-omap2/pm34xx.c14
-rw-r--r--arch/arm/mach-omap2/pm44xx.c2
-rw-r--r--arch/arm/mach-omap2/powerdomain.c10
-rw-r--r--arch/arm/mach-s3c/cpuidle-s3c64xx.c5
-rw-r--r--arch/arm64/kernel/cpuidle.c6
-rw-r--r--arch/arm64/kernel/idle.c1
-rw-r--r--arch/arm64/kernel/smp.c4
-rw-r--r--arch/arm64/kernel/suspend.c12
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S1
-rw-r--r--arch/csky/kernel/process.c1
-rw-r--r--arch/csky/kernel/smp.c2
-rw-r--r--arch/csky/kernel/vmlinux.lds.S1
-rw-r--r--arch/hexagon/kernel/process.c1
-rw-r--r--arch/hexagon/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/kernel/process.c1
-rw-r--r--arch/ia64/kernel/time.c1
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S1
-rw-r--r--arch/loongarch/kernel/idle.c1
-rw-r--r--arch/loongarch/kernel/vmlinux.lds.S1
-rw-r--r--arch/m68k/kernel/vmlinux-nommu.lds1
-rw-r--r--arch/m68k/kernel/vmlinux-std.lds1
-rw-r--r--arch/m68k/kernel/vmlinux-sun3.lds1
-rw-r--r--arch/microblaze/kernel/process.c1
-rw-r--r--arch/microblaze/kernel/vmlinux.lds.S1
-rw-r--r--arch/mips/kernel/idle.c14
-rw-r--r--arch/mips/kernel/vmlinux.lds.S1
-rw-r--r--arch/nios2/kernel/process.c1
-rw-r--r--arch/nios2/kernel/vmlinux.lds.S1
-rw-r--r--arch/openrisc/kernel/process.c1
-rw-r--r--arch/openrisc/kernel/vmlinux.lds.S1
-rw-r--r--arch/parisc/kernel/process.c2
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S1
-rw-r--r--arch/powerpc/kernel/idle.c5
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S1
-rw-r--r--arch/riscv/kernel/process.c1
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S1
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/kernel/idle.c3
-rw-r--r--arch/s390/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/sh/kernel/idle.c1
-rw-r--r--arch/sh/kernel/vmlinux.lds.S1
-rw-r--r--arch/sparc/kernel/leon_pmc.c4
-rw-r--r--arch/sparc/kernel/process_32.c1
-rw-r--r--arch/sparc/kernel/process_64.c3
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S1
-rw-r--r--arch/um/kernel/dyn.lds.S1
-rw-r--r--arch/um/kernel/process.c1
-rw-r--r--arch/um/kernel/uml.lds.S1
-rw-r--r--arch/x86/boot/compressed/vmlinux.lds.S1
-rw-r--r--arch/x86/coco/tdx/tdcall.S15
-rw-r--r--arch/x86/coco/tdx/tdx.c25
-rw-r--r--arch/x86/events/amd/brs.c13
-rw-r--r--arch/x86/include/asm/atomic64_32.h44
-rw-r--r--arch/x86/include/asm/atomic64_64.h36
-rw-r--r--arch/x86/include/asm/fpu/xcr.h4
-rw-r--r--arch/x86/include/asm/irqflags.h11
-rw-r--r--arch/x86/include/asm/kvmclock.h2
-rw-r--r--arch/x86/include/asm/mwait.h14
-rw-r--r--arch/x86/include/asm/nospec-branch.h2
-rw-r--r--arch/x86/include/asm/paravirt.h8
-rw-r--r--arch/x86/include/asm/perf_event.h2
-rw-r--r--arch/x86/include/asm/pvclock.h3
-rw-r--r--arch/x86/include/asm/shared/io.h4
-rw-r--r--arch/x86/include/asm/shared/tdx.h1
-rw-r--r--arch/x86/include/asm/special_insns.h8
-rw-r--r--arch/x86/include/asm/xen/hypercall.h2
-rw-r--r--arch/x86/kernel/cpu/bugs.c2
-rw-r--r--arch/x86/kernel/cpu/vmware.c2
-rw-r--r--arch/x86/kernel/fpu/core.c4
-rw-r--r--arch/x86/kernel/kvmclock.c6
-rw-r--r--arch/x86/kernel/paravirt.c14
-rw-r--r--arch/x86/kernel/process.c65
-rw-r--r--arch/x86/kernel/pvclock.c22
-rw-r--r--arch/x86/kernel/tsc.c7
-rw-r--r--arch/x86/kernel/vmlinux.lds.S1
-rw-r--r--arch/x86/lib/memcpy_64.S5
-rw-r--r--arch/x86/lib/memmove_64.S4
-rw-r--r--arch/x86/lib/memset_64.S4
-rw-r--r--arch/x86/xen/enlighten_pv.c2
-rw-r--r--arch/x86/xen/irq.c2
-rw-r--r--arch/x86/xen/time.c12
-rw-r--r--arch/xtensa/kernel/process.c1
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S1
-rw-r--r--drivers/acpi/processor_idle.c28
-rw-r--r--drivers/base/power/runtime.c24
-rw-r--r--drivers/clk/clk.c8
-rw-r--r--drivers/cpuidle/cpuidle-arm.c4
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c12
-rw-r--r--drivers/cpuidle/cpuidle-mvebu-v7.c15
-rw-r--r--drivers/cpuidle/cpuidle-psci.c22
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c4
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c19
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c31
-rw-r--r--drivers/cpuidle/cpuidle.c72
-rw-r--r--drivers/cpuidle/dt_idle_states.c2
-rw-r--r--drivers/cpuidle/poll_state.c8
-rw-r--r--drivers/firmware/psci/psci.c42
-rw-r--r--drivers/idle/intel_idle.c19
-rw-r--r--drivers/perf/arm_pmu.c11
-rw-r--r--drivers/perf/riscv_pmu_sbi.c8
-rw-r--r--fs/binfmt_elf.c5
-rw-r--r--fs/exec.c4
-rw-r--r--include/asm-generic/vmlinux.lds.h9
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/clockchips.h4
-rw-r--r--include/linux/compiler_types.h18
-rw-r--r--include/linux/context_tracking.h27
-rw-r--r--include/linux/cpu.h3
-rw-r--r--include/linux/cpuidle.h50
-rw-r--r--include/linux/cpumask.h4
-rw-r--r--include/linux/math64.h4
-rw-r--r--include/linux/mm.h25
-rw-r--r--include/linux/mm_types.h43
-rw-r--r--include/linux/percpu-defs.h2
-rw-r--r--include/linux/sched.h9
-rw-r--r--include/linux/sched/clock.h8
-rw-r--r--include/linux/sched/cputime.h9
-rw-r--r--include/linux/sched/idle.h40
-rw-r--r--include/linux/thread_info.h18
-rw-r--r--include/linux/trace_recursion.h18
-rw-r--r--include/linux/tracepoint.h15
-rw-r--r--include/trace/events/rseq.h7
-rw-r--r--include/uapi/linux/auxvec.h2
-rw-r--r--include/uapi/linux/membarrier.h4
-rw-r--r--include/uapi/linux/rseq.h22
-rw-r--r--init/Kconfig4
-rw-r--r--kernel/context_tracking.c12
-rw-r--r--kernel/cpu_pm.c9
-rw-r--r--kernel/exit.c7
-rw-r--r--kernel/fork.c8
-rw-r--r--kernel/locking/lockdep.c3
-rw-r--r--kernel/panic.c5
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/rseq.c65
-rw-r--r--kernel/sched/clock.c27
-rw-r--r--kernel/sched/core.c134
-rw-r--r--kernel/sched/cpufreq_schedutil.c43
-rw-r--r--kernel/sched/cputime.c4
-rw-r--r--kernel/sched/deadline.c42
-rw-r--r--kernel/sched/fair.c389
-rw-r--r--kernel/sched/idle.c47
-rw-r--r--kernel/sched/membarrier.c39
-rw-r--r--kernel/sched/rt.c5
-rw-r--r--kernel/sched/sched.h107
-rw-r--r--kernel/sched/topology.c4
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c29
-rw-r--r--kernel/time/tick-broadcast.c6
-rw-r--r--kernel/trace/trace.c3
-rw-r--r--kernel/trace/trace_preemptirq.c61
-rw-r--r--lib/bug.c15
-rw-r--r--lib/ubsan.c5
-rw-r--r--mm/kasan/kasan.h4
-rw-r--r--mm/kasan/shadow.c38
-rw-r--r--tools/objtool/check.c14
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c16
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test_impl.h33
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test_multi_thread.c2
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test_single_thread.c6
-rw-r--r--tools/testing/selftests/rseq/.gitignore4
-rw-r--r--tools/testing/selftests/rseq/Makefile20
-rw-r--r--tools/testing/selftests/rseq/basic_percpu_ops_test.c46
-rw-r--r--tools/testing/selftests/rseq/basic_test.c4
-rw-r--r--tools/testing/selftests/rseq/compiler.h6
-rw-r--r--tools/testing/selftests/rseq/param_test.c157
-rw-r--r--tools/testing/selftests/rseq/rseq-abi.h22
-rw-r--r--tools/testing/selftests/rseq/rseq-arm-bits.h505
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h701
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64-bits.h392
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h520
-rw-r--r--tools/testing/selftests/rseq/rseq-bits-reset.h11
-rw-r--r--tools/testing/selftests/rseq/rseq-bits-template.h41
-rw-r--r--tools/testing/selftests/rseq/rseq-mips-bits.h462
-rw-r--r--tools/testing/selftests/rseq/rseq-mips.h646
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc-bits.h454
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc.h617
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv-bits.h410
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv.h529
-rw-r--r--tools/testing/selftests/rseq/rseq-s390-bits.h474
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h495
-rw-r--r--tools/testing/selftests/rseq/rseq-skip.h65
-rw-r--r--tools/testing/selftests/rseq/rseq-x86-bits.h993
-rw-r--r--tools/testing/selftests/rseq/rseq-x86.h1193
-rw-r--r--tools/testing/selftests/rseq/rseq.c91
-rw-r--r--tools/testing/selftests/rseq/rseq.h215
-rwxr-xr-xtools/testing/selftests/rseq/run_param_test.sh5
211 files changed, 6782 insertions, 5504 deletions
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 74cec76be9f2..5db4c4dd5bb4 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -619,6 +619,8 @@ process migrations.
and is an example of this type.
+.. _cgroupv2-limits-distributor:
+
Limits
------
@@ -635,6 +637,7 @@ process migrations.
"io.max" limits the maximum BPS and/or IOPS that a cgroup can consume
on an IO device and is an example of this type.
+.. _cgroupv2-protections-distributor:
Protections
-----------
diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst
index b430d856056a..f12d0d06de3a 100644
--- a/Documentation/scheduler/index.rst
+++ b/Documentation/scheduler/index.rst
@@ -15,6 +15,7 @@ Linux Scheduler
sched-capacity
sched-energy
schedutil
+ sched-util-clamp
sched-nice-design
sched-rt-group
sched-stats
diff --git a/Documentation/scheduler/sched-util-clamp.rst b/Documentation/scheduler/sched-util-clamp.rst
new file mode 100644
index 000000000000..74d5b7c6431d
--- /dev/null
+++ b/Documentation/scheduler/sched-util-clamp.rst
@@ -0,0 +1,741 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+Utilization Clamping
+====================
+
+1. Introduction
+===============
+
+Utilization clamping, also known as util clamp or uclamp, is a scheduler
+feature that allows user space to help in managing the performance
+requirements of tasks. It was introduced in the v5.3 release. The cgroup
+support was merged in v5.4.
+
+Uclamp is a hinting mechanism that allows the scheduler to understand the
+performance requirements and restrictions of tasks, thus helping it make
+better decisions. When the schedutil cpufreq governor is used, util clamp
+will influence the CPU frequency selection as well.
+
+Since the scheduler and schedutil are both driven by PELT (util_avg) signals,
+util clamp acts on that to achieve its goal by clamping the signal to a certain
+point; hence the name. That is, by clamping utilization we are making the
+system run at a certain performance point.
+
+The right way to view util clamp is as a mechanism to make a request or hint
+about performance constraints. It consists of two tunables:
+
+ * UCLAMP_MIN, which sets the lower bound.
+ * UCLAMP_MAX, which sets the upper bound.
+
+These two bounds will ensure a task will operate within this performance range
+of the system. UCLAMP_MIN implies boosting a task, while UCLAMP_MAX implies
+capping a task.
+
+One can tell the system (scheduler) that some tasks require a minimum
+performance point to operate at to deliver the desired user experience. Or one
+can tell the system that some tasks should be restricted from consuming too
+much resources and should not go above a specific performance point. Viewing
+the uclamp values as performance points rather than utilization is a better
+abstraction from user space point of view.
+
+As an example, a game can use util clamp to form a feedback loop with its
+perceived Frames Per Second (FPS). It can dynamically increase the minimum
+performance point required by its display pipeline to ensure no frame is
+dropped. It can also dynamically 'prime' up these tasks if it knows in the
+coming few hundred milliseconds a computationally intensive scene is about to
+happen.
+
+On mobile hardware, where the capability of devices varies a lot, this
+dynamic feedback loop offers great flexibility to ensure the best user
+experience given the capabilities of any system.
+
+Of course a static configuration is possible too. The exact usage will depend
+on the system, application and the desired outcome.
+
+Another example is Android, where tasks are classified as background,
+foreground, top-app, etc. Util clamp can be used to constrain how many
+resources background tasks consume by capping the performance point they can
+run at. This constraint helps reserve resources for important tasks, like
+the ones belonging to the currently active app (top-app group). Besides,
+this helps limit how much power they consume. The effect can be more obvious
+on heterogeneous systems (e.g. Arm big.LITTLE); the constraint will help
+bias the background tasks to stay on the little cores, which will ensure
+that:
+
+ 1. The big cores are free to run top-app tasks immediately. top-app
+ tasks are the tasks the user is currently interacting with, hence
+ the most important tasks in the system.
+ 2. They don't run on a power hungry core and drain battery even if they
+ are CPU intensive tasks.
+
+.. note::
+ **little cores**:
+ CPUs with capacity < 1024
+
+ **big cores**:
+ CPUs with capacity = 1024
+
+By making these uclamp performance requests, or rather hints, user space can
+ensure system resources are used optimally to deliver the best possible user
+experience.
+
+Another use case is to help with **overcoming the ramp up latency inherent
+in how the scheduler utilization signal is calculated**.
+
+A busy task that requires to run at the maximum performance point, for
+instance, will suffer a delay of ~200ms (PELT HALFLIFE = 32ms) before the
+scheduler realizes that. This is known to affect workloads like gaming on
+mobile devices, where frames will drop due to the slow response time in
+selecting the higher frequency required for the tasks to finish their work
+in time. Setting UCLAMP_MIN=1024 will ensure such tasks always see the
+highest performance level when they start running.
+
+The overall visible effect goes beyond better perceived user
+experience/performance and stretches to help achieve a better overall
+performance/watt if used effectively.
+
+User space can form a feedback loop with the thermal subsystem too to ensure
+the device doesn't heat up to the point where it will throttle.
+
+Both SCHED_NORMAL/OTHER and SCHED_FIFO/RR honour uclamp requests/hints.
+
+In the SCHED_FIFO/RR case, uclamp gives the option to run RT tasks at any
+performance point rather than being tied to the MAX frequency all the time,
+which can be useful on general purpose systems running on battery powered
+devices.
+
+Note that by design RT tasks don't have a per-task PELT signal and must
+always run at a constant frequency to combat nondeterministic DVFS ramp up
+delays.
+
+Note that using schedutil always implies a single delay to modify the
+frequency when an RT task wakes up. This cost is unchanged by using uclamp.
+Uclamp only helps pick which frequency to request, instead of schedutil
+always requesting MAX for all RT tasks.
+
+See :ref:`section 3.4 <uclamp-default-values>` for default values and
+:ref:`3.4.1 <sched-util-clamp-min-rt-default>` on how to change RT tasks
+default value.
+
+2. Design
+=========
+
+Util clamp is a property of every task in the system. It sets the boundaries of
+its utilization signal; acting as a bias mechanism that influences certain
+decisions within the scheduler.
+
+The actual utilization signal of a task is never clamped in reality. If you
+inspect PELT signals at any point in time you should continue to see them
+intact. Clamping happens only when needed, e.g.: when a task wakes up and
+the scheduler needs to select a suitable CPU for it to run on.
+
+Since the goal of util clamp is to allow requesting a minimum and maximum
+performance point for a task to run at, it must be able to influence the
+frequency selection as well as task placement to be most effective. Both
+have implications on the utilization value at the CPU runqueue (rq for
+short) level, which brings us to the main design challenge.
+
+When a task wakes up on an rq, the utilization signal of the rq will be
+affected by the uclamp settings of all the tasks enqueued on it. For
+example, if a task requests to run at UTIL_MIN = 512, then the util signal
+of the rq needs to respect this request as well as all other requests from
+all of the enqueued tasks.
+
+To be able to aggregate the util clamp value of all the tasks attached to
+the rq, uclamp must do some housekeeping at every enqueue/dequeue, which is
+in the scheduler hot path. Hence care must be taken, since any slowdown
+will have a significant impact on a lot of use cases and could hinder its
+usability in practice.
+
+The way this is handled is by dividing the utilization range into buckets
+(struct uclamp_bucket) which allows us to reduce the search space from every
+task on the rq to only a subset of tasks on the top-most bucket.
+
+When a task is enqueued, the counter in the matching bucket is incremented,
+and on dequeue it is decremented. This makes keeping track of the effective
+uclamp value at rq level a lot easier.
+
+As tasks are enqueued and dequeued, we keep track of the current effective
+uclamp value of the rq. See :ref:`section 2.1 <uclamp-buckets>` for details on
+how this works.
+
+Later, any path that wants to identify the effective uclamp value of the rq
+can simply read it at the exact moment it needs to take a decision.
+
+For the task placement case, only Energy Aware and Capacity Aware
+Scheduling (EAS/CAS) make use of uclamp for now, which implies that it is
+applied on heterogeneous systems only.
+
+When a task wakes up, the scheduler will look at the current effective
+uclamp value of every rq and compare it with the potential new value if the
+task were to be enqueued there, favoring the rq that will end up with the
+most energy efficient combination.
+
+Similarly, when schedutil needs to make a frequency update, it will look at
+the current effective uclamp value of the rq, which is influenced by the
+set of tasks currently enqueued there, and select the appropriate frequency
+that will satisfy the requested constraints.
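+
+As a rough illustration, the following sketch shows how the rq clamps could
+feed into a schedutil-style frequency choice. It is illustrative only; the
+helper name and the 1.25 headroom factor are assumptions rather than the
+kernel's exact code:
+
+::
+
+    /* Hypothetical sketch: clamp aggregated util, then map it to a frequency */
+    static unsigned long next_freq(unsigned long util, unsigned long rq_min,
+                                   unsigned long rq_max, unsigned long fmax)
+    {
+        /* honour the effective uclamp values of the rq */
+        if (util < rq_min)
+            util = rq_min;
+        if (util > rq_max)
+            util = rq_max;
+
+        /* util-to-frequency mapping with ~1.25 headroom */
+        return (fmax + (fmax >> 2)) * util / 1024;
+    }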
+
+Other paths like setting overutilization state (which effectively disables EAS)
+make use of uclamp as well. Such cases are considered necessary housekeeping to
+allow the 2 main use cases above and will not be covered in detail here as they
+could change with implementation details.
+
+.. _uclamp-buckets:
+
+2.1. Buckets
+------------
+
+::
+
+ [struct rq]
+
+ (bottom) (top)
+
+ 0 1024
+ | |
+ +-----------+-----------+-----------+---- ----+-----------+
+ | Bucket 0 | Bucket 1 | Bucket 2 | ... | Bucket N |
+ +-----------+-----------+-----------+---- ----+-----------+
+ : : :
+ +- p0 +- p3 +- p4
+ : :
+ +- p1 +- p5
+ :
+ +- p2
+
+
+.. note::
+ The diagram above is an illustration rather than a true depiction of the
+ internal data structure.
+
+To reduce the search space when trying to decide the effective uclamp value of
+an rq as tasks are enqueued/dequeued, the whole utilization range is divided
+into N buckets where N is configured at compile time by setting
+CONFIG_UCLAMP_BUCKETS_COUNT. By default it is set to 5.
+
+The rq has a bucket for each uclamp_id tunable: [UCLAMP_MIN, UCLAMP_MAX].
+
+The range of each bucket is 1024/N. For example, for the default value of
+5 there will be 5 buckets, each of which will cover the following range:
+
+::
+
+ DELTA = round_closest(1024/5) = round_closest(204.8) = 205
+
+ Bucket 0: [0:204]
+ Bucket 1: [205:409]
+ Bucket 2: [410:614]
+ Bucket 3: [615:819]
+ Bucket 4: [820:1024]
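+
+A minimal C sketch of this value-to-bucket mapping, assuming the default of
+5 buckets (the names here are hypothetical, not necessarily the kernel's
+internal helpers):
+
+::
+
+    #define UCLAMP_BUCKETS      5
+    /* round_closest(1024/5) = 205, matching the table above */
+    #define UCLAMP_BUCKET_DELTA ((1024 + UCLAMP_BUCKETS / 2) / UCLAMP_BUCKETS)
+
+    static unsigned int bucket_id(unsigned int clamp_value)
+    {
+        unsigned int id = clamp_value / UCLAMP_BUCKET_DELTA;
+
+        /* make sure clamp_value == 1024 lands in the last bucket */
+        return id >= UCLAMP_BUCKETS ? UCLAMP_BUCKETS - 1 : id;
+    }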
+
+When a task p with the following tunable parameters
+
+::
+
+ p->uclamp[UCLAMP_MIN] = 300
+ p->uclamp[UCLAMP_MAX] = 1024
+
+is enqueued into the rq, bucket 1 will be incremented for UCLAMP_MIN and bucket
+4 will be incremented for UCLAMP_MAX to reflect the fact the rq has a task in
+this range.
+
+The rq then keeps track of its current effective uclamp value for each
+uclamp_id.
+
+When a task p is enqueued, the rq value changes to:
+
+::
+
+ // update bucket logic goes here
+ rq->uclamp[UCLAMP_MIN] = max(rq->uclamp[UCLAMP_MIN], p->uclamp[UCLAMP_MIN])
+ // repeat for UCLAMP_MAX
+
+Similarly, when p is dequeued the rq value changes to:
+
+::
+
+ // update bucket logic goes here
+ rq->uclamp[UCLAMP_MIN] = search_top_bucket_for_highest_value()
+ // repeat for UCLAMP_MAX
+
+When all buckets are empty, the rq uclamp values are reset to system defaults.
+See :ref:`section 3.4 <uclamp-default-values>` for details on default values.
+
+
+2.2. Max aggregation
+--------------------
+
+Util clamp is tuned to honour the request for the task that requires the
+highest performance point.
+
+When multiple tasks are attached to the same rq, then util clamp must make sure
+the task that needs the highest performance point gets it even if there's
+another task that doesn't need it or is disallowed from reaching this point.
+
+For example, if there are multiple tasks attached to an rq with the following
+values:
+
+::
+
+ p0->uclamp[UCLAMP_MIN] = 300
+ p0->uclamp[UCLAMP_MAX] = 900
+
+ p1->uclamp[UCLAMP_MIN] = 500
+ p1->uclamp[UCLAMP_MAX] = 500
+
+then assuming both p0 and p1 are enqueued to the same rq, both UCLAMP_MIN
+and UCLAMP_MAX become:
+
+::
+
+ rq->uclamp[UCLAMP_MIN] = max(300, 500) = 500
+ rq->uclamp[UCLAMP_MAX] = max(900, 500) = 900
+
+As we shall see in :ref:`section 5.1 <uclamp-capping-fail>`, this max
+aggregation is the cause of one of the limitations of util clamp, in
+particular for the UCLAMP_MAX hint when user space would like to save
+power.
+
+2.3. Hierarchical aggregation
+-----------------------------
+
+As stated earlier, util clamp is a property of every task in the system. But
+the actual applied (effective) value can be influenced by more than just the
+request made by the task or another actor on its behalf (middleware library).
+
+The effective util clamp value of any task is restricted as follows:
+
+ 1. By the uclamp settings defined by the cgroup CPU controller it is attached
+ to, if any.
+ 2. The restricted value in (1) is then further restricted by the system wide
+ uclamp settings.
+
+:ref:`Section 3 <uclamp-interfaces>` discusses the interfaces and will expand
+further on that.
+
+For now, suffice it to say that if a task makes a request, its actual
+effective value will have to adhere to restrictions imposed by the cgroup
+and system wide settings.
+
+The system will still accept the request even if it will effectively be
+beyond the constraints, but as soon as the task moves to a different cgroup
+or a sysadmin modifies the system settings, the request will be satisfied
+only if it is within the new constraints.
+
+In other words, this aggregation will not cause an error when a task changes
+its uclamp values; rather, the system may simply not be able to satisfy the
+request given those restrictions.
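+
+A hedged sketch of this two-step restriction (illustrative names and
+simplified logic, not the kernel's actual uclamp_eff_get()):
+
+::
+
+    static unsigned int uclamp_effective(unsigned int task_req,
+                                         unsigned int cg_min, unsigned int cg_max,
+                                         unsigned int sys_limit)
+    {
+        unsigned int value = task_req;
+
+        /* 1. restrict by the cgroup settings: [cg_min, cg_max] */
+        if (value < cg_min)
+            value = cg_min;
+        if (value > cg_max)
+            value = cg_max;
+
+        /* 2. restrict by the matching system wide sched_util_clamp_* limit */
+        return value < sys_limit ? value : sys_limit;
+    }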
+
+2.4. Range
+----------
+
+A uclamp performance request has a range of 0 to 1024 inclusive.
+
+For the cgroup interface a percentage is used instead (that is, 0 to 100
+inclusive). Just like other cgroup interfaces, you can use 'max' instead of
+100.
+
+.. _uclamp-interfaces:
+
+3. Interfaces
+=============
+
+3.1. Per task interface
+-----------------------
+
+The sched_setattr() syscall was extended to accept two new fields:
+
+* sched_util_min: requests the minimum performance point the system should
+ run at when this task is running; i.e. the lower performance bound.
+* sched_util_max: requests the maximum performance point the system should
+ run at when this task is running; i.e. the upper performance bound.
+
+For example, the following sets a 40% to 80% utilization constraint:
+
+::
+
+ attr->sched_util_min = 40% * 1024;
+ attr->sched_util_max = 80% * 1024;
+
+When task @p is running, **the scheduler should try its best to ensure it
+starts at 40% performance level**. If the task runs for a long enough time so
+that its actual utilization goes above 80%, the utilization, or performance
+level, will be capped.
+
+The special value -1 is used to reset the uclamp settings to the system
+default.
+
+Note that resetting the uclamp values to system default using -1 is not the
+same as manually setting the uclamp values to system default. This
+distinction is important because, as we shall see in the system interfaces,
+the default value for RT could be changed. SCHED_NORMAL/OTHER might gain
+similar knobs in the future.
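+
+A minimal user space sketch of using this interface (error handling omitted;
+treat it as an illustration rather than a reference, the uapi headers are
+authoritative):
+
+::
+
+    #define _GNU_SOURCE
+    #include <stdint.h>
+    #include <string.h>
+    #include <sys/syscall.h>
+    #include <unistd.h>
+    #include <linux/sched.h>        /* SCHED_FLAG_UTIL_CLAMP_* */
+    #include <linux/sched/types.h>  /* struct sched_attr */
+
+    static int set_uclamp(pid_t pid, uint32_t min, uint32_t max)
+    {
+        struct sched_attr attr;
+
+        memset(&attr, 0, sizeof(attr));
+        attr.size = sizeof(attr);
+        /* update only the clamps; keep policy and priority as they are */
+        attr.sched_flags = SCHED_FLAG_KEEP_ALL |
+                           SCHED_FLAG_UTIL_CLAMP_MIN |
+                           SCHED_FLAG_UTIL_CLAMP_MAX;
+        attr.sched_util_min = min;  /* e.g. 40% of 1024 = 409 */
+        attr.sched_util_max = max;  /* e.g. 80% of 1024 = 819 */
+
+        return syscall(SYS_sched_setattr, pid, &attr, 0);
+    }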
+
+3.2. cgroup interface
+---------------------
+
+There are two uclamp related values in the CPU cgroup controller:
+
+* cpu.uclamp.min
+* cpu.uclamp.max
+
+When a task is attached to a CPU controller, its uclamp values will be impacted
+as follows:
+
+* cpu.uclamp.min is a protection as described in :ref:`section 3-3 of cgroup
+ v2 documentation <cgroupv2-protections-distributor>`.
+
+ If a task uclamp_min value is lower than cpu.uclamp.min, then the task will
+ inherit the cgroup cpu.uclamp.min value.
+
+ In a cgroup hierarchy, effective cpu.uclamp.min is the max of (child,
+ parent).
+
+* cpu.uclamp.max is a limit as described in :ref:`section 3-2 of cgroup v2
+ documentation <cgroupv2-limits-distributor>`.
+
+ If a task uclamp_max value is higher than cpu.uclamp.max, then the task will
+ inherit the cgroup cpu.uclamp.max value.
+
+ In a cgroup hierarchy, effective cpu.uclamp.max is the min of (child,
+ parent).
+
+For example, given the following parameters:
+
+::
+
+ p0->uclamp[UCLAMP_MIN] = // system default;
+ p0->uclamp[UCLAMP_MAX] = // system default;
+
+ p1->uclamp[UCLAMP_MIN] = 40% * 1024;
+ p1->uclamp[UCLAMP_MAX] = 50% * 1024;
+
+ cgroup0->cpu.uclamp.min = 20% * 1024;
+ cgroup0->cpu.uclamp.max = 60% * 1024;
+
+ cgroup1->cpu.uclamp.min = 60% * 1024;
+ cgroup1->cpu.uclamp.max = 100% * 1024;
+
+when p0 and p1 are attached to cgroup0, the values become:
+
+::
+
+ p0->uclamp[UCLAMP_MIN] = cgroup0->cpu.uclamp.min = 20% * 1024;
+ p0->uclamp[UCLAMP_MAX] = cgroup0->cpu.uclamp.max = 60% * 1024;
+
+ p1->uclamp[UCLAMP_MIN] = 40% * 1024; // intact
+ p1->uclamp[UCLAMP_MAX] = 50% * 1024; // intact
+
+when p0 and p1 are attached to cgroup1, these instead become:
+
+::
+
+ p0->uclamp[UCLAMP_MIN] = cgroup1->cpu.uclamp.min = 60% * 1024;
+ p0->uclamp[UCLAMP_MAX] = cgroup1->cpu.uclamp.max = 100% * 1024;
+
+ p1->uclamp[UCLAMP_MIN] = cgroup1->cpu.uclamp.min = 60% * 1024;
+ p1->uclamp[UCLAMP_MAX] = 50% * 1024; // intact
+
+Note that the cgroup interface allows cpu.uclamp.max to be lower than
+cpu.uclamp.min. Other interfaces don't allow that.
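+
+As an illustration, the clamps of a cgroup can be set by writing percentage
+strings to these files. A short C sketch (the cgroup path below is
+hypothetical):
+
+::
+
+    #include <stdio.h>
+
+    static int set_cgroup_uclamp(const char *cg, const char *file,
+                                 const char *pct)
+    {
+        char path[256];
+        FILE *f;
+
+        snprintf(path, sizeof(path), "/sys/fs/cgroup/%s/%s", cg, file);
+        f = fopen(path, "w");
+        if (!f)
+            return -1;
+        fprintf(f, "%s\n", pct);  /* e.g. "20" for 20%, or "max" */
+        return fclose(f);
+    }
+
+    /* set_cgroup_uclamp("background", "cpu.uclamp.min", "20"); */
+    /* set_cgroup_uclamp("background", "cpu.uclamp.max", "60"); */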
+
+3.3. System interface
+---------------------
+
+3.3.1 sched_util_clamp_min
+--------------------------
+
+System wide limit of the allowed UCLAMP_MIN range. By default it is set to
+1024, which means the permitted effective UCLAMP_MIN range for tasks is
+[0:1024]. Changing it to 512, for example, reduces the range to [0:512].
+This is useful to restrict how much boosting tasks are allowed to acquire.
+
+Requests from tasks to go above this knob value will still succeed, but
+they won't be satisfied until this value becomes larger than
+p->uclamp[UCLAMP_MIN].
+
+The value must be smaller than or equal to sched_util_clamp_max.
+
+3.3.2 sched_util_clamp_max
+--------------------------
+
+System wide limit of the allowed UCLAMP_MAX range. By default it is set to
+1024, which means the permitted effective UCLAMP_MAX range for tasks is
+[0:1024].
+
+Changing it to 512, for example, reduces the effective allowed range to
+[0:512]. This means that no task can run above 512, which implies that all
+rqs are restricted too. IOW, the whole system is capped to half its
+performance capacity.
+
+This is useful to restrict the overall maximum performance point of the
+system. For example, it can be handy to limit performance when running low
+on battery, or when the system wants to limit access to the more energy
+hungry performance levels when it's in an idle state or the screen is off.
+
+Requests from tasks to go above this knob value will still succeed, but
+they won't be satisfied until this value becomes larger than
+p->uclamp[UCLAMP_MAX].
+
+The value must be greater than or equal to sched_util_clamp_min.
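+
+For example, to cap the whole system to half its performance capacity, one
+could write 512 to this knob; a minimal C sketch (a shell echo to the same
+path works just as well):
+
+::
+
+    #include <stdio.h>
+
+    static int set_sys_uclamp_max(unsigned int value)
+    {
+        FILE *f = fopen("/proc/sys/kernel/sched_util_clamp_max", "w");
+
+        if (!f)
+            return -1;
+        fprintf(f, "%u\n", value);  /* e.g. 512 */
+        return fclose(f);
+    }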
+
+.. _uclamp-default-values:
+
+3.4. Default values
+-------------------
+
+By default all SCHED_NORMAL/SCHED_OTHER tasks are initialized to:
+
+::
+
+ p_fair->uclamp[UCLAMP_MIN] = 0
+ p_fair->uclamp[UCLAMP_MAX] = 1024
+
+That is, by default they're effectively allowed to run at any performance
+point in the system.
+
+Unlike RT tasks, the default values of SCHED_NORMAL/OTHER tasks can't be
+changed at boot or runtime. No argument has been made yet as to why we
+should provide knobs for this, but they can be added in the future.
+
+For SCHED_FIFO/SCHED_RR tasks:
+
+::
+
+ p_rt->uclamp[UCLAMP_MIN] = 1024
+ p_rt->uclamp[UCLAMP_MAX] = 1024
+
+That is, by default they're boosted to run at the maximum performance point
+of the system, which retains the historical behavior of RT tasks.
+
+The default uclamp_min value of RT tasks can be modified at boot or runtime
+via sysctl. See the section below.
+
+.. _sched-util-clamp-min-rt-default:
+
+3.4.1 sched_util_clamp_min_rt_default
+-------------------------------------
+
+Running RT tasks at the maximum performance point is expensive on battery
+powered devices and not necessary. To allow system developers to offer good
+performance guarantees for these tasks without pushing them all the way to
+the maximum performance point, this sysctl knob allows tuning the boost
+value to match the system requirement without burning power by running at
+the maximum performance point all the time.
+
+Application developers are encouraged to use the per task util clamp
+interface to ensure they are performance and power aware. Ideally this knob
+should be set to 0 by system designers, leaving the task of managing
+performance requirements to the apps.
+
+4. How to use util clamp
+========================
+
+Util clamp promotes the concept of user space assisted power and
+performance management. At the scheduler level there isn't enough
+information to always make the best decision. However, with util clamp user
+space can hint to the scheduler to make better decisions about task
+placement and frequency selection.
+
+Best results are achieved by not making any assumptions about the system the
+application is running on and to use it in conjunction with a feedback loop to
+dynamically monitor and adjust. Ultimately this will allow for a better user
+experience at a better perf/watt.
+
+For some systems and use cases, a static setup will help to achieve good
+results, but portability will then be a problem. How much work one can do
+at 100, 200 or 1024 is different for each system. Unless there's a specific
+target system, a static setup should be avoided.
+
+There are enough possibilities to create a whole framework based on util
+clamp, or a self contained app that makes use of it directly.
+
+4.1. Boost important and DVFS-latency-sensitive tasks
+-----------------------------------------------------
+
+A GUI task might not be busy enough to warrant driving the frequency high
+when it wakes up. However, it needs to finish its work within a specific
+time window to deliver the desired user experience. The right frequency it
+requires at wakeup will be system dependent. On some underpowered systems
+it will be high, on other overpowered ones it will be low or 0.
+
+Such a task can increase its UCLAMP_MIN value every time it misses the
+deadline to ensure that on the next wake up it runs at a higher performance
+point. It should try to approach the lowest UCLAMP_MIN value that allows it
+to meet its deadline on any particular system, to achieve the best possible
+perf/watt for that system.
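+
+A hedged sketch of such a feedback loop, reusing the hypothetical
+set_uclamp() helper from section 3.1 (the step size and bounds are made up
+for illustration):
+
+::
+
+    static uint32_t boost;
+
+    static void frame_done(int missed_deadline)
+    {
+        if (missed_deadline && boost <= 1024 - 64)
+            boost += 64;            /* ask for a higher performance point */
+        else if (!missed_deadline && boost >= 64)
+            boost -= 64;            /* probe back down to save power */
+
+        set_uclamp(0, boost, 1024); /* pid 0 means the calling thread */
+    }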
+
+On heterogeneous systems, it might be important for this task to run on
+a faster CPU.
+
+**Generally it is advised to perceive the input as performance level or point
+which will imply both task placement and frequency selection**.
+
+4.2. Cap background tasks
+-------------------------
+
+As explained for the Android case in the introduction, any app can lower
+UCLAMP_MAX for background tasks that don't care about performance but could
+end up busy and consume unnecessary system resources.
+
+4.3. Powersave mode
+-------------------
+
+The sched_util_clamp_max system wide interface can be used to limit all
+tasks from operating at the higher performance points, which are usually
+energy inefficient.
+
+This is not unique to uclamp, as one can achieve the same by reducing the
+max frequency of the cpufreq governor. Uclamp can be considered a more
+convenient alternative interface.
+
+4.4. Per-app performance restriction
+------------------------------------
+
+A middleware/utility can provide the user with an option to set
+UCLAMP_MIN/MAX for an app every time it is executed, to guarantee a minimum
+performance point and/or limit it from draining system power at the cost of
+reduced performance for these apps.
+
+If you want to prevent your laptop from heating up while on the go from
+compiling the kernel, and are happy to sacrifice performance to save power
+while still keeping your browser's performance intact, uclamp makes that
+possible.
+
+5. Limitations
+==============
+
+.. _uclamp-capping-fail:
+
+5.1. Capping frequency with uclamp_max fails under certain conditions
+---------------------------------------------------------------------
+
+If task p0 is capped to run at 512:
+
+::
+
+ p0->uclamp[UCLAMP_MAX] = 512
+
+and it shares the rq with p1 which is free to run at any performance point:
+
+::
+
+ p1->uclamp[UCLAMP_MAX] = 1024
+
+then due to max aggregation the rq will be allowed to reach max performance
+point:
+
+::
+
+ rq->uclamp[UCLAMP_MAX] = max(512, 1024) = 1024
+
+Assuming both p0 and p1 have UCLAMP_MIN = 0, then the frequency selection for
+the rq will depend on the actual utilization value of the tasks.
+
+If p1 is a small task and p0 is a CPU intensive task, then because both are
+running on the same rq, p1 will cause the frequency capping to be lifted
+from the rq, even though p1, which is allowed to run at any performance
+point, doesn't actually need to run at that frequency.
+
+5.2. UCLAMP_MAX can break PELT (util_avg) signal
+------------------------------------------------
+
+PELT assumes that frequency will always increase as the signals grow, to
+ensure there's always some idle time on the CPU. But with UCLAMP_MAX this
+frequency increase will be prevented, which can lead to no idle time in
+some circumstances. When there's no idle time, a task will be stuck in a
+busy loop, which results in util_avg becoming 1024.
+
+Combined with the issue described below, this can lead to unwanted frequency
+spikes when severely capped tasks share the rq with a small non capped
+task.
+
+As an example, if task p0, which has:
+
+::
+
+ p0->util_avg = 300
+ p0->uclamp[UCLAMP_MAX] = 0
+
+wakes up on an idle CPU, then it will run at min frequency (Fmin) this
+CPU is capable of. The max CPU frequency (Fmax) matters here as well,
+since it designates the shortest computational time to finish the task's
+work on this CPU.
+
+::
+
+ rq->uclamp[UCLAMP_MAX] = 0
+
+If the ratio of Fmax/Fmin is 3, then the maximum value will be:
+
+::
+
+ 300 * (Fmax/Fmin) = 900
+
+which indicates the CPU will still see idle time since 900 is < 1024. The
+_actual_ util_avg will not be 900 though, but somewhere between 300 and
+900. As long as there's idle time, p->util_avg updates will be off by some
+margin, but not proportional to Fmax/Fmin.
+
+::
+
+ p0->util_avg = 300 + small_error
+
+Now if the ratio of Fmax/Fmin is 4, the maximum value becomes:
+
+::
+
+ 300 * (Fmax/Fmin) = 1200
+
+which is higher than 1024 and indicates that the CPU has no idle time.
+When this happens, the _actual_ util_avg will become:
+
+::
+
+ p0->util_avg = 1024
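+
+The arithmetic above can be condensed into a small helper (illustrative
+only) that shows how running below Fmax inflates the observed utilization:
+
+::
+
+    /* util as observed when the task runs at fcur instead of fmax */
+    static unsigned int capped_util(unsigned int util,
+                                    unsigned int fmax, unsigned int fcur)
+    {
+        unsigned long inflated = (unsigned long)util * fmax / fcur;
+
+        /* past 1024 there is no idle time and the signal saturates */
+        return inflated > 1024 ? 1024 : inflated;
+    }
+
+    /* capped_util(300, 3000, 1000) == 900  -> still some idle time */
+    /* capped_util(300, 4000, 1000) == 1024 -> saturated, no idle   */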
+
+If task p1 wakes up on this CPU, which has:
+
+::
+
+ p1->util_avg = 200
+ p1->uclamp[UCLAMP_MAX] = 1024
+
+then the effective UCLAMP_MAX for the CPU will be 1024 according to the max
+aggregation rule. But since the capped task p0 was running severely
+throttled, the rq->util_avg will be:
+
+::
+
+ p0->util_avg = 1024
+ p1->util_avg = 200
+
+ rq->util_avg = 1024
+ rq->uclamp[UCLAMP_MAX] = 1024
+
+Hence we get a frequency spike, since if p0 weren't throttled we would
+get:
+
+::
+
+ p0->util_avg = 300
+ p1->util_avg = 200
+
+ rq->util_avg = 500
+
+and run somewhere near the mid performance point of that CPU, not the Fmax
+we actually get.
+
+5.3. Schedutil response time issues
+-----------------------------------
+
+schedutil has three limitations:
+
+ 1. Hardware takes non-zero time to respond to any frequency change
+ request. On some platforms this can be on the order of a few ms.
+ 2. Non fast-switch systems require a worker deadline thread to wake up
+ and perform the frequency change, which adds measurable overhead.
+ 3. schedutil rate_limit_us drops any requests during this rate_limit_us
+ window.
+
+If a relatively small task is doing a critical job and requires a certain
+performance point when it wakes up and starts running, then all these
+limitations will prevent it from getting what it wants in the time scale it
+expects.
+
+These limitations are not only impactful when using uclamp, but they become
+more prevalent since we no longer gradually ramp up or down. We could
+easily be jumping between frequencies depending on the order in which tasks
+wake up and their respective uclamp values.
+
+We regard that as a limitation of the capabilities of the underlying system
+itself.
+
+There is room to improve the behavior of schedutil rate_limit_us, but not much
+to be done for 1 or 2. They are considered hard limitations of the system.
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 65fdae9e48f3..ce20c31828a0 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -57,7 +57,6 @@ EXPORT_SYMBOL(pm_power_off);
void arch_cpu_idle(void)
{
wtint(0);
- raw_local_irq_enable();
}
void arch_cpu_idle_dead(void)
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 5b78d640725d..2efa7dfc798a 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -27,7 +27,6 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 3369f0700702..980b71da2f61 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -114,6 +114,8 @@ void arch_cpu_idle(void)
"sleep %0 \n"
:
:"I"(arg)); /* can't be "r" has to be embedded const */
+
+ raw_local_irq_disable();
}
#else /* ARC700 */
@@ -122,6 +124,7 @@ void arch_cpu_idle(void)
{
/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
__asm__ __volatile__("sleep 0x3 \n");
+ raw_local_irq_disable();
}
#endif
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 529ae50f9fe2..549c3f407918 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -85,7 +85,6 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index fad45c884e98..4c8632d5c432 100644
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -96,7 +96,6 @@
SOFTIRQENTRY_TEXT \
TEXT_TEXT \
SCHED_TEXT \
- CPUIDLE_TEXT \
LOCK_TEXT \
KPROBES_TEXT \
ARM_STUBS_TEXT \
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index e1684623e1b2..437ff39f7808 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -26,8 +26,8 @@ static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init;
*
* Returns the index passed as parameter
*/
-int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+__cpuidle int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
cpu_do_idle();
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f811733a8fc5..c81e7be2b4ea 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -78,7 +78,6 @@ void arch_cpu_idle(void)
arm_pm_idle();
else
cpu_do_idle();
- raw_local_irq_enable();
}
void arch_cpu_idle_prepare(void)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 36e6efad89f3..0b8c25763adc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -638,7 +638,7 @@ static void do_handle_IPI(int ipinr)
unsigned int cpu = smp_processor_id();
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+ trace_ipi_entry(ipi_types[ipinr]);
switch (ipinr) {
case IPI_WAKEUP:
@@ -685,7 +685,7 @@ static void do_handle_IPI(int ipinr)
}
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+ trace_ipi_exit(ipi_types[ipinr]);
}
/* Legacy version, should go away once all irqchips have been converted */
@@ -708,7 +708,7 @@ static irqreturn_t ipi_handler(int irq, void *data)
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
- trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+ trace_ipi_raise(target, ipi_types[ipinr]);
__ipi_send_mask(ipi_desc[ipinr], target);
}
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index dd38785536d5..78a1575c387d 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -44,8 +44,8 @@ static void davinci_save_ddr_power(int enter, bool pdown)
}
/* Actual code that puts the SoC in different idle states */
-static int davinci_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int davinci_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
davinci_save_ddr_power(1, ddr2_pdown);
cpu_do_idle();
diff --git a/arch/arm/mach-gemini/board-dt.c b/arch/arm/mach-gemini/board-dt.c
index de0afcc8d94a..fbafe7475c02 100644
--- a/arch/arm/mach-gemini/board-dt.c
+++ b/arch/arm/mach-gemini/board-dt.c
@@ -42,8 +42,9 @@ static void gemini_idle(void)
*/
/* FIXME: Enabling interrupts here is racy! */
- local_irq_enable();
+ raw_local_irq_enable();
cpu_do_idle();
+ raw_local_irq_disable();
}
static void __init gemini_init_machine(void)
diff --git a/arch/arm/mach-imx/cpuidle-imx5.c b/arch/arm/mach-imx/cpuidle-imx5.c
index a8457c4eb99a..5ad9f2f533cd 100644
--- a/arch/arm/mach-imx/cpuidle-imx5.c
+++ b/arch/arm/mach-imx/cpuidle-imx5.c
@@ -8,8 +8,8 @@
#include <asm/system_misc.h>
#include "cpuidle.h"
-static int imx5_cpuidle_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int imx5_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
arm_pm_idle();
return index;
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index d086cbae09c3..2b0d3160f993 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -17,17 +17,17 @@
static int num_idle_cpus = 0;
static DEFINE_RAW_SPINLOCK(cpuidle_lock);
-static int imx6q_enter_wait(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int imx6q_enter_wait(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
raw_spin_lock(&cpuidle_lock);
if (++num_idle_cpus == num_online_cpus())
imx6_set_lpm(WAIT_UNCLOCKED);
raw_spin_unlock(&cpuidle_lock);
- ct_idle_enter();
+ ct_cpuidle_enter();
cpu_do_idle();
- ct_idle_exit();
+ ct_cpuidle_exit();
raw_spin_lock(&cpuidle_lock);
if (num_idle_cpus-- == num_online_cpus())
diff --git a/arch/arm/mach-imx/cpuidle-imx6sl.c b/arch/arm/mach-imx/cpuidle-imx6sl.c
index b86ffbeb28e4..b49cd6302dce 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sl.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sl.c
@@ -11,8 +11,8 @@
#include "common.h"
#include "cpuidle.h"
-static int imx6sl_enter_wait(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int imx6sl_enter_wait(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
imx6_set_lpm(WAIT_UNCLOCKED);
/*
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 74ea1720e3d8..83c5cbd3748e 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -30,8 +30,8 @@ static int imx6sx_idle_finish(unsigned long val)
return 0;
}
-static int imx6sx_enter_wait(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int imx6sx_enter_wait(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
imx6_set_lpm(WAIT_UNCLOCKED);
@@ -47,7 +47,9 @@ static int imx6sx_enter_wait(struct cpuidle_device *dev,
cpu_pm_enter();
cpu_cluster_pm_enter();
+ ct_cpuidle_enter();
cpu_suspend(0, imx6sx_idle_finish);
+ ct_cpuidle_exit();
cpu_cluster_pm_exit();
cpu_pm_exit();
@@ -87,7 +89,8 @@ static struct cpuidle_driver imx6sx_cpuidle_driver = {
*/
.exit_latency = 300,
.target_residency = 500,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.enter = imx6sx_enter_wait,
.name = "LOW-POWER-IDLE",
.desc = "ARM power off",
diff --git a/arch/arm/mach-imx/cpuidle-imx7ulp.c b/arch/arm/mach-imx/cpuidle-imx7ulp.c
index ca86c967d19e..f55ed74acfae 100644
--- a/arch/arm/mach-imx/cpuidle-imx7ulp.c
+++ b/arch/arm/mach-imx/cpuidle-imx7ulp.c
@@ -12,8 +12,8 @@
#include "common.h"
#include "cpuidle.h"
-static int imx7ulp_enter_wait(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+static __cpuidle int imx7ulp_enter_wait(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
if (index == 1)
imx7ulp_set_lpm(ULP_PM_WAIT);
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 08034d589081..9d60799e9752 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -256,11 +256,13 @@ extern u32 omap4_get_cpu1_ns_pa_addr(void);
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
extern int omap4_mpuss_init(void);
-extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
+extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
+ bool rcuidle);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
#else
static inline int omap4_enter_lowpower(unsigned int cpu,
- unsigned int power_state)
+ unsigned int power_state,
+ bool rcuidle)
{
cpu_do_idle();
return 0;
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 090a8aafb25e..2ab5dcbfb7f6 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -133,7 +133,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
}
/* Execute ARM wfi */
- omap_sram_idle();
+ omap_sram_idle(true);
/*
* Call idle CPU PM enter notifier chain to restore
@@ -265,6 +265,7 @@ static struct cpuidle_driver omap3_idle_driver = {
.owner = THIS_MODULE,
.states = {
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 2 + 2,
.target_residency = 5,
@@ -272,6 +273,7 @@ static struct cpuidle_driver omap3_idle_driver = {
.desc = "MPU ON + CORE ON",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 10 + 10,
.target_residency = 30,
@@ -279,6 +281,7 @@ static struct cpuidle_driver omap3_idle_driver = {
.desc = "MPU ON + CORE ON",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 50 + 50,
.target_residency = 300,
@@ -286,6 +289,7 @@ static struct cpuidle_driver omap3_idle_driver = {
.desc = "MPU RET + CORE ON",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 1500 + 1800,
.target_residency = 4000,
@@ -293,6 +297,7 @@ static struct cpuidle_driver omap3_idle_driver = {
.desc = "MPU OFF + CORE ON",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 2500 + 7500,
.target_residency = 12000,
@@ -300,6 +305,7 @@ static struct cpuidle_driver omap3_idle_driver = {
.desc = "MPU RET + CORE RET",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 3000 + 8500,
.target_residency = 15000,
@@ -307,6 +313,7 @@ static struct cpuidle_driver omap3_idle_driver = {
.desc = "MPU OFF + CORE RET",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 10000 + 30000,
.target_residency = 30000,
@@ -328,6 +335,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
.owner = THIS_MODULE,
.states = {
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 110 + 162,
.target_residency = 5,
@@ -335,6 +343,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
.desc = "MPU ON + CORE ON",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 106 + 180,
.target_residency = 309,
@@ -342,6 +351,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
.desc = "MPU ON + CORE ON",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 107 + 410,
.target_residency = 46057,
@@ -349,6 +359,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
.desc = "MPU RET + CORE ON",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 121 + 3374,
.target_residency = 46057,
@@ -356,6 +367,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
.desc = "MPU OFF + CORE ON",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 855 + 1146,
.target_residency = 46057,
@@ -363,6 +375,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
.desc = "MPU RET + CORE RET",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 7580 + 4134,
.target_residency = 484329,
@@ -370,6 +383,7 @@ static struct cpuidle_driver omap3430_idle_driver = {
.desc = "MPU OFF + CORE RET",
},
{
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.enter = omap3_enter_idle_bm,
.exit_latency = 7505 + 15274,
.target_residency = 484329,
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index de37027ad758..df106524d695 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -105,7 +105,7 @@ static int omap_enter_idle_smp(struct cpuidle_device *dev,
}
raw_spin_unlock_irqrestore(&mpu_lock, flag);
- omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+ omap4_enter_lowpower(dev->cpu, cx->cpu_state, true);
raw_spin_lock_irqsave(&mpu_lock, flag);
if (cx->mpu_state_vote == num_online_cpus())
@@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
(cx->mpu_logic_state == PWRDM_POWER_OFF);
/* Enter broadcast mode for periodic timers */
- RCU_NONIDLE(tick_broadcast_enable());
+ tick_broadcast_enable();
/* Enter broadcast mode for one-shot timers */
- RCU_NONIDLE(tick_broadcast_enter());
+ tick_broadcast_enter();
/*
* Call idle CPU PM enter notifier chain so that
@@ -166,7 +166,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
+ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
/*
* Call idle CPU cluster PM enter notifier chain
@@ -178,13 +178,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
index = 0;
cx = state_ptr + index;
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
+ omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
mpuss_can_lose_context = 0;
}
}
}
- omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+ omap4_enter_lowpower(dev->cpu, cx->cpu_state, true);
cpu_done[dev->cpu] = true;
/* Wakeup CPU1 only if it is not offlined */
@@ -194,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
mpuss_can_lose_context)
gic_dist_disable();
- RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
- RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
- RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
+ clkdm_deny_idle(cpu_clkdm[1]);
+ omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
+ clkdm_allow_idle(cpu_clkdm[1]);
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
mpuss_can_lose_context) {
@@ -222,7 +222,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
cpu_pm_exit();
cpu_pm_out:
- RCU_NONIDLE(tick_broadcast_exit());
+ tick_broadcast_exit();
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
@@ -247,7 +247,8 @@ static struct cpuidle_driver omap4_idle_driver = {
/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
.exit_latency = 328 + 440,
.target_residency = 960,
- .flags = CPUIDLE_FLAG_COUPLED,
+ .flags = CPUIDLE_FLAG_COUPLED |
+ CPUIDLE_FLAG_RCU_IDLE,
.enter = omap_enter_idle_coupled,
.name = "C2",
.desc = "CPUx OFF, MPUSS CSWR",
@@ -256,7 +257,8 @@ static struct cpuidle_driver omap4_idle_driver = {
/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
.exit_latency = 460 + 518,
.target_residency = 1100,
- .flags = CPUIDLE_FLAG_COUPLED,
+ .flags = CPUIDLE_FLAG_COUPLED |
+ CPUIDLE_FLAG_RCU_IDLE,
.enter = omap_enter_idle_coupled,
.name = "C3",
.desc = "CPUx OFF, MPUSS OSWR",
@@ -282,7 +284,8 @@ static struct cpuidle_driver omap5_idle_driver = {
/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
.exit_latency = 48 + 60,
.target_residency = 100,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.enter = omap_enter_idle_smp,
.name = "C2",
.desc = "CPUx CSWR, MPUSS CSWR",
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 9fba98c2313a..7ad74db951f6 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -33,6 +33,7 @@
* and first to wake-up when MPUSS low power states are excercised
*/
+#include <linux/cpuidle.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
@@ -214,6 +215,7 @@ static void __init save_l2x0_context(void)
* of OMAP4 MPUSS subsystem
* @cpu : CPU ID
* @power_state: Low power state.
+ * @rcuidle: RCU needs to be idled
*
* MPUSS states for the context save:
* save_state =
@@ -222,7 +224,8 @@ static void __init save_l2x0_context(void)
* 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
* 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
*/
-int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
+__cpuidle int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state,
+ bool rcuidle)
{
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET;
@@ -268,6 +271,10 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
cpu_clear_prev_logic_pwrst(cpu);
pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
+
+ if (rcuidle)
+ ct_cpuidle_enter();
+
set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
omap_pm_ops.scu_prepare(cpu, power_state);
l2x0_pwrst_prepare(cpu, save_state);
@@ -283,6 +290,9 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
gic_dist_enable();
+ if (rcuidle)
+ ct_cpuidle_exit();
+
/*
* Restore the CPUx power state to ON otherwise CPUx
* power domain can transitions to programmed low power
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 90a341b0369c..f97ff93f2fb4 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -29,7 +29,7 @@ static inline int omap4_idle_init(void)
extern void *omap3_secure_ram_storage;
extern void omap3_pm_off_mode_enable(int);
-extern void omap_sram_idle(void);
+extern void omap_sram_idle(bool rcuidle);
extern int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused);
extern int omap3_pm_get_suspend_state(struct powerdomain *pwrdm);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index d73c7b692116..68975771e633 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/cpuidle.h>
#include <trace/events/power.h>
@@ -174,7 +175,7 @@ static int omap34xx_do_sram_idle(unsigned long save_state)
return 0;
}
-void omap_sram_idle(void)
+__cpuidle void omap_sram_idle(bool rcuidle)
{
/* Variable to tell what needs to be saved and restored
* in omap_sram_idle*/
@@ -254,11 +255,18 @@ void omap_sram_idle(void)
*/
if (save_state)
omap34xx_save_context(omap3_arm_context);
+
+ if (rcuidle)
+ ct_cpuidle_enter();
+
if (save_state == 1 || save_state == 3)
cpu_suspend(save_state, omap34xx_do_sram_idle);
else
omap34xx_do_sram_idle(save_state);
+ if (rcuidle)
+ ct_cpuidle_exit();
+
/* Restore normal SDRC POWER settings */
if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
(omap_type() == OMAP2_DEVICE_TYPE_EMU ||
@@ -294,7 +302,7 @@ static void omap3_pm_idle(void)
if (omap_irq_pending())
return;
- omap_sram_idle();
+ omap3_do_wfi();
}
#ifdef CONFIG_SUSPEND
@@ -316,7 +324,7 @@ static int omap3_pm_suspend(void)
omap3_intc_suspend();
- omap_sram_idle();
+ omap_sram_idle(false);
restore:
/* Restore next_pwrsts */
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 5a7a949ae965..f57802f3ee3a 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -76,7 +76,7 @@ static int omap4_pm_suspend(void)
* domain CSWR is not supported by hardware.
* More details can be found in OMAP4430 TRM section 4.3.4.2.
*/
- omap4_enter_lowpower(cpu_id, cpu_suspend_state);
+ omap4_enter_lowpower(cpu_id, cpu_suspend_state, false);
/* Restore next powerdomain state */
list_for_each_entry(pwrst, &pwrst_list, node) {
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index fd974514a7b2..777f9f8e7cd8 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -187,9 +187,9 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
trace_state = (PWRDM_TRACE_STATES_FLAG |
((next & OMAP_POWERSTATE_MASK) << 8) |
((prev & OMAP_POWERSTATE_MASK) << 0));
- trace_power_domain_target_rcuidle(pwrdm->name,
- trace_state,
- raw_smp_processor_id());
+ trace_power_domain_target(pwrdm->name,
+ trace_state,
+ raw_smp_processor_id());
}
break;
default:
@@ -541,8 +541,8 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
/* Trace the pwrdm desired target state */
- trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
- raw_smp_processor_id());
+ trace_power_domain_target(pwrdm->name, pwrst,
+ raw_smp_processor_id());
/* Program the pwrdm desired target state */
ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
}
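
(Sketch, not part of the patch.) The trace_..._rcuidle() variants existed for call sites that could fire while RCU was not watching; conceptually they wrap the plain tracepoint like the hypothetical expansion below (not the literal generated macro). With ct_cpuidle_enter() now bracketing only the uninstrumented core, these sites always run with RCU watching, so the plain tracepoints suffice:

	/* Conceptual expansion of a trace_foo_rcuidle() variant: */
	static inline void trace_foo_rcuidle(int arg)
	{
		ct_irq_enter_irqson();	/* make RCU watch for the event */
		trace_foo(arg);		/* the ordinary tracepoint */
		ct_irq_exit_irqson();	/* back to not watching */
	}
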
diff --git a/arch/arm/mach-s3c/cpuidle-s3c64xx.c b/arch/arm/mach-s3c/cpuidle-s3c64xx.c
index b1c5f43d4922..27a13cc27893 100644
--- a/arch/arm/mach-s3c/cpuidle-s3c64xx.c
+++ b/arch/arm/mach-s3c/cpuidle-s3c64xx.c
@@ -19,9 +19,8 @@
#include "regs-sys-s3c64xx.h"
#include "regs-syscon-power-s3c64xx.h"
-static int s3c64xx_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int s3c64xx_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
unsigned long tmp;
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 4150e308e99c..42e19fff40ee 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -62,15 +62,15 @@ int acpi_processor_ffh_lpi_probe(unsigned int cpu)
return psci_acpi_cpu_init_idle(cpu);
}
-int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+__cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
u32 state = lpi->address;
if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
- return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter,
lpi->index, state);
else
- return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
+ return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter,
lpi->index, state);
}
#endif
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
index a2cfbacec2bb..c1125753fe9b 100644
--- a/arch/arm64/kernel/idle.c
+++ b/arch/arm64/kernel/idle.c
@@ -42,5 +42,4 @@ void noinstr arch_cpu_idle(void)
* tricks
*/
cpu_do_idle();
- raw_local_irq_enable();
}
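
(Sketch, not part of the patch.) Each of these per-architecture hunks implements the same convention change: arch_cpu_idle() is now entered and exited with IRQs disabled, and enabling interrupts becomes the generic idle loop's job. A simplified caller's view, assuming the real logic in kernel/sched/idle.c:

	static void example_idle_call(void)
	{
		/* IRQs are off on entry */
		arch_cpu_idle();		/* may enable IRQs internally so a
						 * wakeup can be taken, but must
						 * disable them again before returning */
		/* IRQs are still off here; the idle loop enables them explicitly */
		raw_local_irq_enable();
	}
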
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ffc5d76cf695..4e8327264255 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -865,7 +865,7 @@ static void do_handle_IPI(int ipinr)
unsigned int cpu = smp_processor_id();
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+ trace_ipi_entry(ipi_types[ipinr]);
switch (ipinr) {
case IPI_RESCHEDULE:
@@ -914,7 +914,7 @@ static void do_handle_IPI(int ipinr)
}
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+ trace_ipi_exit(ipi_types[ipinr]);
}
static irqreturn_t ipi_handler(int irq, void *data)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index e7163f31f716..0fbdf5fe64d8 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -4,6 +4,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pgtable.h>
+#include <linux/cpuidle.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
@@ -104,6 +105,10 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* From this point debug exceptions are disabled to prevent
* updates to mdscr register (saved and restored along with
* general purpose registers) from kernel debuggers.
+ *
+ * Strictly speaking the trace_hardirqs_off() here is superfluous;
+ * hardirqs should be firmly off by now. This really ought to use
+ * something like raw_local_daif_save().
*/
flags = local_daif_save();
@@ -120,6 +125,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
*/
arm_cpuidle_save_irq_context(&context);
+ ct_cpuidle_enter();
+
if (__cpu_suspend_enter(&state)) {
/* Call the suspend finisher */
ret = fn(arg);
@@ -133,8 +140,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
*/
if (!ret)
ret = -EOPNOTSUPP;
+
+ ct_cpuidle_exit();
} else {
- RCU_NONIDLE(__cpu_suspend_exit());
+ ct_cpuidle_exit();
+ __cpu_suspend_exit();
}
arm_cpuidle_restore_irq_context(&context);
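
(Sketch, not part of the patch.) RCU_NONIDLE() was needed when __cpu_suspend_exit() could run while RCU was still idle; roughly, it re-entered RCU around the expression:

	/* Roughly what RCU_NONIDLE(expr) provided (conceptual): */
	ct_irq_enter_irqson();	/* RCU watches again */
	expr;
	ct_irq_exit_irqson();	/* back to idle */

Now that cpu_suspend() itself brackets the firmware call with ct_cpuidle_enter()/ct_cpuidle_exit() on every return path, RCU is already watching by the time __cpu_suspend_exit() runs, and the wrapper can go.
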
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 4c13dafc98b8..2777214cbf1a 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -175,7 +175,6 @@ SECTIONS
ENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
HYPERVISOR_TEXT
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index 2b0ed515a88e..0c6e4b17fe00 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -100,6 +100,5 @@ void arch_cpu_idle(void)
#ifdef CONFIG_CPU_PM_STOP
asm volatile("stop\n");
#endif
- raw_local_irq_enable();
}
#endif
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 4b605aa2e1d6..b45d1073307f 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -309,7 +309,7 @@ void arch_cpu_idle_dead(void)
while (!secondary_stack)
arch_cpu_idle();
- local_irq_disable();
+ raw_local_irq_disable();
asm volatile(
"mov sp, %0\n"
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index 68c980d08482..d718961786d2 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -34,7 +34,6 @@ SECTIONS
SOFTIRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index e15eeaebd785..dd7f74ea2c20 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -44,7 +44,6 @@ void arch_cpu_idle(void)
{
__vmwait();
/* interrupts wake us up, but irqs are still disabled */
- raw_local_irq_enable();
}
/*
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 57465bff1fe4..1140051a0c45 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -41,7 +41,6 @@ SECTIONS
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 416305e550e2..f6195a0a00ae 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -242,6 +242,7 @@ void arch_cpu_idle(void)
(*mark_idle)(1);
raw_safe_halt();
+ raw_local_irq_disable();
if (mark_idle)
(*mark_idle)(0);
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index fa9c0ab8c6fc..83ef044b63ef 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include <linux/sched/cputime.h>
+#include <asm/cputime.h>
#include <asm/delay.h>
#include <asm/efi.h>
#include <asm/hw_irq.h>
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 9b265783be6a..53dfde161c8a 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -51,7 +51,6 @@ SECTIONS {
__end_ivt_text = .;
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
index 1a65d0527d25..0b5dd2faeb90 100644
--- a/arch/loongarch/kernel/idle.c
+++ b/arch/loongarch/kernel/idle.c
@@ -13,4 +13,5 @@ void __cpuidle arch_cpu_idle(void)
{
raw_local_irq_enable();
__arch_cpu_idle(); /* idle instruction needs irq enabled */
+ raw_local_irq_disable();
}
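
(Sketch, not part of the patch.) LoongArch, like MIPS r4k, SH and OpenRISC below, uses an idle instruction that only wakes when IRQs are enabled, so it enables around the instruction and disables again to honour the new arch_cpu_idle() contract:

	static void example_wait_idle(void)
	{
		raw_local_irq_enable();		/* the wait insn needs IRQs on to wake */
		example_wait_insn();		/* hypothetical architectural idle insn */
		raw_local_irq_disable();	/* restore the IRQs-off-on-return rule */
	}

Unlike x86's single sti;hlt pair (see the irqflags.h hunk further down), the enable and the wait are separate instructions here, so an interrupt can slip in between; architectures that care close that window in the low-level wait implementation (MIPS, for instance, rolls back into __r4k_wait from its interrupt entry).
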
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index 733b16e8d55d..78506b31ba61 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -43,7 +43,6 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
index 387f334e87d3..2624fc18c131 100644
--- a/arch/m68k/kernel/vmlinux-nommu.lds
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -48,7 +48,6 @@ SECTIONS {
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
. = ALIGN(16);
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index ed1d9eda3190..1ccdd04ae462 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -19,7 +19,6 @@ SECTIONS
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 4a52f44f2ef0..f13ddcc2af5c 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -19,7 +19,6 @@ SECTIONS
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.gnu.warning)
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 1f802aab2b96..56342e11442d 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -140,5 +140,4 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
void arch_cpu_idle(void)
{
- raw_local_irq_enable();
}
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index fb31747ec092..ae50d3d04a7d 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -36,7 +36,6 @@ SECTIONS {
EXIT_TEXT
EXIT_CALL
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 53adcc1b2ed5..5abc8b7340f8 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -33,13 +33,13 @@ static void __cpuidle r3081_wait(void)
{
unsigned long cfg = read_c0_conf();
write_c0_conf(cfg | R30XX_CONF_HALT);
- raw_local_irq_enable();
}
void __cpuidle r4k_wait(void)
{
raw_local_irq_enable();
__r4k_wait();
+ raw_local_irq_disable();
}
/*
@@ -57,7 +57,6 @@ void __cpuidle r4k_wait_irqoff(void)
" .set arch=r4000 \n"
" wait \n"
" .set pop \n");
- raw_local_irq_enable();
}
/*
@@ -77,7 +76,6 @@ static void __cpuidle rm7k_wait_irqoff(void)
" wait \n"
" mtc0 $1, $12 # stalls until W stage \n"
" .set pop \n");
- raw_local_irq_enable();
}
/*
@@ -103,6 +101,8 @@ static void __cpuidle au1k_wait(void)
" nop \n"
" .set pop \n"
: : "r" (au1k_wait), "r" (c0status));
+
+ raw_local_irq_disable();
}
static int __initdata nowait;
@@ -241,18 +241,16 @@ void __init check_wait(void)
}
}
-void arch_cpu_idle(void)
+__cpuidle void arch_cpu_idle(void)
{
if (cpu_wait)
cpu_wait();
- else
- raw_local_irq_enable();
}
#ifdef CONFIG_CPU_IDLE
-int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int index)
+__cpuidle int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
{
arch_cpu_idle();
return index;
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 1f98947fe715..52cbde60edf5 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -61,7 +61,6 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index 29593b98567d..f84021303f6a 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -33,7 +33,6 @@ EXPORT_SYMBOL(pm_power_off);
void arch_cpu_idle(void)
{
- raw_local_irq_enable();
}
/*
diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S
index 126e114744cb..37b958055064 100644
--- a/arch/nios2/kernel/vmlinux.lds.S
+++ b/arch/nios2/kernel/vmlinux.lds.S
@@ -24,7 +24,6 @@ SECTIONS
.text : {
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index f94b5ec06786..dfa558f98ed8 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -102,6 +102,7 @@ void arch_cpu_idle(void)
raw_local_irq_enable();
if (mfspr(SPR_UPR) & SPR_UPR_PMP)
mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
+ raw_local_irq_disable();
}
void (*pm_power_off)(void) = NULL;
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index d5c7bb0fae57..bc1306047837 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -52,7 +52,6 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index c4f8374c7018..c064719b49b0 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -183,8 +183,6 @@ void arch_cpu_idle_dead(void)
void __cpuidle arch_cpu_idle(void)
{
- raw_local_irq_enable();
-
/* nop on real hardware, qemu will idle sleep. */
asm volatile("or %%r10,%%r10,%%r10\n":::);
}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 2769eb991f58..1aaa2ca09800 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -86,7 +86,6 @@ SECTIONS
TEXT_TEXT
LOCK_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 77cd4c5a2d63..b9a725abc596 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -51,10 +51,9 @@ void arch_cpu_idle(void)
* Some power_save functions return with
* interrupts enabled, some don't.
*/
- if (irqs_disabled())
- raw_local_irq_enable();
+ if (!irqs_disabled())
+ raw_local_irq_disable();
} else {
- raw_local_irq_enable();
/*
* Go into low thread priority and possibly
* low power mode.
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 958e77a24f85..f128c7cf9c1d 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -112,7 +112,6 @@ SECTIONS
#endif
NOINSTR_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 8955f2432c2d..774ffde386ab 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -39,7 +39,6 @@ extern asmlinkage void ret_from_kernel_thread(void);
void arch_cpu_idle(void)
{
cpu_do_idle();
- raw_local_irq_enable();
}
void __show_regs(struct pt_regs *regs)
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 75e0fa8a700a..eab9edc3b631 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -39,7 +39,6 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
ENTRY_TEXT
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 4e6c88aa4d87..643ab60e9efb 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -42,7 +42,6 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
ENTRY_TEXT
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 4bf1ee293f2b..b04fb418307c 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -12,9 +12,9 @@
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
-#include <linux/sched/cputime.h>
#include <trace/events/power.h>
#include <asm/cpu_mf.h>
+#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"
@@ -66,7 +66,6 @@ void arch_cpu_idle(void)
idle->idle_count++;
account_idle_time(cputime_to_nsecs(idle_time));
raw_write_seqcount_end(&idle->seqcount);
- raw_local_irq_enable();
}
static ssize_t show_idle_count(struct device *dev,
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index cbf9c1b0beda..20262e3c0cff 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -44,7 +44,6 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 9436f3053b88..e0a88dcaf5cb 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -7,13 +7,13 @@
*/
#include <linux/kernel_stat.h>
-#include <linux/sched/cputime.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/types.h>
#include <linux/time.h>
#include <asm/alternative.h>
+#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
#include <asm/cpu_mf.h>
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index f59814983bd5..3418c40f0099 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -25,6 +25,7 @@ void default_idle(void)
raw_local_irq_enable();
/* Isn't this racy? */
cpu_sleep();
+ raw_local_irq_disable();
clear_bl_bit();
}
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index b6276a3521d7..9644fe187a3f 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -30,7 +30,6 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 396f46bca52e..6c00cbad7fb5 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -57,6 +57,8 @@ static void pmc_leon_idle_fixup(void)
"lda [%0] %1, %%g0\n"
:
: "r"(address), "i"(ASI_LEON_BYPASS));
+
+ raw_local_irq_disable();
}
/*
@@ -70,6 +72,8 @@ static void pmc_leon_idle(void)
/* For systems without power-down, this will be no-op */
__asm__ __volatile__ ("wr %g0, %asr19\n\t");
+
+ raw_local_irq_disable();
}
/* Install LEON Power Down function */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 33b0215a4182..9c7c662cb565 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -71,7 +71,6 @@ void arch_cpu_idle(void)
{
if (sparc_idle)
(*sparc_idle)();
- raw_local_irq_enable();
}
/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 6335b698a4b4..91c2b8124527 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -59,7 +59,6 @@ void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
- raw_local_irq_enable();
} else {
unsigned long pstate;
@@ -90,6 +89,8 @@ void arch_cpu_idle(void)
"wrpr %0, %%g0, %%pstate"
: "=&r" (pstate)
: "i" (PSTATE_IE));
+
+ raw_local_irq_disable();
}
}
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index d55ae65a07ad..d317a843f7ea 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -50,7 +50,6 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index 2b7fc5b54164..3385d653ebd0 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -74,7 +74,6 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 47830ade35ed..106b7da2f8d6 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -218,7 +218,6 @@ void arch_cpu_idle(void)
{
cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
um_idle_sleep();
- raw_local_irq_enable();
}
int __cant_sleep(void) {
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 71a59b8adbdc..5c92d58a78e8 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -35,7 +35,6 @@ SECTIONS
_stext = .;
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index 112b2375d021..b22f34b8684a 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -34,6 +34,7 @@ SECTIONS
_text = .; /* Text */
*(.text)
*(.text.*)
+ *(.noinstr.text)
_etext = . ;
}
.rodata : {
diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S
index f9eb1134f22d..ad0d51f03cb4 100644
--- a/arch/x86/coco/tdx/tdcall.S
+++ b/arch/x86/coco/tdx/tdcall.S
@@ -31,6 +31,8 @@
TDX_R12 | TDX_R13 | \
TDX_R14 | TDX_R15 )
+.section .noinstr.text, "ax"
+
/*
* __tdx_module_call() - Used by TDX guests to request services from
* the TDX module (does not include VMM services) using TDCALL instruction.
@@ -139,19 +141,6 @@ SYM_FUNC_START(__tdx_hypercall)
movl $TDVMCALL_EXPOSE_REGS_MASK, %ecx
- /*
- * For the idle loop STI needs to be called directly before the TDCALL
- * that enters idle (EXIT_REASON_HLT case). STI instruction enables
- * interrupts only one instruction later. If there is a window between
- * STI and the instruction that emulates the HALT state, there is a
- * chance for interrupts to happen in this window, which can delay the
- * HLT operation indefinitely. Since this is the not the desired
- * result, conditionally call STI before TDCALL.
- */
- testq $TDX_HCALL_ISSUE_STI, %rsi
- jz .Lskip_sti
- sti
-.Lskip_sti:
tdcall
/*
diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index 669d9e4f2901..3bd111d5e6a0 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -64,8 +64,9 @@ static inline u64 _tdx_hypercall(u64 fn, u64 r12, u64 r13, u64 r14, u64 r15)
}
/* Called from __tdx_hypercall() for unrecoverable failure */
-void __tdx_hypercall_failed(void)
+noinstr void __tdx_hypercall_failed(void)
{
+ instrumentation_begin();
panic("TDVMCALL failed. TDX module bug?");
}
@@ -75,7 +76,7 @@ void __tdx_hypercall_failed(void)
* Reusing the KVM EXIT_REASON macros makes it easier to connect the host and
* guest sides of these calls.
*/
-static u64 hcall_func(u64 exit_reason)
+static __always_inline u64 hcall_func(u64 exit_reason)
{
return exit_reason;
}
@@ -220,7 +221,7 @@ static int ve_instr_len(struct ve_info *ve)
}
}
-static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
+static u64 __cpuidle __halt(const bool irq_disabled)
{
struct tdx_hypercall_args args = {
.r10 = TDX_HYPERCALL_STANDARD,
@@ -240,20 +241,14 @@ static u64 __cpuidle __halt(const bool irq_disabled, const bool do_sti)
* can keep the vCPU in virtual HLT, even if an IRQ is
* pending, without hanging/breaking the guest.
*/
- return __tdx_hypercall(&args, do_sti ? TDX_HCALL_ISSUE_STI : 0);
+ return __tdx_hypercall(&args, 0);
}
static int handle_halt(struct ve_info *ve)
{
- /*
- * Since non safe halt is mainly used in CPU offlining
- * and the guest will always stay in the halt state, don't
- * call the STI instruction (set do_sti as false).
- */
const bool irq_disabled = irqs_disabled();
- const bool do_sti = false;
- if (__halt(irq_disabled, do_sti))
+ if (__halt(irq_disabled))
return -EIO;
return ve_instr_len(ve);
@@ -261,18 +256,12 @@ static int handle_halt(struct ve_info *ve)
void __cpuidle tdx_safe_halt(void)
{
- /*
- * For do_sti=true case, __tdx_hypercall() function enables
- * interrupts using the STI instruction before the TDCALL. So
- * set irq_disabled as false.
- */
const bool irq_disabled = false;
- const bool do_sti = true;
/*
* Use WARN_ONCE() to report the failure.
*/
- if (__halt(irq_disabled, do_sti))
+ if (__halt(irq_disabled))
WARN_ONCE(1, "HLT instruction emulation failed\n");
}
diff --git a/arch/x86/events/amd/brs.c b/arch/x86/events/amd/brs.c
index 58461fa18b6f..ed308719236c 100644
--- a/arch/x86/events/amd/brs.c
+++ b/arch/x86/events/amd/brs.c
@@ -41,18 +41,15 @@ static inline unsigned int brs_to(int idx)
return MSR_AMD_SAMP_BR_FROM + 2 * idx + 1;
}
-static inline void set_debug_extn_cfg(u64 val)
+static __always_inline void set_debug_extn_cfg(u64 val)
{
/* bits[4:3] must always be set to 11b */
- wrmsrl(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
+ __wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
}
-static inline u64 get_debug_extn_cfg(void)
+static __always_inline u64 get_debug_extn_cfg(void)
{
- u64 val;
-
- rdmsrl(MSR_AMD_DBG_EXTN_CFG, val);
- return val;
+ return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
}
static bool __init amd_brs_detect(void)
@@ -405,7 +402,7 @@ void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_i
* called from ACPI processor_idle.c or acpi_pad.c
* with interrupts disabled
*/
-void perf_amd_brs_lopwr_cb(bool lopwr_in)
+void noinstr perf_amd_brs_lopwr_cb(bool lopwr_in)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
union amd_debug_extn_cfg cfg;
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 5efd01b548d1..808b4eece251 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -71,7 +71,7 @@ ATOMIC64_DECL(add_unless);
* the old value.
*/
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
return arch_cmpxchg64(&v->counter, o, n);
}
@@ -85,7 +85,7 @@ static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
* Atomically xchgs the value of @v to @n and returns
* the old value.
*/
-static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
+static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
s64 o;
unsigned high = (unsigned)(n >> 32);
@@ -104,7 +104,7 @@ static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
*
* Atomically sets the value of @v to @n.
*/
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
unsigned high = (unsigned)(i >> 32);
unsigned low = (unsigned)i;
@@ -119,7 +119,7 @@ static inline void arch_atomic64_set(atomic64_t *v, s64 i)
*
* Atomically reads the value of @v and returns it.
*/
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 r;
alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
@@ -133,7 +133,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
*
* Atomically adds @i to @v and returns @i + *@v
*/
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
alternative_atomic64(add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -145,7 +145,7 @@ static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
/*
* Other variants with different arithmetic operators:
*/
-static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
alternative_atomic64(sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -154,7 +154,7 @@ static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
}
#define arch_atomic64_sub_return arch_atomic64_sub_return
-static inline s64 arch_atomic64_inc_return(atomic64_t *v)
+static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
s64 a;
alternative_atomic64(inc_return, "=&A" (a),
@@ -163,7 +163,7 @@ static inline s64 arch_atomic64_inc_return(atomic64_t *v)
}
#define arch_atomic64_inc_return arch_atomic64_inc_return
-static inline s64 arch_atomic64_dec_return(atomic64_t *v)
+static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
s64 a;
alternative_atomic64(dec_return, "=&A" (a),
@@ -179,7 +179,7 @@ static inline s64 arch_atomic64_dec_return(atomic64_t *v)
*
* Atomically adds @i to @v.
*/
-static inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
{
__alternative_atomic64(add, add_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -194,7 +194,7 @@ static inline s64 arch_atomic64_add(s64 i, atomic64_t *v)
*
* Atomically subtracts @i from @v.
*/
-static inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
{
__alternative_atomic64(sub, sub_return,
ASM_OUTPUT2("+A" (i), "+c" (v)),
@@ -208,7 +208,7 @@ static inline s64 arch_atomic64_sub(s64 i, atomic64_t *v)
*
* Atomically increments @v by 1.
*/
-static inline void arch_atomic64_inc(atomic64_t *v)
+static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
__alternative_atomic64(inc, inc_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx");
@@ -221,7 +221,7 @@ static inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
-static inline void arch_atomic64_dec(atomic64_t *v)
+static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
__alternative_atomic64(dec, dec_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx");
@@ -237,7 +237,7 @@ static inline void arch_atomic64_dec(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if the add was done, zero otherwise.
*/
-static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
unsigned low = (unsigned)u;
unsigned high = (unsigned)(u >> 32);
@@ -248,7 +248,7 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
}
#define arch_atomic64_add_unless arch_atomic64_add_unless
-static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
+static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
int r;
alternative_atomic64(inc_not_zero, "=&a" (r),
@@ -257,7 +257,7 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
-static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 r;
alternative_atomic64(dec_if_positive, "=&A" (r),
@@ -269,7 +269,7 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
#undef alternative_atomic64
#undef __alternative_atomic64
-static inline void arch_atomic64_and(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
s64 old, c = 0;
@@ -277,7 +277,7 @@ static inline void arch_atomic64_and(s64 i, atomic64_t *v)
c = old;
}
-static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
s64 old, c = 0;
@@ -288,7 +288,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-static inline void arch_atomic64_or(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
s64 old, c = 0;
@@ -296,7 +296,7 @@ static inline void arch_atomic64_or(s64 i, atomic64_t *v)
c = old;
}
-static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
s64 old, c = 0;
@@ -307,7 +307,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
s64 old, c = 0;
@@ -315,7 +315,7 @@ static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
c = old;
}
-static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
s64 old, c = 0;
@@ -326,7 +326,7 @@ static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
s64 old, c = 0;
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 7886d0578fc9..c496595bf601 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -17,7 +17,7 @@
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return __READ_ONCE((v)->counter);
}
@@ -29,7 +29,7 @@ static inline s64 arch_atomic64_read(const atomic64_t *v)
*
* Atomically sets the value of @v to @i.
*/
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__WRITE_ONCE(v->counter, i);
}
@@ -55,7 +55,7 @@ static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
*
* Atomically subtracts @i from @v.
*/
-static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter)
@@ -71,7 +71,7 @@ static inline void arch_atomic64_sub(s64 i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
-static inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
+static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
@@ -113,7 +113,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
+static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
@@ -127,7 +127,7 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
+static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
@@ -142,7 +142,7 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-static inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
+static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
@@ -161,25 +161,25 @@ static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
}
#define arch_atomic64_add_return arch_atomic64_add_return
-static inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-static inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return arch_cmpxchg(&v->counter, old, new);
}
@@ -191,13 +191,13 @@ static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
-static inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
+static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg
-static inline void arch_atomic64_and(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "andq %1,%0"
: "+m" (v->counter)
@@ -205,7 +205,7 @@ static inline void arch_atomic64_and(s64 i, atomic64_t *v)
: "memory");
}
-static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
@@ -215,7 +215,7 @@ static inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-static inline void arch_atomic64_or(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "orq %1,%0"
: "+m" (v->counter)
@@ -223,7 +223,7 @@ static inline void arch_atomic64_or(s64 i, atomic64_t *v)
: "memory");
}
-static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
@@ -233,7 +233,7 @@ static inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "xorq %1,%0"
: "+m" (v->counter)
@@ -241,7 +241,7 @@ static inline void arch_atomic64_xor(s64 i, atomic64_t *v)
: "memory");
}
-static inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
s64 val = arch_atomic64_read(v);
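
(Sketch, not part of the patch.) The blanket static inline to __always_inline conversion is about noinstr validity rather than performance: a static inline helper may still be emitted out of line when the compiler declines to inline it, and that out-of-line copy lands in regular .text where it can be traced or kprobed, which objtool then flags when it is called from noinstr code. Forcing the inline removes the choice:

	static __always_inline s64 example_read(const atomic64_t *v)
	{
		return __READ_ONCE(v->counter);	/* safe from noinstr context */
	}

	noinstr void example_noinstr_user(atomic64_t *v)
	{
		(void)example_read(v);		/* inlined; no call into .text emitted */
	}
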
diff --git a/arch/x86/include/asm/fpu/xcr.h b/arch/x86/include/asm/fpu/xcr.h
index 9656a5bc6fea..9a710c060445 100644
--- a/arch/x86/include/asm/fpu/xcr.h
+++ b/arch/x86/include/asm/fpu/xcr.h
@@ -5,7 +5,7 @@
#define XCR_XFEATURE_ENABLED_MASK 0x00000000
#define XCR_XFEATURE_IN_USE_MASK 0x00000001
-static inline u64 xgetbv(u32 index)
+static __always_inline u64 xgetbv(u32 index)
{
u32 eax, edx;
@@ -27,7 +27,7 @@ static inline void xsetbv(u32 index, u64 value)
*
* Callers should check X86_FEATURE_XGETBV1.
*/
-static inline u64 xfeatures_in_use(void)
+static __always_inline u64 xfeatures_in_use(void)
{
return xgetbv(XCR_XFEATURE_IN_USE_MASK);
}
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 7793e52d6237..8c5ae649d2df 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -8,9 +8,6 @@
#include <asm/nospec-branch.h>
-/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
-#define __cpuidle __section(".cpuidle.text")
-
/*
* Interrupt control:
*/
@@ -45,13 +42,13 @@ static __always_inline void native_irq_enable(void)
asm volatile("sti": : :"memory");
}
-static inline __cpuidle void native_safe_halt(void)
+static __always_inline void native_safe_halt(void)
{
mds_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory");
}
-static inline __cpuidle void native_halt(void)
+static __always_inline void native_halt(void)
{
mds_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory");
@@ -84,7 +81,7 @@ static __always_inline void arch_local_irq_enable(void)
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
-static inline __cpuidle void arch_safe_halt(void)
+static __always_inline void arch_safe_halt(void)
{
native_safe_halt();
}
@@ -93,7 +90,7 @@ static inline __cpuidle void arch_safe_halt(void)
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
-static inline __cpuidle void halt(void)
+static __always_inline void halt(void)
{
native_halt();
}
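
(Sketch, not part of the patch.) native_safe_halt() relies on the STI shadow: interrupts enabled by sti are only delivered once the following instruction has begun executing, so the pair cannot lose a wakeup between enabling and halting. Making these helpers __always_inline keeps them callable from noinstr code:

	static __always_inline void example_safe_halt(void)
	{
		/*
		 * The STI shadow delays interrupt delivery by one
		 * instruction, so no IRQ can land between the sti and
		 * the hlt; a pending IRQ instead wakes the hlt at once.
		 */
		asm volatile("sti; hlt" : : : "memory");
	}
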
diff --git a/arch/x86/include/asm/kvmclock.h b/arch/x86/include/asm/kvmclock.h
index 6c5765192102..511b35069187 100644
--- a/arch/x86/include/asm/kvmclock.h
+++ b/arch/x86/include/asm/kvmclock.h
@@ -8,7 +8,7 @@ extern struct clocksource kvm_clock;
DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+static __always_inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
return &this_cpu_read(hv_clock_per_cpu)->pvti;
}
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 3a8fdf881313..778df05f8539 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -26,7 +26,7 @@
#define TPAUSE_C01_STATE 1
#define TPAUSE_C02_STATE 0
-static inline void __monitor(const void *eax, unsigned long ecx,
+static __always_inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
{
/* "monitor %eax, %ecx, %edx;" */
@@ -34,7 +34,7 @@ static inline void __monitor(const void *eax, unsigned long ecx,
:: "a" (eax), "c" (ecx), "d"(edx));
}
-static inline void __monitorx(const void *eax, unsigned long ecx,
+static __always_inline void __monitorx(const void *eax, unsigned long ecx,
unsigned long edx)
{
/* "monitorx %eax, %ecx, %edx;" */
@@ -42,7 +42,7 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
:: "a" (eax), "c" (ecx), "d"(edx));
}
-static inline void __mwait(unsigned long eax, unsigned long ecx)
+static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
mds_idle_clear_cpu_buffers();
@@ -77,8 +77,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
* EAX (logical) address to monitor
* ECX #GP if not zero
*/
-static inline void __mwaitx(unsigned long eax, unsigned long ebx,
- unsigned long ecx)
+static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
+ unsigned long ecx)
{
/* No MDS buffer clear as this is AMD/HYGON only */
@@ -87,7 +87,7 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
:: "a" (eax), "b" (ebx), "c" (ecx));
}
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */
@@ -105,7 +105,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
* New with Core Duo processors, MWAIT can take some hints based on CPU
* capability.
*/
-static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 771b0a2b7a34..e04313e89f4f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -564,7 +564,7 @@ static __always_inline void mds_user_clear_cpu_buffers(void)
*
* Clear CPU buffers if the corresponding static key is enabled
*/
-static inline void mds_idle_clear_cpu_buffers(void)
+static __always_inline void mds_idle_clear_cpu_buffers(void)
{
if (static_branch_likely(&mds_idle_clear))
mds_clear_cpu_buffers();
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 73e9522db7c1..cf40e813b3d7 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -26,7 +26,7 @@ DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
void paravirt_set_sched_clock(u64 (*func)(void));
-static inline u64 paravirt_sched_clock(void)
+static __always_inline u64 paravirt_sched_clock(void)
{
return static_call(pv_sched_clock)();
}
@@ -168,7 +168,7 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}
-static inline void arch_safe_halt(void)
+static __always_inline void arch_safe_halt(void)
{
PVOP_VCALL0(irq.safe_halt);
}
@@ -178,7 +178,9 @@ static inline void halt(void)
PVOP_VCALL0(irq.halt);
}
-static inline void wbinvd(void)
+extern noinstr void pv_native_wbinvd(void);
+
+static __always_inline void wbinvd(void)
{
PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
}
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 6496bdbcac98..8fc15ed5e60b 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -586,7 +586,7 @@ extern void perf_amd_brs_lopwr_cb(bool lopwr_in);
DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);
-static inline void perf_lopwr_cb(bool lopwr_in)
+static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
static_call_mod(perf_lopwr_cb)(lopwr_in);
}
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 19b695ff2c68..0c92db84469d 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -7,6 +7,7 @@
/* some helper functions for xen and kvm pv clock sources */
u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u64 pvclock_clocksource_read_nowd(struct pvclock_vcpu_time_info *src);
u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
void pvclock_set_flags(u8 flags);
unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
@@ -39,7 +40,7 @@ bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
* Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
* yielding a 64-bit result.
*/
-static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
+static __always_inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
{
u64 product;
#ifdef __i386__
diff --git a/arch/x86/include/asm/shared/io.h b/arch/x86/include/asm/shared/io.h
index c0ef921c0586..8009d781c2f9 100644
--- a/arch/x86/include/asm/shared/io.h
+++ b/arch/x86/include/asm/shared/io.h
@@ -5,13 +5,13 @@
#include <linux/types.h>
#define BUILDIO(bwl, bw, type) \
-static inline void __out##bwl(type value, u16 port) \
+static __always_inline void __out##bwl(type value, u16 port) \
{ \
asm volatile("out" #bwl " %" #bw "0, %w1" \
: : "a"(value), "Nd"(port)); \
} \
\
-static inline type __in##bwl(u16 port) \
+static __always_inline type __in##bwl(u16 port) \
{ \
type value; \
asm volatile("in" #bwl " %w1, %" #bw "0" \
diff --git a/arch/x86/include/asm/shared/tdx.h b/arch/x86/include/asm/shared/tdx.h
index e53f26228fbb..559176887791 100644
--- a/arch/x86/include/asm/shared/tdx.h
+++ b/arch/x86/include/asm/shared/tdx.h
@@ -8,7 +8,6 @@
#define TDX_HYPERCALL_STANDARD 0
#define TDX_HCALL_HAS_OUTPUT BIT(0)
-#define TDX_HCALL_ISSUE_STI BIT(1)
#define TDX_CPUID_LEAF_ID 0x21
#define TDX_IDENT "IntelTDX "
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 35f709f619fb..f358a23f228d 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -115,7 +115,7 @@ static inline void wrpkru(u32 pkru)
}
#endif
-static inline void native_wbinvd(void)
+static __always_inline void native_wbinvd(void)
{
asm volatile("wbinvd": : :"memory");
}
@@ -179,7 +179,7 @@ static inline void __write_cr4(unsigned long x)
native_write_cr4(x);
}
-static inline void wbinvd(void)
+static __always_inline void wbinvd(void)
{
native_wbinvd();
}
@@ -196,7 +196,7 @@ static inline void load_gs_index(unsigned int selector)
#endif /* CONFIG_PARAVIRT_XXL */
-static inline void clflush(volatile void *__p)
+static __always_inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
@@ -295,7 +295,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
return 0;
}
-static inline void tile_release(void)
+static __always_inline void tile_release(void)
{
/*
* Instruction opcode for TILERELEASE; supported in binutils
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index e5e0fe10c692..a2dd24947eb8 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -382,7 +382,7 @@ MULTI_stack_switch(struct multicall_entry *mcl,
}
#endif
-static inline int
+static __always_inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
return _hypercall2(int, sched_op, cmd, arg);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bca0bd8f4846..85168740f76a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -86,7 +86,7 @@ void update_spec_ctrl_cond(u64 val)
wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
-u64 spec_ctrl_current(void)
+noinstr u64 spec_ctrl_current(void)
{
return this_cpu_read(x86_spec_ctrl_current);
}
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 02039ec3597d..11f83d07925e 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -143,7 +143,7 @@ static __init int parse_no_stealacc(char *arg)
}
early_param("no-steal-acc", parse_no_stealacc);
-static unsigned long long notrace vmware_sched_clock(void)
+static noinstr u64 vmware_sched_clock(void)
{
unsigned long long ns;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 9baa89a8877d..dccce58201b7 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -853,12 +853,12 @@ int fpu__exception_code(struct fpu *fpu, int trap_nr)
* Initialize register state that may prevent entering low-power idle.
* This function will be invoked from the cpuidle driver only when needed.
*/
-void fpu_idle_fpregs(void)
+noinstr void fpu_idle_fpregs(void)
{
/* Note: AMX_TILE being enabled implies XGETBV1 support */
if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) &&
(xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
tile_release();
- fpregs_deactivate(&current->thread.fpu);
+ __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 16333ba1904b..0f35d44c56fe 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -71,12 +71,12 @@ static int kvm_set_wallclock(const struct timespec64 *now)
return -ENODEV;
}
-static u64 kvm_clock_read(void)
+static noinstr u64 kvm_clock_read(void)
{
u64 ret;
preempt_disable_notrace();
- ret = pvclock_clocksource_read(this_cpu_pvti());
+ ret = pvclock_clocksource_read_nowd(this_cpu_pvti());
preempt_enable_notrace();
return ret;
}
@@ -86,7 +86,7 @@ static u64 kvm_clock_get_cycles(struct clocksource *cs)
return kvm_clock_read();
}
-static u64 kvm_sched_clock_read(void)
+static noinstr u64 kvm_sched_clock_read(void)
{
return kvm_clock_read() - kvm_sched_clock_offset;
}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 327757afb027..5bf4f0b2f35d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -216,6 +216,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
native_set_debugreg(regno, val);
}
+noinstr void pv_native_wbinvd(void)
+{
+ native_wbinvd();
+}
+
static noinstr void pv_native_irq_enable(void)
{
native_irq_enable();
@@ -225,6 +230,11 @@ static noinstr void pv_native_irq_disable(void)
{
native_irq_disable();
}
+
+static noinstr void pv_native_safe_halt(void)
+{
+ native_safe_halt();
+}
#endif
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
@@ -256,7 +266,7 @@ struct paravirt_patch_template pv_ops = {
.cpu.read_cr0 = native_read_cr0,
.cpu.write_cr0 = native_write_cr0,
.cpu.write_cr4 = native_write_cr4,
- .cpu.wbinvd = native_wbinvd,
+ .cpu.wbinvd = pv_native_wbinvd,
.cpu.read_msr = native_read_msr,
.cpu.write_msr = native_write_msr,
.cpu.read_msr_safe = native_read_msr_safe,
@@ -290,7 +300,7 @@ struct paravirt_patch_template pv_ops = {
.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
- .irq.safe_halt = native_safe_halt,
+ .irq.safe_halt = pv_native_safe_halt,
.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */
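
(Sketch, not part of the patch.) The pv_native_* wrappers exist because native_safe_halt() and native_wbinvd() are now __always_inline and so no longer have an out-of-line body whose address can be stored in pv_ops; the replacement bodies are noinstr so the call from the idle path stays instrumentation-free:

	/* An addressable, instrumentation-free body for a pv_ops slot: */
	static noinstr void example_pv_safe_halt(void)
	{
		native_safe_halt();	/* expands inline to sti; hlt */
	}
	/* installed as:  .irq.safe_halt = example_pv_safe_halt, */
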
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 40d156a31676..e57cd31bfec4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -24,6 +24,7 @@
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
+#include <linux/static_call.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
@@ -694,7 +695,24 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-static void (*x86_idle)(void);
+/*
+ * We use this if we don't have any better idle routine..
+ */
+void __cpuidle default_idle(void)
+{
+ raw_safe_halt();
+ raw_local_irq_disable();
+}
+#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
+EXPORT_SYMBOL(default_idle);
+#endif
+
+DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);
+
+static bool x86_idle_set(void)
+{
+ return !!static_call_query(x86_idle);
+}
#ifndef CONFIG_SMP
static inline void play_dead(void)
@@ -717,28 +735,17 @@ void arch_cpu_idle_dead(void)
/*
* Called from the generic idle code.
*/
-void arch_cpu_idle(void)
-{
- x86_idle();
-}
-
-/*
- * We use this if we don't have any better idle routine..
- */
-void __cpuidle default_idle(void)
+void __cpuidle arch_cpu_idle(void)
{
- raw_safe_halt();
+ static_call(x86_idle)();
}
-#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
-EXPORT_SYMBOL(default_idle);
-#endif
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
- bool ret = !!x86_idle;
+ bool ret = x86_idle_set();
- x86_idle = default_idle;
+ static_call_update(x86_idle, default_idle);
return ret;
}
@@ -800,13 +807,7 @@ static void amd_e400_idle(void)
default_idle();
- /*
- * The switch back from broadcast mode needs to be called with
- * interrupts disabled.
- */
- raw_local_irq_disable();
tick_broadcast_exit();
- raw_local_irq_enable();
}
/*
@@ -864,12 +865,10 @@ static __cpuidle void mwait_idle(void)
}
__monitor((void *)&current_thread_info()->flags, 0, 0);
- if (!need_resched())
+ if (!need_resched()) {
__sti_mwait(0, 0);
- else
- raw_local_irq_enable();
- } else {
- raw_local_irq_enable();
+ raw_local_irq_disable();
+ }
}
__current_clr_polling();
}
@@ -880,20 +879,20 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
- if (x86_idle || boot_option_idle_override == IDLE_POLL)
+ if (x86_idle_set() || boot_option_idle_override == IDLE_POLL)
return;
if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
pr_info("using AMD E400 aware idle routine\n");
- x86_idle = amd_e400_idle;
+ static_call_update(x86_idle, amd_e400_idle);
} else if (prefer_mwait_c1_over_halt(c)) {
pr_info("using mwait in idle threads\n");
- x86_idle = mwait_idle;
+ static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n");
- x86_idle = tdx_safe_halt;
+ static_call_update(x86_idle, tdx_safe_halt);
} else
- x86_idle = default_idle;
+ static_call_update(x86_idle, default_idle);
}
void amd_e400_c1e_apic_setup(void)
@@ -946,7 +945,7 @@ static int __init idle_setup(char *str)
* To continue to load the CPU idle driver, don't touch
* the boot_option_idle_override.
*/
- x86_idle = default_idle;
+ static_call_update(x86_idle, default_idle);
boot_option_idle_override = IDLE_HALT;
} else if (!strcmp(str, "nomwait")) {
/*
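
(Sketch, not part of the patch.) The x86_idle function pointer becomes a static call: the call site is patched into a direct call, avoiding both the pointer load and a retpoline on the idle hot path. The API pattern in isolation, with example_* names hypothetical:

	#include <linux/static_call.h>

	static void default_op(void) { /* fallback implementation */ }
	static void fancy_op(void)   { /* preferred implementation */ }

	DEFINE_STATIC_CALL_NULL(example_op, default_op); /* typed after default_op, starts NULL */

	static bool example_op_set(void)
	{
		return !!static_call_query(example_op);	/* current target, or NULL */
	}

	static void example_select(bool fancy)
	{
		if (!example_op_set())
			static_call_update(example_op, fancy ? fancy_op : default_op);
	}

	static void example_run(void)
	{
		static_call(example_op)();	/* patched direct call */
	}
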
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index eda37df016f0..56acf53a782a 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -64,7 +64,8 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
return flags & valid_flags;
}
-u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+static __always_inline
+u64 __pvclock_clocksource_read(struct pvclock_vcpu_time_info *src, bool dowd)
{
unsigned version;
u64 ret;
@@ -77,7 +78,7 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
flags = src->flags;
} while (pvclock_read_retry(src, version));
- if (unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
+ if (dowd && unlikely((flags & PVCLOCK_GUEST_STOPPED) != 0)) {
src->flags &= ~PVCLOCK_GUEST_STOPPED;
pvclock_touch_watchdogs();
}
@@ -100,16 +101,25 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
* updating at the same time, and one of them could be slightly behind,
 * making the assumption that last_value always goes forward fail to hold.
*/
- last = atomic64_read(&last_value);
+ last = arch_atomic64_read(&last_value);
do {
- if (ret < last)
+ if (ret <= last)
return last;
- last = atomic64_cmpxchg(&last_value, last, ret);
- } while (unlikely(last != ret));
+ } while (!arch_atomic64_try_cmpxchg(&last_value, &last, ret));
return ret;
}
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+{
+ return __pvclock_clocksource_read(src, true);
+}
+
+noinstr u64 pvclock_clocksource_read_nowd(struct pvclock_vcpu_time_info *src)
+{
+ return __pvclock_clocksource_read(src, false);
+}
+
void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
struct pvclock_vcpu_time_info *vcpu_time,
struct timespec64 *ts)
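
The try_cmpxchg loop above is a lock-free monotonic clamp: concurrent readers may publish out of order, but nobody ever observes the clock going backwards, and the <= test (previously <) also skips a pointless cmpxchg when the new sample merely equals the published one. A self-contained user-space analogue of the same pattern, using C11 atomics (names hypothetical):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint64_t last_value;

	/* Publish 'ret' unless someone already published something newer. */
	static uint64_t clamp_monotonic(uint64_t ret)
	{
		uint64_t last = atomic_load(&last_value);

		do {
			if (ret <= last)  /* a newer (or equal) value is already visible */
				return last;
			/* on failure, 'last' is refreshed with the current value */
		} while (!atomic_compare_exchange_weak(&last_value, &last, ret));

		return ret;
	}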
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a78e73da4a74..8c33936b017d 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -215,7 +215,7 @@ static void __init cyc2ns_init_secondary_cpus(void)
/*
* Scheduler clock - returns current time in nanosec units.
*/
-u64 native_sched_clock(void)
+noinstr u64 native_sched_clock(void)
{
if (static_branch_likely(&__use_tsc)) {
u64 tsc_now = rdtsc();
@@ -248,7 +248,7 @@ u64 native_sched_clock_from_tsc(u64 tsc)
/* We need to define a real function for sched_clock, to override the
weak default version */
#ifdef CONFIG_PARAVIRT
-unsigned long long sched_clock(void)
+noinstr u64 sched_clock(void)
{
return paravirt_sched_clock();
}
@@ -258,8 +258,7 @@ bool using_native_sched_clock(void)
return static_call_query(pv_sched_clock) == native_sched_clock;
}
#else
-unsigned long long
-sched_clock(void) __attribute__((alias("native_sched_clock")));
+u64 sched_clock(void) __attribute__((alias("native_sched_clock")));
bool using_native_sched_clock(void) { return true; }
#endif
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 2e0ee14229bf..25f155205770 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -129,7 +129,6 @@ SECTIONS
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
SOFTIRQENTRY_TEXT
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index dd8cd8831251..a64017602010 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -8,7 +8,7 @@
#include <asm/alternative.h>
#include <asm/export.h>
-.pushsection .noinstr.text, "ax"
+.section .noinstr.text, "ax"
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
@@ -43,7 +43,7 @@ SYM_TYPED_FUNC_START(__memcpy)
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
-SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
+SYM_FUNC_ALIAS(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)
/*
@@ -184,4 +184,3 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
RET
SYM_FUNC_END(memcpy_orig)
-.popsection
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 724bbf83eb5b..02661861e5dd 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -13,6 +13,8 @@
#undef memmove
+.section .noinstr.text, "ax"
+
/*
* Implement memmove(). This can handle overlap between src and dst.
*
@@ -213,5 +215,5 @@ SYM_FUNC_START(__memmove)
SYM_FUNC_END(__memmove)
EXPORT_SYMBOL(__memmove)
-SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
+SYM_FUNC_ALIAS(memmove, __memmove)
EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index fc9ffd3ff3b2..6143b1a6fa2c 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -6,6 +6,8 @@
#include <asm/alternative.h>
#include <asm/export.h>
+.section .noinstr.text, "ax"
+
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
@@ -43,7 +45,7 @@ SYM_FUNC_START(__memset)
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
-SYM_FUNC_ALIAS_WEAK(memset, __memset)
+SYM_FUNC_ALIAS(memset, __memset)
EXPORT_SYMBOL(memset)
/*
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5b1379662877..5cacd4890bab 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1068,7 +1068,7 @@ static const typeof(pv_ops) xen_cpu_ops __initconst = {
.write_cr4 = xen_write_cr4,
- .wbinvd = native_wbinvd,
+ .wbinvd = pv_native_wbinvd,
.read_msr = xen_read_msr,
.write_msr = xen_write_msr,
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 06c3c2fb4b06..6092fea7d651 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -24,7 +24,7 @@ noinstr void xen_force_evtchn_callback(void)
(void)HYPERVISOR_xen_version(0, NULL);
}
-static void xen_safe_halt(void)
+static noinstr void xen_safe_halt(void)
{
/* Blocking includes an implicit local_irq_enable(). */
if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 9ef0a5cca96e..6b8836deb738 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -60,9 +60,17 @@ static u64 xen_clocksource_get_cycles(struct clocksource *cs)
return xen_clocksource_read();
}
-static u64 xen_sched_clock(void)
+static noinstr u64 xen_sched_clock(void)
{
- return xen_clocksource_read() - xen_sched_clock_offset;
+ struct pvclock_vcpu_time_info *src;
+ u64 ret;
+
+ preempt_disable_notrace();
+ src = &__this_cpu_read(xen_vcpu)->time;
+ ret = pvclock_clocksource_read_nowd(src);
+ ret -= xen_sched_clock_offset;
+ preempt_enable_notrace();
+ return ret;
}
static void xen_read_wallclock(struct timespec64 *ts)
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 68e0e2f06d66..a815577d25fd 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -183,6 +183,7 @@ void coprocessor_flush_release_all(struct thread_info *ti)
void arch_cpu_idle(void)
{
platform_idle();
+ raw_local_irq_disable();
}
/*
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 965a3952c47b..c14fd96f459d 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -125,7 +125,6 @@ SECTIONS
ENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
- CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7bf882fcd64b..7f77710c86fc 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -109,8 +109,8 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
static void __cpuidle acpi_safe_halt(void)
{
if (!tif_need_resched()) {
- safe_halt();
- local_irq_disable();
+ raw_safe_halt();
+ raw_local_irq_disable();
}
}
@@ -523,8 +523,11 @@ static int acpi_idle_bm_check(void)
return bm_status;
}
-static void wait_for_freeze(void)
+static __cpuidle void io_idle(unsigned long addr)
{
+ /* IO port based C-state */
+ inb(addr);
+
#ifdef CONFIG_X86
/* No delay is needed if we are in guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -569,9 +572,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
} else if (cx->entry_method == ACPI_CSTATE_HALT) {
acpi_safe_halt();
} else {
- /* IO port based C-state */
- inb(cx->address);
- wait_for_freeze();
+ io_idle(cx->address);
}
perf_lopwr_cb(false);
@@ -593,8 +594,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
if (cx->entry_method == ACPI_CSTATE_HALT)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
- inb(cx->address);
- wait_for_freeze();
+ io_idle(cx->address);
} else
return -ENODEV;
@@ -607,7 +607,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
return 0;
}
-static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
+static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
@@ -642,6 +642,8 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
*/
bool dis_bm = pr->flags.bm_control;
+ instrumentation_begin();
+
/* If we can skip BM, demote to a safe state. */
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
dis_bm = false;
@@ -663,11 +665,11 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
raw_spin_unlock(&c3_lock);
}
- ct_idle_enter();
+ ct_cpuidle_enter();
acpi_idle_do_entry(cx);
- ct_idle_exit();
+ ct_cpuidle_exit();
/* Re-enable bus master arbitration */
if (dis_bm) {
@@ -677,6 +679,8 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
raw_spin_unlock(&c3_lock);
}
+ instrumentation_end();
+
return index;
}
@@ -1219,6 +1223,8 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
state->target_residency = lpi->min_residency;
if (lpi->arch_flags)
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
+ state->flags |= CPUIDLE_FLAG_RCU_IDLE;
state->enter = acpi_idle_lpi_enter;
drv->safe_state_index = i;
}
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 50e726b6c2cf..98f7b3d7d669 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -468,7 +468,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
- trace_rpm_idle_rcuidle(dev, rpmflags);
+ trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -508,7 +508,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
+ trace_rpm_return_int(dev, _THIS_IP_, 0);
return 0;
}
@@ -530,7 +530,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
wake_up_all(&dev->power.wait_queue);
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
@@ -562,7 +562,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- trace_rpm_suspend_rcuidle(dev, rpmflags);
+ trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -713,7 +713,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
@@ -765,7 +765,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- trace_rpm_resume_rcuidle(dev, rpmflags);
+ trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error) {
@@ -935,7 +935,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -1091,7 +1091,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
if (retval < 0) {
return retval;
} else if (retval > 0) {
- trace_rpm_usage_rcuidle(dev, rpmflags);
+ trace_rpm_usage(dev, rpmflags);
return 0;
}
}
@@ -1129,7 +1129,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
if (retval < 0) {
return retval;
} else if (retval > 0) {
- trace_rpm_usage_rcuidle(dev, rpmflags);
+ trace_rpm_usage(dev, rpmflags);
return 0;
}
}
@@ -1212,7 +1212,7 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
} else {
retval = atomic_inc_not_zero(&dev->power.usage_count);
}
- trace_rpm_usage_rcuidle(dev, 0);
+ trace_rpm_usage(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
@@ -1576,7 +1576,7 @@ void pm_runtime_allow(struct device *dev)
if (ret == 0)
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
else if (ret > 0)
- trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
+ trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1646,7 +1646,7 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
} else {
- trace_rpm_usage_rcuidle(dev, 0);
+ trace_rpm_usage(dev, 0);
}
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e62552a75f08..f6d7c6a9a654 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1055,12 +1055,12 @@ static void clk_core_disable(struct clk_core *core)
if (--core->enable_count > 0)
return;
- trace_clk_disable_rcuidle(core);
+ trace_clk_disable(core);
if (core->ops->disable)
core->ops->disable(core->hw);
- trace_clk_disable_complete_rcuidle(core);
+ trace_clk_disable_complete(core);
clk_core_disable(core->parent);
}
@@ -1114,12 +1114,12 @@ static int clk_core_enable(struct clk_core *core)
if (ret)
return ret;
- trace_clk_enable_rcuidle(core);
+ trace_clk_enable(core);
if (core->ops->enable)
ret = core->ops->enable(core->hw);
- trace_clk_enable_complete_rcuidle(core);
+ trace_clk_enable_complete(core);
if (ret) {
clk_core_disable(core->parent);
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 8c758920d699..7cfb980a357d 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -31,8 +31,8 @@
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int arm_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int arm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
/*
* Pass idle state index to arm_cpuidle_suspend which in turn
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index abe51185f243..74972deda0ea 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -64,7 +64,8 @@ static struct cpuidle_driver bl_idle_little_driver = {
.enter = bl_enter_powerdown,
.exit_latency = 700,
.target_residency = 2500,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C1",
.desc = "ARM little-cluster power down",
},
@@ -85,7 +86,8 @@ static struct cpuidle_driver bl_idle_big_driver = {
.enter = bl_enter_powerdown,
.exit_latency = 500,
.target_residency = 2000,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C1",
.desc = "ARM big-cluster power down",
},
@@ -120,15 +122,17 @@ static int notrace bl_powerdown_finisher(unsigned long arg)
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int bl_enter_powerdown(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
cpu_pm_enter();
+ ct_cpuidle_enter();
cpu_suspend(0, bl_powerdown_finisher);
/* signals the MCPM core that CPU is out of low power state */
mcpm_cpu_powered_up();
+ ct_cpuidle_exit();
cpu_pm_exit();
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 01a856971f05..563dba609b98 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -25,9 +25,9 @@
static int (*mvebu_v7_cpu_suspend)(int);
-static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int mvebu_v7_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
int ret;
bool deepidle = false;
@@ -36,7 +36,10 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE)
deepidle = true;
+ ct_cpuidle_enter();
ret = mvebu_v7_cpu_suspend(deepidle);
+ ct_cpuidle_exit();
+
cpu_pm_exit();
if (ret)
@@ -53,6 +56,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.exit_latency = 100,
.power_usage = 50,
.target_residency = 1000,
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.name = "MV CPU IDLE",
.desc = "CPU power down",
},
@@ -61,7 +65,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.exit_latency = 1000,
.power_usage = 5,
.target_residency = 10000,
- .flags = MVEBU_V7_FLAG_DEEP_IDLE,
+ .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE,
.name = "MV CPU DEEP IDLE",
.desc = "CPU and L2 Fabric power down",
},
@@ -76,7 +80,7 @@ static struct cpuidle_driver armada370_idle_driver = {
.exit_latency = 100,
.power_usage = 5,
.target_residency = 1000,
- .flags = MVEBU_V7_FLAG_DEEP_IDLE,
+ .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE,
.name = "Deep Idle",
.desc = "CPU and L2 Fabric power down",
},
@@ -91,6 +95,7 @@ static struct cpuidle_driver armada38x_idle_driver = {
.exit_latency = 10,
.power_usage = 5,
.target_residency = 100,
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.name = "Idle",
.desc = "CPU and SCU power down",
},
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 57bc3e3ae391..312a34ef28dc 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -49,14 +49,9 @@ static inline u32 psci_get_domain_state(void)
return __this_cpu_read(domain_state);
}
-static inline int psci_enter_state(int idx, u32 state)
-{
- return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
-}
-
-static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx,
- bool s2idle)
+static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
{
struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
u32 *states = data->psci_states;
@@ -69,12 +64,10 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
	/* Do runtime PM to manage a hierarchical CPU topology. */
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- ct_irq_exit_irqson();
state = psci_get_domain_state();
if (!state)
@@ -82,12 +75,10 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- ct_irq_exit_irqson();
cpu_pm_exit();
@@ -192,12 +183,12 @@ static void psci_idle_init_cpuhp(void)
pr_warn("Failed %d while setup cpuhp state\n", err);
}
-static int psci_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);
- return psci_enter_state(idx, state[idx]);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}
static const struct of_device_id psci_idle_state_match[] = {
@@ -240,6 +231,7 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
* of a shared state for the domain, assumes the domain states are all
* deeper states.
*/
+ drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
psci_cpuidle_use_cpuhp = true;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index beedf22cbe78..326bca154ac7 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -58,8 +58,8 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
return ret;
}
-static int spm_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int spm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
struct cpuidle_qcom_spm_data *data = container_of(drv, struct cpuidle_qcom_spm_data,
cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 05fe2902df9a..be383f4b6855 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -93,8 +93,8 @@ static int sbi_suspend(u32 state)
return sbi_suspend_finisher(state, 0, 0);
}
-static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
u32 state = states[idx];
@@ -106,9 +106,9 @@ static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
idx, state);
}
-static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx,
- bool s2idle)
+static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
{
struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
u32 *states = data->states;
@@ -121,12 +121,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
	/* Do runtime PM to manage a hierarchical CPU topology. */
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- ct_irq_exit_irqson();
+
+ ct_cpuidle_enter();
if (sbi_is_domain_state_available())
state = sbi_get_domain_state();
@@ -135,12 +135,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
ret = sbi_suspend(state) ? -1 : idx;
- ct_irq_enter_irqson();
+ ct_cpuidle_exit();
+
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- ct_irq_exit_irqson();
cpu_pm_exit();
@@ -251,6 +251,7 @@ static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
* of a shared state for the domain, assumes the domain states are all
* deeper states.
*/
+ drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle =
sbi_enter_s2idle_domain_idle_state;
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 9845629aeb6d..b203a93deac5 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -160,8 +160,8 @@ static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
return 0;
}
-static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
- int index, unsigned int cpu)
+static __cpuidle int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ int index, unsigned int cpu)
{
int err;
@@ -180,9 +180,11 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
}
local_fiq_disable();
- RCU_NONIDLE(tegra_pm_set_cpu_in_lp2());
+ tegra_pm_set_cpu_in_lp2();
cpu_pm_enter();
+ ct_cpuidle_enter();
+
switch (index) {
case TEGRA_C7:
err = tegra_cpuidle_c7_enter();
@@ -197,8 +199,10 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
break;
}
+ ct_cpuidle_exit();
+
cpu_pm_exit();
- RCU_NONIDLE(tegra_pm_clear_cpu_in_lp2());
+ tegra_pm_clear_cpu_in_lp2();
local_fiq_enable();
return err ?: index;
@@ -222,10 +226,11 @@ static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
return index;
}
-static int tegra_cpuidle_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int tegra_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
+ bool do_rcu = drv->states[index].flags & CPUIDLE_FLAG_RCU_IDLE;
unsigned int cpu = cpu_logical_map(dev->cpu);
int ret;
@@ -233,9 +238,13 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
if (dev->states_usage[index].disable)
return -1;
- if (index == TEGRA_C1)
+ if (index == TEGRA_C1) {
+ if (do_rcu)
+ ct_cpuidle_enter();
ret = arm_cpuidle_simple_enter(dev, drv, index);
- else
+ if (do_rcu)
+ ct_cpuidle_exit();
+ } else
ret = tegra_cpuidle_state_enter(dev, index, cpu);
if (ret < 0) {
@@ -285,7 +294,8 @@ static struct cpuidle_driver tegra_idle_driver = {
.exit_latency = 2000,
.target_residency = 2200,
.power_usage = 100,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C7",
.desc = "CPU core powered off",
},
@@ -295,6 +305,7 @@ static struct cpuidle_driver tegra_idle_driver = {
.target_residency = 10000,
.power_usage = 0,
.flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE |
CPUIDLE_FLAG_COUPLED,
.name = "CC6",
.desc = "CPU cluster powered off",
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6eceb1988243..0b00f21cefe3 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/sched/idle.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
@@ -136,11 +137,13 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
-static void enter_s2idle_proper(struct cpuidle_driver *drv,
- struct cpuidle_device *dev, int index)
+static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int index)
{
- ktime_t time_start, time_end;
struct cpuidle_state *target_state = &drv->states[index];
+ ktime_t time_start, time_end;
+
+ instrumentation_begin();
time_start = ns_to_ktime(local_clock());
@@ -151,13 +154,18 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ ct_cpuidle_enter();
+ /* Annotate away the indirect call */
+ instrumentation_begin();
+ }
target_state->enter_s2idle(dev, drv, index);
if (WARN_ON_ONCE(!irqs_disabled()))
- local_irq_disable();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_exit();
+ raw_local_irq_disable();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ instrumentation_end();
+ ct_cpuidle_exit();
+ }
tick_unfreeze();
start_critical_timings();
@@ -165,6 +173,7 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
dev->states_usage[index].s2idle_usage++;
+ instrumentation_end();
}
/**
@@ -199,8 +208,9 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* @drv: cpuidle driver for this cpu
* @index: index into the states table in @drv of the state to enter
*/
-int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
- int index)
+noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
int entered_state;
@@ -208,6 +218,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
ktime_t time_start, time_end;
+ instrumentation_begin();
+
/*
* Tell the time framework to switch to a broadcast timer because our
* local timer will be shut down. If a local timer is used from another
@@ -234,11 +246,33 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_start = ns_to_ktime(local_clock());
stop_critical_timings();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ ct_cpuidle_enter();
+ /* Annotate away the indirect call */
+ instrumentation_begin();
+ }
+
+ /*
+ * NOTE!!
+ *
+ * For cpuidle_state::enter() methods that do *NOT* set
+ * CPUIDLE_FLAG_RCU_IDLE RCU will be disabled here and these functions
+ * must be marked either noinstr or __cpuidle.
+ *
+ * For cpuidle_state::enter() methods that *DO* set
+ * CPUIDLE_FLAG_RCU_IDLE this isn't required, but then both the function
+ * calling ct_cpuidle_enter() and every function called within the
+ * RCU-idle region must be marked noinstr/__cpuidle.
+ */
entered_state = target_state->enter(dev, drv, index);
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_exit();
+
+ if (WARN_ONCE(!irqs_disabled(), "%ps leaked IRQ state", target_state->enter))
+ raw_local_irq_disable();
+
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ instrumentation_end();
+ ct_cpuidle_exit();
+ }
start_critical_timings();
sched_clock_idle_wakeup_event();
@@ -248,12 +282,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* The cpu is no longer idle or about to enter idle. */
sched_idle_set_state(NULL);
- if (broadcast) {
- if (WARN_ON_ONCE(!irqs_disabled()))
- local_irq_disable();
-
+ if (broadcast)
tick_broadcast_exit();
- }
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
@@ -305,6 +335,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
dev->states_usage[index].rejected++;
}
+ instrumentation_end();
+
return entered_state;
}
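
For illustration, here is the shape the NOTE above demands of a driver that sets CPUIDLE_FLAG_RCU_IDLE and therefore brackets only its low-level suspend call itself; a sketch with hypothetical foo_* names, mirroring the mvebu and tegra conversions elsewhere in this patch:

	static __cpuidle int foo_enter(struct cpuidle_device *dev,
				       struct cpuidle_driver *drv, int idx)
	{
		/* Instrumentable work (runtime PM, tracing) is still fine here. */
		ct_cpuidle_enter();          /* RCU stops watching */

		foo_low_level_suspend(idx);  /* must itself be noinstr/__cpuidle */

		ct_cpuidle_exit();           /* RCU is watching again */
		return idx;
	}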
@@ -394,7 +426,7 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
* @dev: the cpuidle device
*
*/
-u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+__cpuidle u64 cpuidle_poll_time(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
int i;
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 7ca3d7d9b5ea..02aa0b39af9d 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -77,7 +77,7 @@ static int init_state_node(struct cpuidle_state *idle_state,
if (err)
desc = state_node->name;
- idle_state->flags = 0;
+ idle_state->flags = CPUIDLE_FLAG_RCU_IDLE;
if (of_property_read_bool(state_node, "local-timer-stop"))
idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
/*
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index f7e83613ae94..bdcfeaecd228 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -13,11 +13,13 @@
static int __cpuidle poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- u64 time_start = local_clock();
+ u64 time_start;
+
+ time_start = local_clock();
dev->poll_time_limit = false;
- local_irq_enable();
+ raw_local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
u64 limit;
@@ -36,6 +38,8 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
}
}
}
+ raw_local_irq_disable();
+
current_clr_polling();
return index;
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 447ee4ea5c90..29619f49873a 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -108,9 +108,10 @@ bool psci_power_state_is_valid(u32 state)
return !(state & ~valid_mask);
}
-static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
- unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static __always_inline unsigned long
+__invoke_psci_fn_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
{
struct arm_smccc_res res;
@@ -118,9 +119,10 @@ static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
return res.a0;
}
-static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
- unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static __always_inline unsigned long
+__invoke_psci_fn_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
{
struct arm_smccc_res res;
@@ -128,7 +130,7 @@ static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
return res.a0;
}
-static int psci_to_linux_errno(int errno)
+static __always_inline int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
@@ -169,7 +171,8 @@ int psci_set_osi_mode(bool enable)
return psci_to_linux_errno(err);
}
-static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+static __always_inline int
+__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
{
int err;
@@ -177,13 +180,15 @@ static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
return psci_to_linux_errno(err);
}
-static int psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+static __always_inline int
+psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
state, entry_point);
}
-static int psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+static __always_inline int
+psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
state, entry_point);
@@ -450,10 +455,12 @@ late_initcall(psci_debugfs_init)
#endif
#ifdef CONFIG_CPU_IDLE
-static int psci_suspend_finisher(unsigned long state)
+static noinstr int psci_suspend_finisher(unsigned long state)
{
u32 power_state = state;
- phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+ phys_addr_t pa_cpu_resume;
+
+ pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume);
return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
}
@@ -465,11 +472,22 @@ int psci_cpu_suspend_enter(u32 state)
if (!psci_power_state_loses_context(state)) {
struct arm_cpuidle_irq_context context;
+ ct_cpuidle_enter();
arm_cpuidle_save_irq_context(&context);
ret = psci_ops.cpu_suspend(state, 0);
arm_cpuidle_restore_irq_context(&context);
+ ct_cpuidle_exit();
} else {
+ /*
+ * ARM64 cpu_suspend() wants to do ct_cpuidle_*() itself.
+ */
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_enter();
+
ret = cpu_suspend(state, psci_suspend_finisher);
+
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_exit();
}
return ret;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cfeb24d40d37..e2d64a8f9422 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -168,13 +168,7 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
raw_local_irq_enable();
ret = __intel_idle(dev, drv, index);
-
- /*
- * The lockdep hardirqs state may be changed to 'on' with timer
- * tick interrupt followed by __do_softirq(). Use local_irq_disable()
- * to keep the hardirqs state correct.
- */
- local_irq_disable();
+ raw_local_irq_disable();
return ret;
}
@@ -187,12 +181,12 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
int ret;
if (smt_active)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
ret = __intel_idle(dev, drv, index);
if (smt_active)
- wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
return ret;
}
@@ -1843,6 +1837,9 @@ static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
return true;
}
+static bool force_irq_on __read_mostly;
+module_param(force_irq_on, bool, 0444);
+
static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
int cstate;
@@ -1895,8 +1892,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
/* Structure copy. */
drv->states[drv->state_count] = cpuidle_state_table[cstate];
- if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
+ if ((cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE) || force_irq_on) {
+ printk("intel_idle: forced intel_idle_irq for state %d\n", cstate);
drv->states[drv->state_count].enter = intel_idle_irq;
+ }
if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 40f70f83daba..15bd1e34a88e 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -752,17 +752,8 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
case CPU_PM_ENTER_FAILED:
/*
* Restore and enable the counter.
- * armpmu_start() indirectly calls
- *
- * perf_event_update_userpage()
- *
- * that requires RCU read locking to be functional,
- * wrap the call within RCU_NONIDLE to make the
- * RCU subsystem aware this cpu is not idle from
- * an RCU perspective for the armpmu_start() call
- * duration.
*/
- RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
+ armpmu_start(event, PERF_EF_RELOAD);
break;
default:
break;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index f6507efe2a58..7b2288d4b1ec 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -771,14 +771,8 @@ static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
case CPU_PM_ENTER_FAILED:
/*
* Restore and enable the counter.
- *
- * Requires RCU read locking to be functional,
- * wrap the call within RCU_NONIDLE to make the
- * RCU subsystem aware this cpu is not idle from
- * an RCU perspective for the riscv_pmu_start() call
- * duration.
*/
- RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD));
+ riscv_pmu_start(event, PERF_EF_RELOAD);
break;
default:
break;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 9a780fafc539..8a884e795f6a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -46,6 +46,7 @@
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
+#include <linux/rseq.h>
#include <asm/param.h>
#include <asm/page.h>
@@ -288,6 +289,10 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
if (bprm->have_execfd) {
NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
}
+#ifdef CONFIG_RSEQ
+ NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
+ NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
+#endif
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
memset(elf_info, 0, (char *)mm->saved_auxv +
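
User space can fetch the two new auxiliary vector entries with glibc's getauxval(); a sketch (the AT_RSEQ_* constants may need defining by hand against older uapi headers):

	#include <sys/auxv.h>
	#include <stdio.h>

	int main(void)
	{
		/* getauxval() returns 0 when the kernel doesn't supply an entry. */
		unsigned long feat_size = getauxval(AT_RSEQ_FEATURE_SIZE);
		unsigned long align     = getauxval(AT_RSEQ_ALIGN);

		printf("rseq feature size: %lu, alignment: %lu\n", feat_size, align);
		return 0;
	}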
diff --git a/fs/exec.c b/fs/exec.c
index 3d2b80d8d58e..5c00670d25f3 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1010,6 +1010,7 @@ static int exec_mmap(struct mm_struct *mm)
active_mm = tsk->active_mm;
tsk->active_mm = mm;
tsk->mm = mm;
+ mm_init_cid(mm);
/*
* This prevents preemption while active_mm is being loaded and
* it and mm are being updated, which could cause problems for
@@ -1822,6 +1823,7 @@ static int bprm_execve(struct linux_binprm *bprm,
*/
check_unsafe_exec(bprm);
current->in_execve = 1;
+ sched_mm_cid_before_execve(current);
file = do_open_execat(fd, filename, flags);
retval = PTR_ERR(file);
@@ -1852,6 +1854,7 @@ static int bprm_execve(struct linux_binprm *bprm,
if (retval < 0)
goto out;
+ sched_mm_cid_after_execve(current);
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
@@ -1871,6 +1874,7 @@ out:
force_fatal_sig(SIGSEGV);
out_unmark:
+ sched_mm_cid_after_execve(current);
current->fs->in_exec = 0;
current->in_execve = 0;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 659bf3b31c91..d1f57e4868ed 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -558,6 +558,9 @@
ALIGN_FUNCTION(); \
__noinstr_text_start = .; \
*(.noinstr.text) \
+ __cpuidle_text_start = .; \
+ *(.cpuidle.text) \
+ __cpuidle_text_end = .; \
__noinstr_text_end = .;
/*
@@ -598,12 +601,6 @@
*(.spinlock.text) \
__lock_text_end = .;
-#define CPUIDLE_TEXT \
- ALIGN_FUNCTION(); \
- __cpuidle_text_start = .; \
- *(.cpuidle.text) \
- __cpuidle_text_end = .;
-
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
__kprobes_text_start = .; \
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index f68d0ec2d740..407f7005e6d6 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -4,6 +4,6 @@
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 22 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 8ae9a95ebf5b..9aac31d856f3 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -211,7 +211,7 @@ extern int tick_receive_broadcast(void);
extern void tick_setup_hrtimer_broadcast(void);
extern int tick_check_broadcast_expired(void);
# else
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
# endif
@@ -219,7 +219,7 @@ static inline void tick_setup_hrtimer_broadcast(void) { }
static inline void clockevents_suspend(void) { }
static inline void clockevents_resume(void) { }
-static inline int tick_check_broadcast_expired(void) { return 0; }
+static __always_inline int tick_check_broadcast_expired(void) { return 0; }
static inline void tick_setup_hrtimer_broadcast(void) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 7c1afe0f4129..dea5bf5bd09c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -232,11 +232,25 @@ struct ftrace_likely_data {
#endif
/* Section for code which can't be instrumented at all */
-#define noinstr \
- noinline notrace __attribute((__section__(".noinstr.text"))) \
+#define __noinstr_section(section) \
+ noinline notrace __attribute((__section__(section))) \
__no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
__no_sanitize_memory
+#define noinstr __noinstr_section(".noinstr.text")
+
+/*
+ * The __cpuidle section is used twofold:
+ *
+ * 1) the original use -- identifying if a CPU is 'stuck' in idle state based
+ *    on its instruction pointer. See cpu_in_idle().
+ *
+ * 2) suppressing instrumentation around where cpuidle disables RCU; where the
+ * function isn't strictly required for #1, this is interchangeable with
+ * noinstr.
+ */
+#define __cpuidle __noinstr_section(".cpuidle.text")
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index dcef4a9e4d63..d4afa8508a80 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -130,9 +130,36 @@ static __always_inline unsigned long ct_state_inc(int incby)
return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
+static __always_inline bool warn_rcu_enter(void)
+{
+ bool ret = false;
+
+ /*
+	 * Horrible hack to shut up the recursive "RCU isn't watching" splat,
+	 * since lots of the actual reporting also relies on RCU.
+ */
+ preempt_disable_notrace();
+ if (rcu_dynticks_curr_cpu_in_eqs()) {
+ ret = true;
+ ct_state_inc(RCU_DYNTICKS_IDX);
+ }
+
+ return ret;
+}
+
+static __always_inline void warn_rcu_exit(bool rcu)
+{
+ if (rcu)
+ ct_state_inc(RCU_DYNTICKS_IDX);
+ preempt_enable_notrace();
+}
+
#else
static inline void ct_idle_enter(void) { }
static inline void ct_idle_exit(void) { }
+
+static __always_inline bool warn_rcu_enter(void) { return false; }
+static __always_inline void warn_rcu_exit(bool rcu) { }
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */
#endif
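
A sketch of how a reporting path would use the pair; the caller here is hypothetical, but the intended users are warning/report paths that may fire while RCU is not watching:

	static void report_from_maybe_idle(const char *msg)
	{
		bool rcu = warn_rcu_enter();  /* force RCU to watch while reporting */

		WARN_ONCE(1, "%s\n", msg);    /* the reporting machinery relies on RCU */

		warn_rcu_exit(rcu);           /* undo the EQS exit only if we did one */
	}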
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 314802f98b9d..f83e4519c5f0 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -176,9 +176,6 @@ void __noreturn cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
-/* Attach to any functions which should be considered cpuidle. */
-#define __cpuidle __section(".cpuidle.text")
-
bool cpu_in_idle(unsigned long pc);
void arch_cpu_idle(void);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index fce476275e16..3183aeb7f5b4 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
+#include <linux/context_tracking.h>
#define CPUIDLE_STATE_MAX 10
#define CPUIDLE_NAME_LEN 16
@@ -115,6 +116,35 @@ struct cpuidle_device {
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
+static __always_inline void ct_cpuidle_enter(void)
+{
+ lockdep_assert_irqs_disabled();
+ /*
+	 * Idle is allowed to (temporarily) enable IRQs. It
+ * will return with IRQs disabled.
+ *
+ * Trace IRQs enable here, then switch off RCU, and have
+ * arch_cpu_idle() use raw_local_irq_enable(). Note that
+ * ct_idle_enter() relies on lockdep IRQ state, so switch that
+ * last -- this is very similar to the entry code.
+ */
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare();
+ instrumentation_end();
+ ct_idle_enter();
+ lockdep_hardirqs_on(_RET_IP_);
+}
+
+static __always_inline void ct_cpuidle_exit(void)
+{
+ /*
+ * Carefully undo the above.
+ */
+ lockdep_hardirqs_off(_RET_IP_);
+ ct_idle_exit();
+ instrumentation_begin();
+}
+
/****************************
* CPUIDLE DRIVER INTERFACE *
****************************/
@@ -277,7 +307,7 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \
idx, \
state, \
- is_retention) \
+ is_retention, is_rcu) \
({ \
int __ret = 0; \
\
@@ -289,7 +319,11 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
if (!is_retention) \
__ret = cpu_pm_enter(); \
if (!__ret) { \
+ if (!is_rcu) \
+ ct_cpuidle_enter(); \
__ret = low_level_idle_enter(state); \
+ if (!is_rcu) \
+ ct_cpuidle_exit(); \
if (!is_retention) \
cpu_pm_exit(); \
} \
@@ -298,15 +332,21 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
})
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)
#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)
#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)
#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
- __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state) \
+ __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)
#endif /* _LINUX_CPUIDLE_H */
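
The new _RCU variants are for callers whose low-level entry function performs the ct_cpuidle_*() transition itself, as the PSCI conversion above does via psci_cpu_suspend_enter(). A sketch of such a caller, with hypothetical foo_* names:

	static __cpuidle int foo_enter_idle_state(struct cpuidle_device *dev,
						  struct cpuidle_driver *drv, int idx)
	{
		u32 state = foo_state_param(idx);  /* hypothetical per-state parameter */

		/*
		 * cpu_pm_enter()/cpu_pm_exit() still wrap the call, but no
		 * ct_cpuidle_enter()/exit() here: foo_suspend_enter() is expected
		 * to handle the RCU-idle transition on its own.
		 */
		return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(foo_suspend_enter, idx, state);
	}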
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index c2aa0aa26b45..d45e5de13721 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1017,9 +1017,9 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
* concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
* region.
*/
-static inline unsigned int num_online_cpus(void)
+static __always_inline unsigned int num_online_cpus(void)
{
- return atomic_read(&__num_online_cpus);
+ return arch_atomic_read(&__num_online_cpus);
}
#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
#define num_present_cpus() cpumask_weight(cpu_present_mask)
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 8958f4c005c1..8b9191a2849e 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -161,7 +161,7 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
@@ -177,7 +177,7 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
#else
#ifndef mul_u64_u32_shr
-static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
u32 ah, al;
u64 ret;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bd3197748562..716d30d93616 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1982,6 +1982,31 @@ struct zap_details {
/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_before_execve(struct task_struct *t);
+void sched_mm_cid_after_execve(struct task_struct *t);
+void sched_mm_cid_fork(struct task_struct *t);
+void sched_mm_cid_exit_signals(struct task_struct *t);
+static inline int task_mm_cid(struct task_struct *t)
+{
+ return t->mm_cid;
+}
+#else
+static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
+static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
+static inline int task_mm_cid(struct task_struct *t)
+{
+ /*
+ * Use the processor id as a fall-back when the mm cid feature is
+ * disabled. This provides functional per-cpu data structure accesses
+ * in user-space, although it won't provide the memory usage benefits.
+ */
+ return raw_smp_processor_id();
+}
+#endif
+
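
The intended consumer is user space: with the rseq extension in this series exposing the concurrency id, per-"CPU" data can be indexed by mm_cid instead of CPU number, bounding memory by the number of concurrently running threads rather than by possible CPUs. A sketch (field name per this series; MAX_CONCURRENCY is an application-chosen bound):

	#include <stdint.h>

	struct percpu_counter { uint64_t val; };
	static struct percpu_counter counters[MAX_CONCURRENCY];

	static inline struct percpu_counter *my_counter(volatile struct rseq *rs)
	{
		/* mm_cid degrades to the processor id when the feature is off */
		return &counters[rs->mm_cid];
	}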
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9757067c3053..af8119776ab1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -645,7 +645,18 @@ struct mm_struct {
* &struct mm_struct is freed.
*/
atomic_t mm_count;
-
+#ifdef CONFIG_SCHED_MM_CID
+ /**
+ * @cid_lock: Protect cid bitmap updates vs lookups.
+ *
+ * Prevent situations where updates to the cid bitmap happen
+ * concurrently with lookups. Those can lead to situations
+ * where a lookup cannot find a free bit simply because it was
+ * unlucky enough to load, non-atomically, bitmap words as they
+ * were being concurrently updated by the updaters.
+ */
+ raw_spinlock_t cid_lock;
+#endif
#ifdef CONFIG_MMU
atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
@@ -909,6 +920,36 @@ static inline void vma_iter_init(struct vma_iterator *vmi,
vmi->mas.node = MAS_START;
}
+#ifdef CONFIG_SCHED_MM_CID
+/* Accessor for struct mm_struct's cidmask. */
+static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
+{
+ unsigned long cid_bitmap = (unsigned long)mm;
+
+ cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ /* Skip cpu_bitmap */
+ cid_bitmap += cpumask_size();
+ return (struct cpumask *)cid_bitmap;
+}
+
+static inline void mm_init_cid(struct mm_struct *mm)
+{
+ raw_spin_lock_init(&mm->cid_lock);
+ cpumask_clear(mm_cidmask(mm));
+}
+
+static inline unsigned int mm_cid_size(void)
+{
+ return cpumask_size();
+}
+#else /* CONFIG_SCHED_MM_CID */
+static inline void mm_init_cid(struct mm_struct *mm) { }
+static inline unsigned int mm_cid_size(void)
+{
+ return 0;
+}
+#endif /* CONFIG_SCHED_MM_CID */
+
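
mm_cidmask() relies on the cid mask sitting directly after cpu_bitmap at the tail of struct mm_struct, which implies the fork-side allocation grows by mm_cid_size(); a sketch of the sizing:

	/*
	 * Trailing layout of the mm_struct allocation:
	 *   [ struct mm_struct | cpu_bitmap | cidmask ]
	 */
	size_t mm_alloc_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();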
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index af1071535de8..e60727be79c4 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -310,7 +310,7 @@ extern void __bad_size_call_parameter(void);
#ifdef CONFIG_DEBUG_PREEMPT
extern void __this_cpu_preempt_check(const char *op);
#else
-static inline void __this_cpu_preempt_check(const char *op) { }
+static __always_inline void __this_cpu_preempt_check(const char *op) { }
#endif
#define __pcpu_size_call_return(stem, variable) \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6f6ce9ca7097..63d242164b1a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1302,6 +1302,7 @@ struct task_struct {
#ifdef CONFIG_RSEQ
struct rseq __user *rseq;
+ u32 rseq_len;
u32 rseq_sig;
/*
* RmW on rseq_event_mask must be performed atomically
@@ -1310,6 +1311,11 @@ struct task_struct {
unsigned long rseq_event_mask;
#endif
+#ifdef CONFIG_SCHED_MM_CID
+ int mm_cid; /* Current cid in mm */
+ int mm_cid_active; /* Whether cid bitmap is active */
+#endif
+
struct tlbflush_unmap_batch tlb_ubc;
union {
@@ -2352,10 +2358,12 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & CLONE_VM) {
t->rseq = NULL;
+ t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
} else {
t->rseq = current->rseq;
+ t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
}
@@ -2364,6 +2372,7 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
static inline void rseq_execve(struct task_struct *t)
{
t->rseq = NULL;
+ t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
}
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 867d588314e0..ca008f7d3615 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -45,7 +45,7 @@ static inline u64 cpu_clock(int cpu)
return sched_clock();
}
-static inline u64 local_clock(void)
+static __always_inline u64 local_clock(void)
{
return sched_clock();
}
@@ -79,10 +79,8 @@ static inline u64 cpu_clock(int cpu)
return sched_clock_cpu(cpu);
}
-static inline u64 local_clock(void)
-{
- return sched_clock_cpu(raw_smp_processor_id());
-}
+extern u64 local_clock(void);
+
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index ce3c58286062..5f8fd5b24a2e 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -8,15 +8,6 @@
* cputime accounting APIs:
*/
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm/cputime.h>
-
-#ifndef cputime_to_nsecs
-# define cputime_to_nsecs(__ct) \
- (cputime_to_usecs(__ct) * NSEC_PER_USEC)
-#endif
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime);
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index d73d314d59c6..478084f9105e 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -23,12 +23,37 @@ static inline void wake_up_if_idle(int cpu) { }
*/
#ifdef TIF_POLLING_NRFLAG
-static inline void __current_set_polling(void)
+#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+
+static __always_inline void __current_set_polling(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
+ arch_set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
}
-static inline bool __must_check current_set_polling_and_test(void)
+static __always_inline void __current_clr_polling(void)
+{
+ arch_clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#else
+
+static __always_inline void __current_set_polling(void)
+{
+ set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+static __always_inline void __current_clr_polling(void)
+{
+ clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
+
+static __always_inline bool __must_check current_set_polling_and_test(void)
{
__current_set_polling();
@@ -41,12 +66,7 @@ static inline bool __must_check current_set_polling_and_test(void)
return unlikely(tif_need_resched());
}
-static inline void __current_clr_polling(void)
-{
- clear_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
+static __always_inline bool __must_check current_clr_polling_and_test(void)
{
__current_clr_polling();
@@ -73,7 +93,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
}
#endif
-static inline void current_clr_polling(void)
+static __always_inline void current_clr_polling(void)
{
__current_clr_polling();
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 9f392ec76f2b..c02646884fa8 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -177,7 +177,23 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */
-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
+
+static __always_inline bool tif_need_resched(void)
+{
+ return arch_test_bit(TIF_NEED_RESCHED,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#else
+
+static __always_inline bool tif_need_resched(void)
+{
+ return test_bit(TIF_NEED_RESCHED,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index c303f7a114e9..d48cd92d2364 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -135,6 +135,21 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
# define do_ftrace_record_recursion(ip, pip) do { } while (0)
#endif
+#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+# define trace_warn_on_no_rcu(ip) \
+ ({ \
+ bool __ret = !rcu_is_watching(); \
+ if (__ret && !trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
+ trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
+ WARN_ONCE(true, "RCU not on for: %pS\n", (void *)ip); \
+ trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
+ } \
+ __ret; \
+ })
+#else
+# define trace_warn_on_no_rcu(ip) false
+#endif
+
/*
* Preemption is promised to be disabled when return bit >= 0.
*/
@@ -144,6 +159,9 @@ static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsign
unsigned int val = READ_ONCE(current->trace_recursion);
int bit;
+ if (trace_warn_on_no_rcu(ip))
+ return -1;
+
bit = trace_get_context_bit() + start;
if (unlikely(val & (1 << bit))) {
/*
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 4b33b95eb8be..552f80b8362f 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -178,6 +178,17 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#endif /* CONFIG_HAVE_STATIC_CALL */
/*
+ * ARCH_WANTS_NO_INSTR archs are expected to have sanitized entry and idle
+ * code that disallows any/all tracing/instrumentation when RCU isn't watching.
+ */
+#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+#define RCUIDLE_COND(rcuidle) (rcuidle)
+#else
+/* srcu can't be used from NMI */
+#define RCUIDLE_COND(rcuidle) (rcuidle && in_nmi())
+#endif
+
+/*
* it_func[0] is never NULL because there is at least one element in the array
* when the array itself is non NULL.
*/
@@ -188,8 +199,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
if (!(cond)) \
return; \
\
- /* srcu can't be used from NMI */ \
- WARN_ON_ONCE(rcuidle && in_nmi()); \
+ if (WARN_ON_ONCE(RCUIDLE_COND(rcuidle))) \
+ return; \
\
/* keep srcu and sched-rcu usage consistent */ \
preempt_disable_notrace(); \
diff --git a/include/trace/events/rseq.h b/include/trace/events/rseq.h
index a04a64bc1a00..823b47d1ba1e 100644
--- a/include/trace/events/rseq.h
+++ b/include/trace/events/rseq.h
@@ -16,13 +16,18 @@ TRACE_EVENT(rseq_update,
TP_STRUCT__entry(
__field(s32, cpu_id)
+ __field(s32, node_id)
+ __field(s32, mm_cid)
),
TP_fast_assign(
__entry->cpu_id = raw_smp_processor_id();
+ __entry->node_id = cpu_to_node(__entry->cpu_id);
+ __entry->mm_cid = task_mm_cid(t);
),
- TP_printk("cpu_id=%d", __entry->cpu_id)
+ TP_printk("cpu_id=%d node_id=%d mm_cid=%d", __entry->cpu_id,
+ __entry->node_id, __entry->mm_cid)
);
TRACE_EVENT(rseq_ip_fixup,
diff --git a/include/uapi/linux/auxvec.h b/include/uapi/linux/auxvec.h
index c7e502bf5a6f..6991c4b8ab18 100644
--- a/include/uapi/linux/auxvec.h
+++ b/include/uapi/linux/auxvec.h
@@ -30,6 +30,8 @@
* differ from AT_PLATFORM. */
#define AT_RANDOM 25 /* address of 16 random bytes */
#define AT_HWCAP2 26 /* extension of AT_HWCAP */
+#define AT_RSEQ_FEATURE_SIZE 27 /* rseq supported feature size */
+#define AT_RSEQ_ALIGN 28 /* rseq allocation alignment */
#define AT_EXECFN 31 /* filename of program */
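
Not part of the patch: a minimal user-space sketch of querying the two new auxiliary vector entries. The fallback defines are only needed with pre-patch headers, and both lookups simply return 0 on kernels without this change.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef AT_RSEQ_FEATURE_SIZE
# define AT_RSEQ_FEATURE_SIZE	27	/* value from the hunk above */
#endif
#ifndef AT_RSEQ_ALIGN
# define AT_RSEQ_ALIGN		28	/* value from the hunk above */
#endif

int main(void)
{
	/* 0 means the running kernel predates these entries. */
	printf("rseq feature size: %lu\n", getauxval(AT_RSEQ_FEATURE_SIZE));
	printf("rseq alignment:    %lu\n", getauxval(AT_RSEQ_ALIGN));
	return 0;
}
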
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 737605897f36..5f3ad6d5be6f 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -137,6 +137,9 @@
* @MEMBARRIER_CMD_SHARED:
* Alias to MEMBARRIER_CMD_GLOBAL. Provided for
* header backward compatibility.
+ * @MEMBARRIER_CMD_GET_REGISTRATIONS:
+ * Returns a bitmask of previously issued
+ * registration commands.
*
* Command to be passed to the membarrier system call. The commands need to
* be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
@@ -153,6 +156,7 @@ enum membarrier_cmd {
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6),
MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ = (1 << 7),
MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ = (1 << 8),
+ MEMBARRIER_CMD_GET_REGISTRATIONS = (1 << 9),
/* Alias for header backward compatibility. */
MEMBARRIER_CMD_SHARED = MEMBARRIER_CMD_GLOBAL,
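
Not part of the patch: a hedged user-space sketch of the new query command. The fallback define covers pre-patch headers; on kernels without the command the call fails with EINVAL.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

#ifndef MEMBARRIER_CMD_GET_REGISTRATIONS
# define MEMBARRIER_CMD_GET_REGISTRATIONS	(1 << 9)
#endif

static long membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* Register one command, then read the registration mask back. */
	membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);

	long mask = membarrier(MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);
	if (mask < 0)
		perror("membarrier");
	else	/* expect bit 4 (MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED) set */
		printf("registrations: %#lx\n", mask);
	return 0;
}
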
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index 77ee207623a9..c233aae5eac9 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -130,6 +130,28 @@ struct rseq {
* this thread.
*/
__u32 flags;
+
+ /*
+ * Restartable sequences node_id field. Updated by the kernel. Read by
+ * user-space with single-copy atomicity semantics. This field should
+ * only be read by the thread which registered this data structure.
+ * Aligned on 32-bit. Contains the current NUMA node ID.
+ */
+ __u32 node_id;
+
+ /*
+ * Restartable sequences mm_cid field. Updated by the kernel. Read by
+ * user-space with single-copy atomicity semantics. This field should
+ * only be read by the thread which registered this data structure.
+ * Aligned on 32-bit. Contains the current thread's concurrency ID
+ * (allocated uniquely within a memory map).
+ */
+ __u32 mm_cid;
+
+ /*
+ * Flexible array member at end of structure, after last feature field.
+ */
+ char end[];
} __attribute__((aligned(4 * sizeof(__u64))));
#endif /* _UAPI_LINUX_RSEQ_H */
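
Not part of the patch: a hedged sketch reading the new fields from the rseq area that glibc 2.35+ registers per thread. The hard-coded offsets mirror the layout above (cpu_id at byte 4, node_id at 20, mm_cid at 24); plain volatile loads match the single-copy atomicity wording, and both new fields read as 0 on older kernels.

#include <stdio.h>
#include <stdint.h>
#include <sys/rseq.h>	/* __rseq_offset/__rseq_size, glibc >= 2.35 */

int main(void)
{
	if (!__rseq_size)
		return 1;	/* libc did not register rseq */

	/* The area sits at a fixed offset from the thread pointer;
	 * __builtin_thread_pointer() needs a reasonably recent gcc/clang. */
	char *area = (char *)__builtin_thread_pointer() + __rseq_offset;

	printf("cpu=%u node=%u mm_cid=%u\n",
	       *(volatile uint32_t *)(area + 4),
	       *(volatile uint32_t *)(area + 20),
	       *(volatile uint32_t *)(area + 24));
	return 0;
}
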
diff --git a/init/Kconfig b/init/Kconfig
index 44e90b28a30f..e76dc579cfa2 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1045,6 +1045,10 @@ config RT_GROUP_SCHED
endif #CGROUP_SCHED
+config SCHED_MM_CID
+ def_bool y
+ depends on SMP && RSEQ
+
config UCLAMP_TASK_GROUP
bool "Utilization clamping per group of tasks"
depends on CGROUP_SCHED
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 77978e372377..a09f1c19336a 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -510,7 +510,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
	 * In this case we don't care about any concurrency/ordering.
*/
if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
- atomic_set(&ct->state, state);
+ arch_atomic_set(&ct->state, state);
} else {
/*
* Even if context tracking is disabled on this CPU, because it's outside
@@ -527,7 +527,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
*/
if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
/* Tracking for vtime only, no concurrent RCU EQS accounting */
- atomic_set(&ct->state, state);
+ arch_atomic_set(&ct->state, state);
} else {
/*
* Tracking for vtime and RCU EQS. Make sure we don't race
@@ -535,7 +535,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
* RCU only requires RCU_DYNTICKS_IDX increments to be fully
* ordered.
*/
- atomic_add(state, &ct->state);
+ arch_atomic_add(state, &ct->state);
}
}
}
@@ -630,12 +630,12 @@ void noinstr __ct_user_exit(enum ctx_state state)
	 * In this case we don't care about any concurrency/ordering.
*/
if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
- atomic_set(&ct->state, CONTEXT_KERNEL);
+ arch_atomic_set(&ct->state, CONTEXT_KERNEL);
} else {
if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
/* Tracking for vtime only, no concurrent RCU EQS accounting */
- atomic_set(&ct->state, CONTEXT_KERNEL);
+ arch_atomic_set(&ct->state, CONTEXT_KERNEL);
} else {
/*
* Tracking for vtime and RCU EQS. Make sure we don't race
@@ -643,7 +643,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
* RCU only requires RCU_DYNTICKS_IDX increments to be fully
* ordered.
*/
- atomic_sub(state, &ct->state);
+ arch_atomic_sub(state, &ct->state);
}
}
}
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index ba4ba71facf9..b0f0d15085db 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -30,16 +30,9 @@ static int cpu_pm_notify(enum cpu_pm_event event)
{
int ret;
- /*
- * This introduces a RCU read critical section, which could be
- * disfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
- * this.
- */
- ct_irq_enter_irqson();
rcu_read_lock();
ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
rcu_read_unlock();
- ct_irq_exit_irqson();
return notifier_to_errno(ret);
}
@@ -49,11 +42,9 @@ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event ev
unsigned long flags;
int ret;
- ct_irq_enter_irqson();
raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
- ct_irq_exit_irqson();
return notifier_to_errno(ret);
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 15dc2ec80c46..bccfa4218356 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -807,6 +807,8 @@ void __noreturn do_exit(long code)
struct task_struct *tsk = current;
int group_dead;
+ WARN_ON(irqs_disabled());
+
synchronize_group_exit(tsk, code);
WARN_ON(tsk->plug);
@@ -938,6 +940,11 @@ void __noreturn make_task_dead(int signr)
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
+ if (unlikely(irqs_disabled())) {
+ pr_info("note: %s[%d] exited with irqs disabled\n",
+ current->comm, task_pid_nr(current));
+ local_irq_enable();
+ }
if (unlikely(in_atomic())) {
pr_info("note: %s[%d] exited with preempt_count %d\n",
current->comm, task_pid_nr(current),
diff --git a/kernel/fork.c b/kernel/fork.c
index d9c97704b7c9..038b898dad52 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1060,6 +1060,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->reported_split_lock = 0;
#endif
+#ifdef CONFIG_SCHED_MM_CID
+ tsk->mm_cid = -1;
+ tsk->mm_cid_active = 0;
+#endif
return tsk;
free_stack:
@@ -1169,6 +1173,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
mm->user_ns = get_user_ns(user_ns);
lru_gen_init_mm(mm);
+ mm_init_cid(mm);
return mm;
fail_pcpu:
@@ -1601,6 +1606,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
tsk->mm = mm;
tsk->active_mm = mm;
+ sched_mm_cid_fork(tsk);
return 0;
}
@@ -3034,7 +3040,7 @@ void __init mm_cache_init(void)
* dynamically sized based on the maximum CPU number this system
* can have, taking hotplug into account (nr_cpu_ids).
*/
- mm_size = sizeof(struct mm_struct) + cpumask_size();
+ mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();
mm_cachep = kmem_cache_create_usercopy("mm_struct",
mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e3375bc40dad..50d4863974e7 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -55,6 +55,7 @@
#include <linux/rcupdate.h>
#include <linux/kprobes.h>
#include <linux/lockdep.h>
+#include <linux/context_tracking.h>
#include <asm/sections.h>
@@ -6555,6 +6556,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
struct task_struct *curr = current;
int dl = READ_ONCE(debug_locks);
+ bool rcu = warn_rcu_enter();
/* Note: the following can be executed concurrently, so be careful. */
pr_warn("\n");
@@ -6595,5 +6597,6 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
lockdep_print_held_locks(curr);
pr_warn("\nstack backtrace:\n");
dump_stack();
+ warn_rcu_exit(rcu);
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
diff --git a/kernel/panic.c b/kernel/panic.c
index 463c9295bc28..487f5b03bf83 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -34,6 +34,7 @@
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include <linux/sysfs.h>
+#include <linux/context_tracking.h>
#include <trace/events/error_report.h>
#include <asm/sections.h>
@@ -679,6 +680,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
void warn_slowpath_fmt(const char *file, int line, unsigned taint,
const char *fmt, ...)
{
+ bool rcu = warn_rcu_enter();
struct warn_args args;
pr_warn(CUT_HERE);
@@ -693,11 +695,13 @@ void warn_slowpath_fmt(const char *file, int line, unsigned taint,
va_start(args.args, fmt);
__warn(file, line, __builtin_return_address(0), taint, NULL, &args);
va_end(args.args);
+ warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(warn_slowpath_fmt);
#else
void __warn_printk(const char *fmt, ...)
{
+ bool rcu = warn_rcu_enter();
va_list args;
pr_warn(CUT_HERE);
@@ -705,6 +709,7 @@ void __warn_printk(const char *fmt, ...)
va_start(args, fmt);
vprintk(fmt, args);
va_end(args);
+ warn_rcu_exit(rcu);
}
EXPORT_SYMBOL(__warn_printk);
#endif
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index a5ed2e53547c..94f136b25f6a 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2196,7 +2196,7 @@ static u16 printk_sprint(char *text, u16 size, int facility,
}
}
- trace_console_rcuidle(text, text_len);
+ trace_console(text, text_len);
return text_len;
}
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 54482193e1ed..0786450074c1 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -813,7 +813,7 @@ static long ptrace_get_rseq_configuration(struct task_struct *task,
{
struct ptrace_rseq_configuration conf = {
.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
- .rseq_abi_size = sizeof(*task->rseq),
+ .rseq_abi_size = task->rseq_len,
.signature = task->rseq_sig,
.flags = 0,
};
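
Not part of the patch: a hedged tracer-side sketch showing the effect of the hunk above, i.e. rseq_abi_size now reports the length the tracee actually registered rather than sizeof(struct rseq). Assumes a libc that registers rseq at startup.

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <linux/ptrace.h>	/* struct ptrace_rseq_configuration */

#ifndef PTRACE_GET_RSEQ_CONFIGURATION
# define PTRACE_GET_RSEQ_CONFIGURATION	0x420f
#endif

int main(void)
{
	pid_t pid = fork();

	if (!pid) {	/* tracee: libc registered rseq during startup */
		pause();
		_exit(0);
	}

	ptrace(PTRACE_ATTACH, pid, 0, 0);
	waitpid(pid, NULL, 0);

	struct ptrace_rseq_configuration conf;

	if (ptrace(PTRACE_GET_RSEQ_CONFIGURATION, pid, sizeof(conf), &conf) < 0)
		perror("ptrace");
	else
		printf("rseq area %#llx, registered size %u\n",
		       (unsigned long long)conf.rseq_abi_pointer,
		       conf.rseq_abi_size);

	kill(pid, SIGKILL);
	return 0;
}
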
diff --git a/kernel/rseq.c b/kernel/rseq.c
index d38ab944105d..9de6e35fe679 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -18,6 +18,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/rseq.h>
+/* The original rseq structure size (including padding) is 32 bytes. */
+#define ORIG_RSEQ_SIZE 32
+
#define RSEQ_CS_NO_RESTART_FLAGS (RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT | \
RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL | \
RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE)
@@ -82,15 +85,25 @@
* F1. <failure>
*/
-static int rseq_update_cpu_id(struct task_struct *t)
+static int rseq_update_cpu_node_id(struct task_struct *t)
{
- u32 cpu_id = raw_smp_processor_id();
struct rseq __user *rseq = t->rseq;
+ u32 cpu_id = raw_smp_processor_id();
+ u32 node_id = cpu_to_node(cpu_id);
+ u32 mm_cid = task_mm_cid(t);
- if (!user_write_access_begin(rseq, sizeof(*rseq)))
+ WARN_ON_ONCE((int) mm_cid < 0);
+ if (!user_write_access_begin(rseq, t->rseq_len))
goto efault;
unsafe_put_user(cpu_id, &rseq->cpu_id_start, efault_end);
unsafe_put_user(cpu_id, &rseq->cpu_id, efault_end);
+ unsafe_put_user(node_id, &rseq->node_id, efault_end);
+ unsafe_put_user(mm_cid, &rseq->mm_cid, efault_end);
+ /*
+ * Additional feature fields added after ORIG_RSEQ_SIZE
+ * need to be conditionally updated only if
+ * t->rseq_len != ORIG_RSEQ_SIZE.
+ */
user_write_access_end();
trace_rseq_update(t);
return 0;
@@ -101,9 +114,10 @@ efault:
return -EFAULT;
}
-static int rseq_reset_rseq_cpu_id(struct task_struct *t)
+static int rseq_reset_rseq_cpu_node_id(struct task_struct *t)
{
- u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED;
+ u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED, node_id = 0,
+ mm_cid = 0;
/*
* Reset cpu_id_start to its initial state (0).
@@ -117,6 +131,21 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
*/
if (put_user(cpu_id, &t->rseq->cpu_id))
return -EFAULT;
+ /*
+ * Reset node_id to its initial state (0).
+ */
+ if (put_user(node_id, &t->rseq->node_id))
+ return -EFAULT;
+ /*
+ * Reset mm_cid to its initial state (0).
+ */
+ if (put_user(mm_cid, &t->rseq->mm_cid))
+ return -EFAULT;
+ /*
+ * Additional feature fields added after ORIG_RSEQ_SIZE
+ * need to be conditionally reset only if
+ * t->rseq_len != ORIG_RSEQ_SIZE.
+ */
return 0;
}
@@ -301,7 +330,7 @@ void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
if (unlikely(ret < 0))
goto error;
}
- if (unlikely(rseq_update_cpu_id(t)))
+ if (unlikely(rseq_update_cpu_node_id(t)))
goto error;
return;
@@ -344,15 +373,16 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
/* Unregister rseq for current thread. */
if (current->rseq != rseq || !current->rseq)
return -EINVAL;
- if (rseq_len != sizeof(*rseq))
+ if (rseq_len != current->rseq_len)
return -EINVAL;
if (current->rseq_sig != sig)
return -EPERM;
- ret = rseq_reset_rseq_cpu_id(current);
+ ret = rseq_reset_rseq_cpu_node_id(current);
if (ret)
return ret;
current->rseq = NULL;
current->rseq_sig = 0;
+ current->rseq_len = 0;
return 0;
}
@@ -365,7 +395,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
* the provided address differs from the prior
* one.
*/
- if (current->rseq != rseq || rseq_len != sizeof(*rseq))
+ if (current->rseq != rseq || rseq_len != current->rseq_len)
return -EINVAL;
if (current->rseq_sig != sig)
return -EPERM;
@@ -374,15 +404,24 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
}
/*
- * If there was no rseq previously registered,
- * ensure the provided rseq is properly aligned and valid.
+ * If there was no rseq previously registered, ensure the provided rseq
+	 * is properly aligned, as communicated to user-space through the ELF
+ * auxiliary vector AT_RSEQ_ALIGN. If rseq_len is the original rseq
+ * size, the required alignment is the original struct rseq alignment.
+ *
+ * In order to be valid, rseq_len is either the original rseq size, or
+ * large enough to contain all supported fields, as communicated to
+ * user-space through the ELF auxiliary vector AT_RSEQ_FEATURE_SIZE.
*/
- if (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) ||
- rseq_len != sizeof(*rseq))
+ if (rseq_len < ORIG_RSEQ_SIZE ||
+ (rseq_len == ORIG_RSEQ_SIZE && !IS_ALIGNED((unsigned long)rseq, ORIG_RSEQ_SIZE)) ||
+ (rseq_len != ORIG_RSEQ_SIZE && (!IS_ALIGNED((unsigned long)rseq, __alignof__(*rseq)) ||
+ rseq_len < offsetof(struct rseq, end))))
return -EINVAL;
if (!access_ok(rseq, rseq_len))
return -EFAULT;
current->rseq = rseq;
+ current->rseq_len = rseq_len;
current->rseq_sig = sig;
/*
* If rseq was previously inactive, and has just been
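
Not part of the patch: a hedged user-space sketch of registering an extended rseq area against the new length/alignment checks. RSEQ_SIG is an arbitrary example signature, and the call fails with EBUSY when libc already registered rseq for this thread (glibc can be told not to via GLIBC_TUNABLES=glibc.pthread.rseq=0).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/syscall.h>

#define RSEQ_SIG	0x53053053	/* example signature, any constant works */

int main(void)
{
	unsigned long len   = getauxval(27);	/* AT_RSEQ_FEATURE_SIZE */
	unsigned long align = getauxval(28);	/* AT_RSEQ_ALIGN */

	if (len < 32)	/* never register below the original 32-byte ABI */
		len = 32;
	if (!align)
		align = 32;

	/* aligned_alloc() wants the size to be a multiple of the alignment. */
	unsigned long sz = (len + align - 1) & ~(align - 1);
	void *area = aligned_alloc(align, sz);

	memset(area, 0, sz);
	if (syscall(__NR_rseq, area, (unsigned int)len, 0, RSEQ_SIG))
		perror("rseq");	/* EBUSY if libc already registered */
	else
		puts("registered");
	return 0;
}
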
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index e374c0c923da..5732fa75ebab 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -93,7 +93,7 @@ struct sched_clock_data {
static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
-notrace static inline struct sched_clock_data *this_scd(void)
+static __always_inline struct sched_clock_data *this_scd(void)
{
return this_cpu_ptr(&sched_clock_data);
}
@@ -244,12 +244,12 @@ late_initcall(sched_clock_init_late);
* min, max except they take wrapping into account
*/
-notrace static inline u64 wrap_min(u64 x, u64 y)
+static __always_inline u64 wrap_min(u64 x, u64 y)
{
return (s64)(x - y) < 0 ? x : y;
}
-notrace static inline u64 wrap_max(u64 x, u64 y)
+static __always_inline u64 wrap_max(u64 x, u64 y)
{
return (s64)(x - y) > 0 ? x : y;
}
@@ -260,7 +260,7 @@ notrace static inline u64 wrap_max(u64 x, u64 y)
* - filter out backward motion
* - use the GTOD tick value to create a window to filter crazy TSC values
*/
-notrace static u64 sched_clock_local(struct sched_clock_data *scd)
+static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
{
u64 now, clock, old_clock, min_clock, max_clock, gtod;
s64 delta;
@@ -287,13 +287,28 @@ again:
clock = wrap_max(clock, min_clock);
clock = wrap_min(clock, max_clock);
- if (!try_cmpxchg64(&scd->clock, &old_clock, clock))
+ if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock))
goto again;
return clock;
}
-notrace static u64 sched_clock_remote(struct sched_clock_data *scd)
+noinstr u64 local_clock(void)
+{
+ u64 clock;
+
+ if (static_branch_likely(&__sched_clock_stable))
+ return sched_clock() + __sched_clock_offset;
+
+ preempt_disable_notrace();
+ clock = sched_clock_local(this_scd());
+ preempt_enable_notrace();
+
+ return clock;
+}
+EXPORT_SYMBOL_GPL(local_clock);
+
+static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
{
struct sched_clock_data *my_scd = this_scd();
u64 this_clock, remote_clock;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2a4918a1faa9..fb49dbf61273 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -152,7 +152,7 @@ __read_mostly int scheduler_running;
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
/* kernel prio, less is more */
-static inline int __task_prio(struct task_struct *p)
+static inline int __task_prio(const struct task_struct *p)
{
if (p->sched_class == &stop_sched_class) /* trumps deadline */
return -2;
@@ -174,7 +174,8 @@ static inline int __task_prio(struct task_struct *p)
*/
/* real prio, less is less */
-static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+static inline bool prio_less(const struct task_struct *a,
+ const struct task_struct *b, bool in_fi)
{
int pa = __task_prio(a), pb = __task_prio(b);
@@ -194,7 +195,8 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool
return false;
}
-static inline bool __sched_core_less(struct task_struct *a, struct task_struct *b)
+static inline bool __sched_core_less(const struct task_struct *a,
+ const struct task_struct *b)
{
if (a->core_cookie < b->core_cookie)
return true;
@@ -3675,14 +3677,39 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
}
/*
- * Mark the task runnable and perform wakeup-preemption.
+ * Mark the task runnable.
*/
-static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
- struct rq_flags *rf)
+static inline void ttwu_do_wakeup(struct task_struct *p)
{
- check_preempt_curr(rq, p, wake_flags);
WRITE_ONCE(p->__state, TASK_RUNNING);
trace_sched_wakeup(p);
+}
+
+static void
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+ struct rq_flags *rf)
+{
+ int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
+
+ lockdep_assert_rq_held(rq);
+
+ if (p->sched_contributes_to_load)
+ rq->nr_uninterruptible--;
+
+#ifdef CONFIG_SMP
+ if (wake_flags & WF_MIGRATED)
+ en_flags |= ENQUEUE_MIGRATED;
+ else
+#endif
+ if (p->in_iowait) {
+ delayacct_blkio_end(p);
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
+
+ activate_task(rq, p, en_flags);
+ check_preempt_curr(rq, p, wake_flags);
+
+ ttwu_do_wakeup(p);
#ifdef CONFIG_SMP
if (p->sched_class->task_woken) {
@@ -3712,31 +3739,6 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
#endif
}
-static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
- struct rq_flags *rf)
-{
- int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
-
- lockdep_assert_rq_held(rq);
-
- if (p->sched_contributes_to_load)
- rq->nr_uninterruptible--;
-
-#ifdef CONFIG_SMP
- if (wake_flags & WF_MIGRATED)
- en_flags |= ENQUEUE_MIGRATED;
- else
-#endif
- if (p->in_iowait) {
- delayacct_blkio_end(p);
- atomic_dec(&task_rq(p)->nr_iowait);
- }
-
- activate_task(rq, p, en_flags);
- ttwu_do_wakeup(rq, p, wake_flags, rf);
-}
-
/*
* Consider @p being inside a wait loop:
*
@@ -3770,9 +3772,15 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
rq = __task_rq_lock(p, &rf);
if (task_on_rq_queued(p)) {
- /* check_preempt_curr() may use rq clock */
- update_rq_clock(rq);
- ttwu_do_wakeup(rq, p, wake_flags, &rf);
+ if (!task_on_cpu(rq, p)) {
+ /*
+ * When on_rq && !on_cpu the task is preempted, see if
+ * it should preempt the task that is current now.
+ */
+ update_rq_clock(rq);
+ check_preempt_curr(rq, p, wake_flags);
+ }
+ ttwu_do_wakeup(p);
ret = 1;
}
__task_rq_unlock(rq, &rf);
@@ -4138,8 +4146,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
goto out;
trace_sched_waking(p);
- WRITE_ONCE(p->__state, TASK_RUNNING);
- trace_sched_wakeup(p);
+ ttwu_do_wakeup(p);
goto out;
}
@@ -5104,6 +5111,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
sched_info_switch(rq, prev, next);
perf_event_task_sched_out(prev, next);
rseq_preempt(prev);
+ switch_mm_cid(prev, next);
fire_sched_out_preempt_notifiers(prev, next);
kmap_local_sched_out();
prepare_task(next);
@@ -6260,7 +6268,7 @@ static bool steal_cookie_task(int cpu, struct sched_domain *sd)
{
int i;
- for_each_cpu_wrap(i, sched_domain_span(sd), cpu) {
+ for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) {
if (i == cpu)
continue;
@@ -11365,3 +11373,53 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
trace_sched_update_nr_running_tp(rq, count);
}
+
+#ifdef CONFIG_SCHED_MM_CID
+void sched_mm_cid_exit_signals(struct task_struct *t)
+{
+ struct mm_struct *mm = t->mm;
+ unsigned long flags;
+
+ if (!mm)
+ return;
+ local_irq_save(flags);
+ mm_cid_put(mm, t->mm_cid);
+ t->mm_cid = -1;
+ t->mm_cid_active = 0;
+ local_irq_restore(flags);
+}
+
+void sched_mm_cid_before_execve(struct task_struct *t)
+{
+ struct mm_struct *mm = t->mm;
+ unsigned long flags;
+
+ if (!mm)
+ return;
+ local_irq_save(flags);
+ mm_cid_put(mm, t->mm_cid);
+ t->mm_cid = -1;
+ t->mm_cid_active = 0;
+ local_irq_restore(flags);
+}
+
+void sched_mm_cid_after_execve(struct task_struct *t)
+{
+ struct mm_struct *mm = t->mm;
+ unsigned long flags;
+
+ if (!mm)
+ return;
+ local_irq_save(flags);
+ t->mm_cid = mm_cid_get(mm);
+ t->mm_cid_active = 1;
+ local_irq_restore(flags);
+ rseq_set_notify_resume(t);
+}
+
+void sched_mm_cid_fork(struct task_struct *t)
+{
+ WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
+ t->mm_cid_active = 1;
+}
+#endif
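
Not part of the patch: a hedged multi-threaded sketch of what the concurrency ID is for. Each thread reads mm_cid from its glibc-registered rseq area (offset 24 per the uapi hunk earlier in this diff) and uses it to index a compact slot array bounded by the thread count, the intended pattern for per-"CPU-like" data such as allocator pools.

#include <stdio.h>
#include <stdint.h>
#include <pthread.h>
#include <sys/rseq.h>	/* __rseq_offset/__rseq_size, glibc >= 2.35 */

#define NTHREADS 4

static int hits[NTHREADS + 1];	/* one slot per possible concurrency ID */

static void *worker(void *arg)
{
	char *area;
	uint32_t cid;

	if (!__rseq_size)
		return NULL;	/* libc did not register rseq */

	area = (char *)__builtin_thread_pointer() + __rseq_offset;
	cid = *(volatile uint32_t *)(area + 24);	/* mm_cid */

	if (cid <= NTHREADS)	/* reads 0 on kernels without SCHED_MM_CID */
		__atomic_fetch_add(&hits[cid], 1, __ATOMIC_RELAXED);
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	for (i = 0; i <= NTHREADS; i++)
		printf("cid %d: %d\n", i, hits[i]);
	return 0;
}
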
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 1207c78f85c1..5c840151f3bb 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -48,7 +48,6 @@ struct sugov_cpu {
unsigned long util;
unsigned long bw_dl;
- unsigned long max;
/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
@@ -158,7 +157,6 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
- sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
sg_cpu->bw_dl = cpu_bw_dl(rq);
sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
FREQUENCY_UTIL, NULL);
@@ -238,6 +236,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
* sugov_iowait_apply() - Apply the IO boost to a CPU.
* @sg_cpu: the sugov data for the cpu to boost
* @time: the update time from the caller
+ * @max_cap: the max CPU capacity
*
 * A CPU running a task which woke up after an IO operation can have its
* utilization boosted to speed up the completion of those IO operations.
@@ -251,7 +250,8 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 * This mechanism is designed to boost frequently IO-waiting tasks, while
 * being more conservative on tasks which do sporadic IO operations.
*/
-static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
+static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+ unsigned long max_cap)
{
unsigned long boost;
@@ -280,7 +280,7 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
* sg_cpu->util is already in capacity scale; convert iowait_boost
* into the same scale so we can compare.
*/
- boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
+ boost = (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
if (sg_cpu->util < boost)
sg_cpu->util = boost;
@@ -310,7 +310,8 @@ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
}
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
- u64 time, unsigned int flags)
+ u64 time, unsigned long max_cap,
+ unsigned int flags)
{
sugov_iowait_boost(sg_cpu, time, flags);
sg_cpu->last_update = time;
@@ -321,7 +322,7 @@ static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
return false;
sugov_get_util(sg_cpu);
- sugov_iowait_apply(sg_cpu, time);
+ sugov_iowait_apply(sg_cpu, time, max_cap);
return true;
}
@@ -332,12 +333,15 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned int cached_freq = sg_policy->cached_raw_freq;
+ unsigned long max_cap;
unsigned int next_f;
- if (!sugov_update_single_common(sg_cpu, time, flags))
+ max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
+
+ if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
return;
- next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
+ next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
/*
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
@@ -374,6 +378,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
unsigned long prev_util = sg_cpu->util;
+ unsigned long max_cap;
/*
* Fall back to the "frequency" path if frequency invariance is not
@@ -385,7 +390,9 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
return;
}
- if (!sugov_update_single_common(sg_cpu, time, flags))
+ max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
+
+ if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
return;
/*
@@ -399,7 +406,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
sg_cpu->util = prev_util;
cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
- map_util_perf(sg_cpu->util), sg_cpu->max);
+ map_util_perf(sg_cpu->util), max_cap);
sg_cpu->sg_policy->last_freq_update_time = time;
}
@@ -408,25 +415,21 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
- unsigned long util = 0, max = 1;
+ unsigned long util = 0, max_cap;
unsigned int j;
+ max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
+
for_each_cpu(j, policy->cpus) {
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
- unsigned long j_util, j_max;
sugov_get_util(j_sg_cpu);
- sugov_iowait_apply(j_sg_cpu, time);
- j_util = j_sg_cpu->util;
- j_max = j_sg_cpu->max;
+ sugov_iowait_apply(j_sg_cpu, time, max_cap);
- if (j_util * max > j_max * util) {
- util = j_util;
- max = j_max;
- }
+ util = max(j_sg_cpu->util, util);
}
- return get_next_freq(sg_policy, util, max);
+ return get_next_freq(sg_policy, util, max_cap);
}
static void
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 95fc77853743..af7952f12e6c 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -3,6 +3,10 @@
* Simple CPU accounting cgroup controller
*/
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ #include <asm/cputime.h>
+#endif
+
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0d97d54276cc..71b24371a6f7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2663,17 +2663,20 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
int oldprio)
{
- if (task_on_rq_queued(p) || task_current(rq, p)) {
+ if (!task_on_rq_queued(p))
+ return;
+
#ifdef CONFIG_SMP
- /*
- * This might be too much, but unfortunately
- * we don't have the old deadline value, and
- * we can't argue if the task is increasing
- * or lowering its prio, so...
- */
- if (!rq->dl.overloaded)
- deadline_queue_pull_task(rq);
+ /*
+ * This might be too much, but unfortunately
+ * we don't have the old deadline value, and
+ * we can't argue if the task is increasing
+ * or lowering its prio, so...
+ */
+ if (!rq->dl.overloaded)
+ deadline_queue_pull_task(rq);
+ if (task_current(rq, p)) {
/*
		 * If we now have an earlier deadline task than p,
* then reschedule, provided p is still on this
@@ -2681,15 +2684,24 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
*/
if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
resched_curr(rq);
-#else
+ } else {
/*
- * Again, we don't know if p has a earlier
- * or later deadline, so let's blindly set a
- * (maybe not needed) rescheduling point.
+ * Current may not be deadline in case p was throttled but we
+ * have just replenished it (e.g. rt_mutex_setprio()).
+ *
+ * Otherwise, if p was given an earlier deadline, reschedule.
*/
- resched_curr(rq);
-#endif /* CONFIG_SMP */
+ if (!dl_task(rq->curr) ||
+ dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
+ resched_curr(rq);
}
+#else
+ /*
+	 * We don't know if p has an earlier or later deadline, so let's blindly
+ * set a (maybe not needed) rescheduling point.
+ */
+ resched_curr(rq);
+#endif
}
DEFINE_SCHED_CLASS(dl) = {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0f8736991427..ff4dbbae3b10 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -468,7 +468,7 @@ is_same_group(struct sched_entity *se, struct sched_entity *pse)
return NULL;
}
-static inline struct sched_entity *parent_entity(struct sched_entity *se)
+static inline struct sched_entity *parent_entity(const struct sched_entity *se)
{
return se->parent;
}
@@ -595,8 +595,8 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
return min_vruntime;
}
-static inline bool entity_before(struct sched_entity *a,
- struct sched_entity *b)
+static inline bool entity_before(const struct sched_entity *a,
+ const struct sched_entity *b)
{
return (s64)(a->vruntime - b->vruntime) < 0;
}
@@ -1804,7 +1804,7 @@ static void update_numa_stats(struct task_numa_env *env,
ns->nr_running += rq->cfs.h_nr_running;
ns->compute_capacity += capacity_of(cpu);
- if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
+ if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) {
if (READ_ONCE(rq->numa_migrate_on) ||
!cpumask_test_cpu(cpu, env->p->cpus_ptr))
continue;
@@ -1836,7 +1836,7 @@ static void task_numa_assign(struct task_numa_env *env,
int start = env->dst_cpu;
/* Find alternative idle CPU. */
- for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
+ for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start + 1) {
if (cpu == env->best_cpu || !idle_cpu(cpu) ||
!cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
continue;
@@ -4476,17 +4476,9 @@ static inline int util_fits_cpu(unsigned long util,
*
* For uclamp_max, we can tolerate a drop in performance level as the
* goal is to cap the task. So it's okay if it's getting less.
- *
- * In case of capacity inversion we should honour the inverted capacity
- * for both uclamp_min and uclamp_max all the time.
*/
- capacity_orig = cpu_in_capacity_inversion(cpu);
- if (capacity_orig) {
- capacity_orig_thermal = capacity_orig;
- } else {
- capacity_orig = capacity_orig_of(cpu);
- capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
- }
+ capacity_orig = capacity_orig_of(cpu);
+ capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
/*
* We want to force a task to fit a cpu as implied by uclamp_max.
@@ -4561,8 +4553,8 @@ static inline int util_fits_cpu(unsigned long util,
* handle the case uclamp_min > uclamp_max.
*/
uclamp_min = min(uclamp_min, uclamp_max);
- if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE)
- fits = fits && (uclamp_min <= capacity_orig_thermal);
+ if (fits && (util < uclamp_min) && (uclamp_min > capacity_orig_thermal))
+ return -1;
return fits;
}
@@ -4572,7 +4564,11 @@ static inline int task_fits_cpu(struct task_struct *p, int cpu)
unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
unsigned long util = task_util_est(p);
- return util_fits_cpu(util, uclamp_min, uclamp_max, cpu);
+ /*
+ * Return true only if the cpu fully fits the task requirements, which
+	 * include not only the utilization but also the performance hints.
+ */
+ return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
}
static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
@@ -4656,6 +4652,7 @@ static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
u64 vruntime = cfs_rq->min_vruntime;
+ u64 sleep_time;
/*
* The 'current' period is already promised to the current tasks,
@@ -4685,8 +4682,18 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
vruntime -= thresh;
}
- /* ensure we never gain time by being placed backwards. */
- se->vruntime = max_vruntime(se->vruntime, vruntime);
+ /*
+ * Pull vruntime of the entity being placed to the base level of
+ * cfs_rq, to prevent boosting it if placed backwards. If the entity
+ * slept for a long time, don't even try to compare its vruntime with
+ * the base as it may be too far off and the comparison may get
+	 * inverted due to s64 overflow.
+ */
+ sleep_time = rq_clock_task(rq_of(cfs_rq)) - se->exec_start;
+ if ((s64)sleep_time > 60LL * NSEC_PER_SEC)
+ se->vruntime = vruntime;
+ else
+ se->vruntime = max_vruntime(se->vruntime, vruntime);
}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -4896,7 +4903,13 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
struct sched_entity *se;
s64 delta;
- ideal_runtime = sched_slice(cfs_rq, curr);
+ /*
+ * When many tasks blow up the sched_period; it is possible that
+ * sched_slice() reports unusually large results (when many tasks are
+ * very light for example). Therefore impose a maximum.
+ */
+ ideal_runtime = min_t(u64, sched_slice(cfs_rq, curr), sysctl_sched_latency);
+
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
resched_curr(rq_of(cfs_rq));
@@ -5461,22 +5474,105 @@ unthrottle_throttle:
resched_curr(rq);
}
-static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
+#ifdef CONFIG_SMP
+static void __cfsb_csd_unthrottle(void *arg)
{
- struct cfs_rq *cfs_rq;
+ struct cfs_rq *cursor, *tmp;
+ struct rq *rq = arg;
+ struct rq_flags rf;
+
+ rq_lock(rq, &rf);
+
+ /*
+ * Since we hold rq lock we're safe from concurrent manipulation of
+ * the CSD list. However, this RCU critical section annotates the
+ * fact that we pair with sched_free_group_rcu(), so that we cannot
+ * race with group being freed in the window between removing it
+ * from the list and advancing to the next entry in the list.
+ */
+ rcu_read_lock();
+
+ list_for_each_entry_safe(cursor, tmp, &rq->cfsb_csd_list,
+ throttled_csd_list) {
+ list_del_init(&cursor->throttled_csd_list);
+
+ if (cfs_rq_throttled(cursor))
+ unthrottle_cfs_rq(cursor);
+ }
+
+ rcu_read_unlock();
+
+ rq_unlock(rq, &rf);
+}
+
+static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
+{
+ struct rq *rq = rq_of(cfs_rq);
+ bool first;
+
+ if (rq == this_rq()) {
+ unthrottle_cfs_rq(cfs_rq);
+ return;
+ }
+
+ /* Already enqueued */
+ if (SCHED_WARN_ON(!list_empty(&cfs_rq->throttled_csd_list)))
+ return;
+
+ first = list_empty(&rq->cfsb_csd_list);
+ list_add_tail(&cfs_rq->throttled_csd_list, &rq->cfsb_csd_list);
+ if (first)
+ smp_call_function_single_async(cpu_of(rq), &rq->cfsb_csd);
+}
+#else
+static inline void __unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
+{
+ unthrottle_cfs_rq(cfs_rq);
+}
+#endif
+
+static void unthrottle_cfs_rq_async(struct cfs_rq *cfs_rq)
+{
+ lockdep_assert_rq_held(rq_of(cfs_rq));
+
+ if (SCHED_WARN_ON(!cfs_rq_throttled(cfs_rq) ||
+ cfs_rq->runtime_remaining <= 0))
+ return;
+
+ __unthrottle_cfs_rq_async(cfs_rq);
+}
+
+static bool distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
+{
+ struct cfs_rq *local_unthrottle = NULL;
+ int this_cpu = smp_processor_id();
u64 runtime, remaining = 1;
+ bool throttled = false;
+ struct cfs_rq *cfs_rq;
+ struct rq_flags rf;
+ struct rq *rq;
rcu_read_lock();
list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
throttled_list) {
- struct rq *rq = rq_of(cfs_rq);
- struct rq_flags rf;
+ rq = rq_of(cfs_rq);
+
+ if (!remaining) {
+ throttled = true;
+ break;
+ }
rq_lock_irqsave(rq, &rf);
if (!cfs_rq_throttled(cfs_rq))
goto next;
- /* By the above check, this should never be true */
+#ifdef CONFIG_SMP
+ /* Already queued for async unthrottle */
+ if (!list_empty(&cfs_rq->throttled_csd_list))
+ goto next;
+#endif
+
+ /* By the above checks, this should never be true */
SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
raw_spin_lock(&cfs_b->lock);
@@ -5490,16 +5586,30 @@ static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
cfs_rq->runtime_remaining += runtime;
/* we check whether we're throttled above */
- if (cfs_rq->runtime_remaining > 0)
- unthrottle_cfs_rq(cfs_rq);
+ if (cfs_rq->runtime_remaining > 0) {
+ if (cpu_of(rq) != this_cpu ||
+ SCHED_WARN_ON(local_unthrottle))
+ unthrottle_cfs_rq_async(cfs_rq);
+ else
+ local_unthrottle = cfs_rq;
+ } else {
+ throttled = true;
+ }
next:
rq_unlock_irqrestore(rq, &rf);
-
- if (!remaining)
- break;
}
rcu_read_unlock();
+
+ if (local_unthrottle) {
+ rq = cpu_rq(this_cpu);
+ rq_lock_irqsave(rq, &rf);
+ if (cfs_rq_throttled(local_unthrottle))
+ unthrottle_cfs_rq(local_unthrottle);
+ rq_unlock_irqrestore(rq, &rf);
+ }
+
+ return throttled;
}
/*
@@ -5544,10 +5654,8 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u
while (throttled && cfs_b->runtime > 0) {
raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
/* we can't nest cfs_b->lock while distributing bandwidth */
- distribute_cfs_runtime(cfs_b);
+ throttled = distribute_cfs_runtime(cfs_b);
raw_spin_lock_irqsave(&cfs_b->lock, flags);
-
- throttled = !list_empty(&cfs_b->throttled_cfs_rq);
}
/*
@@ -5824,6 +5932,9 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
cfs_rq->runtime_enabled = 0;
INIT_LIST_HEAD(&cfs_rq->throttled_list);
+#ifdef CONFIG_SMP
+ INIT_LIST_HEAD(&cfs_rq->throttled_csd_list);
+#endif
}
void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -5840,12 +5951,38 @@ void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
+ int __maybe_unused i;
+
/* init_cfs_bandwidth() was not called */
if (!cfs_b->throttled_cfs_rq.next)
return;
hrtimer_cancel(&cfs_b->period_timer);
hrtimer_cancel(&cfs_b->slack_timer);
+
+ /*
+ * It is possible that we still have some cfs_rq's pending on a CSD
+ * list, though this race is very rare. In order for this to occur, we
+ * must have raced with the last task leaving the group while there
+ * exist throttled cfs_rq(s), and the period_timer must have queued the
+ * CSD item but the remote cpu has not yet processed it. To handle this,
+ * we can simply flush all pending CSD work inline here. We're
+ * guaranteed at this point that no additional cfs_rq of this group can
+ * join a CSD list.
+ */
+#ifdef CONFIG_SMP
+ for_each_possible_cpu(i) {
+ struct rq *rq = cpu_rq(i);
+ unsigned long flags;
+
+ if (list_empty(&rq->cfsb_csd_list))
+ continue;
+
+ local_irq_save(flags);
+ __cfsb_csd_unthrottle(rq);
+ local_irq_restore(flags);
+ }
+#endif
}
/*
@@ -6008,6 +6145,7 @@ static inline bool cpu_overutilized(int cpu)
unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+ /* Return true only if the utilization doesn't fit CPU's capacity */
return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
}
@@ -6801,6 +6939,7 @@ static int
select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
{
unsigned long task_util, util_min, util_max, best_cap = 0;
+ int fits, best_fits = 0;
int cpu, best_cpu = -1;
struct cpumask *cpus;
@@ -6811,17 +6950,33 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
util_min = uclamp_eff_value(p, UCLAMP_MIN);
util_max = uclamp_eff_value(p, UCLAMP_MAX);
- for_each_cpu_wrap(cpu, cpus, target) {
+ for_each_cpu_wrap(cpu, cpus, target + 1) {
unsigned long cpu_cap = capacity_of(cpu);
if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
continue;
- if (util_fits_cpu(task_util, util_min, util_max, cpu))
+
+ fits = util_fits_cpu(task_util, util_min, util_max, cpu);
+
+ /* This CPU fits with all requirements */
+ if (fits > 0)
return cpu;
+ /*
+ * Only the min performance hint (i.e. uclamp_min) doesn't fit.
+ * Look for the CPU with best capacity.
+ */
+ else if (fits < 0)
+ cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
- if (cpu_cap > best_cap) {
+ /*
+ * First, select CPU which fits better (-1 being better than 0).
+ * Then, select the one with best capacity at same level.
+ */
+ if ((fits < best_fits) ||
+ ((fits == best_fits) && (cpu_cap > best_cap))) {
best_cap = cpu_cap;
best_cpu = cpu;
+ best_fits = fits;
}
}
@@ -6834,7 +6989,11 @@ static inline bool asym_fits_cpu(unsigned long util,
int cpu)
{
if (sched_asym_cpucap_active())
- return util_fits_cpu(util, util_min, util_max, cpu);
+ /*
+ * Return true only if the cpu fully fits the task requirements
+ * which include the utilization and the performance hints.
+ */
+ return (util_fits_cpu(util, util_min, util_max, cpu) > 0);
return true;
}
@@ -7201,6 +7360,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
struct root_domain *rd = this_rq()->rd;
int cpu, best_energy_cpu, target = -1;
+ int prev_fits = -1, best_fits = -1;
+ unsigned long best_thermal_cap = 0;
+ unsigned long prev_thermal_cap = 0;
struct sched_domain *sd;
struct perf_domain *pd;
struct energy_env eenv;
@@ -7236,6 +7398,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
unsigned long prev_spare_cap = 0;
int max_spare_cap_cpu = -1;
unsigned long base_energy;
+ int fits, max_fits = -1;
cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
@@ -7285,7 +7448,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
util_min = max(rq_util_min, p_util_min);
util_max = max(rq_util_max, p_util_max);
}
- if (!util_fits_cpu(util, util_min, util_max, cpu))
+
+ fits = util_fits_cpu(util, util_min, util_max, cpu);
+ if (!fits)
continue;
lsub_positive(&cpu_cap, util);
@@ -7293,7 +7458,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
if (cpu == prev_cpu) {
/* Always use prev_cpu as a candidate. */
prev_spare_cap = cpu_cap;
- } else if (cpu_cap > max_spare_cap) {
+ prev_fits = fits;
+ } else if ((fits > max_fits) ||
+ ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
/*
* Find the CPU with the maximum spare capacity
* among the remaining CPUs in the performance
@@ -7301,6 +7468,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
*/
max_spare_cap = cpu_cap;
max_spare_cap_cpu = cpu;
+ max_fits = fits;
}
}
@@ -7319,26 +7487,50 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
if (prev_delta < base_energy)
goto unlock;
prev_delta -= base_energy;
+ prev_thermal_cap = cpu_thermal_cap;
best_delta = min(best_delta, prev_delta);
}
/* Evaluate the energy impact of using max_spare_cap_cpu. */
if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
+ /* Current best energy cpu fits better */
+ if (max_fits < best_fits)
+ continue;
+
+ /*
+ * Both don't fit performance hint (i.e. uclamp_min)
+ * but best energy cpu has better capacity.
+ */
+ if ((max_fits < 0) &&
+ (cpu_thermal_cap <= best_thermal_cap))
+ continue;
+
cur_delta = compute_energy(&eenv, pd, cpus, p,
max_spare_cap_cpu);
/* CPU utilization has changed */
if (cur_delta < base_energy)
goto unlock;
cur_delta -= base_energy;
- if (cur_delta < best_delta) {
- best_delta = cur_delta;
- best_energy_cpu = max_spare_cap_cpu;
- }
+
+ /*
+ * Both fit for the task but best energy cpu has lower
+ * energy impact.
+ */
+ if ((max_fits > 0) && (best_fits > 0) &&
+ (cur_delta >= best_delta))
+ continue;
+
+ best_delta = cur_delta;
+ best_energy_cpu = max_spare_cap_cpu;
+ best_fits = max_fits;
+ best_thermal_cap = cpu_thermal_cap;
}
}
rcu_read_unlock();
- if (best_delta < prev_delta)
+ if ((best_fits > prev_fits) ||
+ ((best_fits > 0) && (best_delta < prev_delta)) ||
+ ((best_fits < 0) && (best_thermal_cap > prev_thermal_cap)))
target = best_energy_cpu;
return target;
@@ -8838,82 +9030,16 @@ static unsigned long scale_rt_capacity(int cpu)
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
- unsigned long capacity_orig = arch_scale_cpu_capacity(cpu);
unsigned long capacity = scale_rt_capacity(cpu);
struct sched_group *sdg = sd->groups;
- struct rq *rq = cpu_rq(cpu);
- rq->cpu_capacity_orig = capacity_orig;
+ cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
if (!capacity)
capacity = 1;
- rq->cpu_capacity = capacity;
-
- /*
- * Detect if the performance domain is in capacity inversion state.
- *
- * Capacity inversion happens when another perf domain with equal or
- * lower capacity_orig_of() ends up having higher capacity than this
- * domain after subtracting thermal pressure.
- *
- * We only take into account thermal pressure in this detection as it's
- * the only metric that actually results in *real* reduction of
- * capacity due to performance points (OPPs) being dropped/become
- * unreachable due to thermal throttling.
- *
- * We assume:
- * * That all cpus in a perf domain have the same capacity_orig
- * (same uArch).
- * * Thermal pressure will impact all cpus in this perf domain
- * equally.
- */
- if (sched_energy_enabled()) {
- unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
- struct perf_domain *pd;
-
- rcu_read_lock();
-
- pd = rcu_dereference(rq->rd->pd);
- rq->cpu_capacity_inverted = 0;
-
- for (; pd; pd = pd->next) {
- struct cpumask *pd_span = perf_domain_span(pd);
- unsigned long pd_cap_orig, pd_cap;
-
- /* We can't be inverted against our own pd */
- if (cpumask_test_cpu(cpu_of(rq), pd_span))
- continue;
-
- cpu = cpumask_any(pd_span);
- pd_cap_orig = arch_scale_cpu_capacity(cpu);
-
- if (capacity_orig < pd_cap_orig)
- continue;
-
- /*
- * handle the case of multiple perf domains have the
- * same capacity_orig but one of them is under higher
- * thermal pressure. We record it as capacity
- * inversion.
- */
- if (capacity_orig == pd_cap_orig) {
- pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu));
-
- if (pd_cap > inv_cap) {
- rq->cpu_capacity_inverted = inv_cap;
- break;
- }
- } else if (pd_cap_orig > inv_cap) {
- rq->cpu_capacity_inverted = inv_cap;
- break;
- }
- }
-
- rcu_read_unlock();
- }
-
- trace_sched_cpu_capacity_tp(rq);
+ cpu_rq(cpu)->cpu_capacity = capacity;
+ trace_sched_cpu_capacity_tp(cpu_rq(cpu));
sdg->sgc->capacity = capacity;
sdg->sgc->min_capacity = capacity;
@@ -10141,24 +10267,23 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
*/
update_sd_lb_stats(env, &sds);
- if (sched_energy_enabled()) {
- struct root_domain *rd = env->dst_rq->rd;
-
- if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
- goto out_balanced;
- }
-
- local = &sds.local_stat;
- busiest = &sds.busiest_stat;
-
/* There is no busy sibling group to pull tasks from */
if (!sds.busiest)
goto out_balanced;
+ busiest = &sds.busiest_stat;
+
/* Misfit tasks should be dealt with regardless of the avg load */
if (busiest->group_type == group_misfit_task)
goto force_balance;
+ if (sched_energy_enabled()) {
+ struct root_domain *rd = env->dst_rq->rd;
+
+ if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
+ goto out_balanced;
+ }
+
/* ASYM feature bypasses nice load balance check */
if (busiest->group_type == group_asym_packing)
goto force_balance;
@@ -10171,6 +10296,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
if (busiest->group_type == group_imbalanced)
goto force_balance;
+ local = &sds.local_stat;
/*
* If the local group is busier than the selected busiest group
* don't try and pull any tasks.
@@ -11734,7 +11860,8 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
/*
* se_fi_update - Update the cfs_rq->min_vruntime_fi in a CFS hierarchy if needed.
*/
-static void se_fi_update(struct sched_entity *se, unsigned int fi_seq, bool forceidle)
+static void se_fi_update(const struct sched_entity *se, unsigned int fi_seq,
+ bool forceidle)
{
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -11759,11 +11886,12 @@ void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi)
se_fi_update(se, rq->core->core_forceidle_seq, in_fi);
}
-bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi)
+bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool in_fi)
{
struct rq *rq = task_rq(a);
- struct sched_entity *sea = &a->se;
- struct sched_entity *seb = &b->se;
+ const struct sched_entity *sea = &a->se;
+ const struct sched_entity *seb = &b->se;
struct cfs_rq *cfs_rqa;
struct cfs_rq *cfs_rqb;
s64 delta;
@@ -12480,6 +12608,11 @@ __init void init_sched_fair_class(void)
for_each_possible_cpu(i) {
zalloc_cpumask_var_node(&per_cpu(load_balance_mask, i), GFP_KERNEL, cpu_to_node(i));
zalloc_cpumask_var_node(&per_cpu(select_rq_mask, i), GFP_KERNEL, cpu_to_node(i));
+
+#ifdef CONFIG_CFS_BANDWIDTH
+ INIT_CSD(&cpu_rq(i)->cfsb_csd, __cfsb_csd_unthrottle, cpu_rq(i));
+ INIT_LIST_HEAD(&cpu_rq(i)->cfsb_csd_list);
+#endif
}
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index f26ab2675f7d..e9ef66be2870 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -51,18 +51,22 @@ __setup("hlt", cpu_idle_nopoll_setup);
static noinline int __cpuidle cpu_idle_poll(void)
{
+ instrumentation_begin();
trace_cpu_idle(0, smp_processor_id());
stop_critical_timings();
- ct_idle_enter();
- local_irq_enable();
+ ct_cpuidle_enter();
+ raw_local_irq_enable();
while (!tif_need_resched() &&
(cpu_idle_force_poll || tick_check_broadcast_expired()))
cpu_relax();
+ raw_local_irq_disable();
- ct_idle_exit();
+ ct_cpuidle_exit();
start_critical_timings();
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+ local_irq_enable();
+ instrumentation_end();
return 1;
}
@@ -75,7 +79,6 @@ void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
cpu_idle_force_poll = 1;
- raw_local_irq_enable();
}
/**
@@ -85,44 +88,20 @@ void __weak arch_cpu_idle(void)
*/
void __cpuidle default_idle_call(void)
{
- if (current_clr_polling_and_test()) {
- local_irq_enable();
- } else {
-
+ instrumentation_begin();
+ if (!current_clr_polling_and_test()) {
trace_cpu_idle(1, smp_processor_id());
stop_critical_timings();
- /*
- * arch_cpu_idle() is supposed to enable IRQs, however
- * we can't do that because of RCU and tracing.
- *
- * Trace IRQs enable here, then switch off RCU, and have
- * arch_cpu_idle() use raw_local_irq_enable(). Note that
- * ct_idle_enter() relies on lockdep IRQ state, so switch that
- * last -- this is very similar to the entry code.
- */
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare();
- ct_idle_enter();
- lockdep_hardirqs_on(_THIS_IP_);
-
+ ct_cpuidle_enter();
arch_cpu_idle();
-
- /*
- * OK, so IRQs are enabled here, but RCU needs them disabled to
- * turn itself back on.. funny thing is that disabling IRQs
- * will cause tracing, which needs RCU. Jump through hoops to
- * make it 'work'.
- */
- raw_local_irq_disable();
- lockdep_hardirqs_off(_THIS_IP_);
- ct_idle_exit();
- lockdep_hardirqs_on(_THIS_IP_);
- raw_local_irq_enable();
+ ct_cpuidle_exit();
start_critical_timings();
trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
}
+ local_irq_enable();
+ instrumentation_end();
}
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 0c5be7ebb1dc..2ad881d07752 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -159,7 +159,8 @@
| MEMBARRIER_CMD_PRIVATE_EXPEDITED \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
- | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
+ | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
+ | MEMBARRIER_CMD_GET_REGISTRATIONS)
static void ipi_mb(void *info)
{
@@ -540,6 +541,40 @@ static int membarrier_register_private_expedited(int flags)
return 0;
}
+static int membarrier_get_registrations(void)
+{
+ struct task_struct *p = current;
+ struct mm_struct *mm = p->mm;
+ int registrations_mask = 0, membarrier_state, i;
+ static const int states[] = {
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED |
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE |
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY,
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ |
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY
+ };
+ static const int registration_cmds[] = {
+ MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED,
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ
+ };
+ BUILD_BUG_ON(ARRAY_SIZE(states) != ARRAY_SIZE(registration_cmds));
+
+ membarrier_state = atomic_read(&mm->membarrier_state);
+ for (i = 0; i < ARRAY_SIZE(states); ++i) {
+ if (membarrier_state & states[i]) {
+ registrations_mask |= registration_cmds[i];
+ membarrier_state &= ~states[i];
+ }
+ }
+ WARN_ON_ONCE(membarrier_state != 0);
+ return registrations_mask;
+}
+
/**
* sys_membarrier - issue memory barriers on a set of threads
* @cmd: Takes command values defined in enum membarrier_cmd.
@@ -623,6 +658,8 @@ SYSCALL_DEFINE3(membarrier, int, cmd, unsigned int, flags, int, cpu_id)
return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, cpu_id);
case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
return membarrier_register_private_expedited(MEMBARRIER_FLAG_RSEQ);
+ case MEMBARRIER_CMD_GET_REGISTRATIONS:
+ return membarrier_get_registrations();
default:
return -EINVAL;
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ed2a47e4ddae..0a11f44adee5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1777,6 +1777,8 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
BUG_ON(idx >= MAX_RT_PRIO);
queue = array->queue + idx;
+ if (SCHED_WARN_ON(list_empty(queue)))
+ return NULL;
next = list_entry(queue->next, struct sched_rt_entity, run_list);
return next;
@@ -1789,7 +1791,8 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
do {
rt_se = pick_next_rt_entity(rt_rq);
- BUG_ON(!rt_se);
+ if (unlikely(!rt_se))
+ return NULL;
rt_rq = group_rt_rq(rt_se);
} while (rt_rq);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 771f8ddb7053..3e8df6d31c1e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -248,7 +248,7 @@ static inline void update_avg(u64 *avg, u64 sample)
#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
-static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
+static inline bool dl_entity_is_special(const struct sched_dl_entity *dl_se)
{
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
@@ -260,8 +260,8 @@ static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
/*
* Tells if entity @a should preempt entity @b.
*/
-static inline bool
-dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
+static inline bool dl_entity_preempt(const struct sched_dl_entity *a,
+ const struct sched_dl_entity *b)
{
return dl_entity_is_special(a) ||
dl_time_before(a->deadline, b->deadline);
@@ -645,6 +645,9 @@ struct cfs_rq {
int throttled;
int throttle_count;
struct list_head throttled_list;
+#ifdef CONFIG_SMP
+ struct list_head throttled_csd_list;
+#endif
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};
@@ -1041,7 +1044,6 @@ struct rq {
unsigned long cpu_capacity;
unsigned long cpu_capacity_orig;
- unsigned long cpu_capacity_inverted;
struct balance_callback *balance_callback;
@@ -1154,6 +1156,11 @@ struct rq {
/* Scratch cpumask to be temporarily used under rq_lock */
cpumask_var_t scratch_mask;
+
+#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SMP)
+ call_single_data_t cfsb_csd;
+ struct list_head cfsb_csd_list;
+#endif
};
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1236,7 +1243,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
return &rq->__lock;
}
-bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
+bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
+ bool fi);
/*
* Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -1415,7 +1423,7 @@ static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
}
/* runqueue on which this entity is (to be) queued */
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
return se->cfs_rq;
}
@@ -1428,19 +1436,16 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
#else
-static inline struct task_struct *task_of(struct sched_entity *se)
-{
- return container_of(se, struct task_struct, se);
-}
+#define task_of(_se) container_of(_se, struct task_struct, se)
-static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
+static inline struct cfs_rq *task_cfs_rq(const struct task_struct *p)
{
return &task_rq(p)->cfs;
}
-static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
+static inline struct cfs_rq *cfs_rq_of(const struct sched_entity *se)
{
- struct task_struct *p = task_of(se);
+ const struct task_struct *p = task_of(se);
struct rq *rq = task_rq(p);
return &rq->cfs;
@@ -2893,24 +2898,6 @@ static inline unsigned long capacity_orig_of(int cpu)
return cpu_rq(cpu)->cpu_capacity_orig;
}
-/*
- * Returns inverted capacity if the CPU is in capacity inversion state.
- * 0 otherwise.
- *
- * Capacity inversion detection only considers thermal impact where actual
- * performance points (OPPs) gets dropped.
- *
- * Capacity inversion state happens when another performance domain that has
- * equal or lower capacity_orig_of() becomes effectively larger than the perf
- * domain this CPU belongs to due to thermal pressure throttling it hard.
- *
- * See comment in update_cpu_capacity().
- */
-static inline unsigned long cpu_in_capacity_inversion(int cpu)
-{
- return cpu_rq(cpu)->cpu_capacity_inverted;
-}
-
/**
* enum cpu_util_type - CPU utilization type
* @FREQUENCY_UTIL: Utilization used to select frequency
@@ -3261,4 +3248,62 @@ static inline void update_current_exec_runtime(struct task_struct *curr,
cgroup_account_cputime(curr, delta_exec);
}
+#ifdef CONFIG_SCHED_MM_CID
+static inline int __mm_cid_get(struct mm_struct *mm)
+{
+ struct cpumask *cpumask;
+ int cid;
+
+ cpumask = mm_cidmask(mm);
+ cid = cpumask_first_zero(cpumask);
+ if (cid >= nr_cpu_ids)
+ return -1;
+ __cpumask_set_cpu(cid, cpumask);
+ return cid;
+}
+
+static inline void mm_cid_put(struct mm_struct *mm, int cid)
+{
+ lockdep_assert_irqs_disabled();
+ if (cid < 0)
+ return;
+ raw_spin_lock(&mm->cid_lock);
+ __cpumask_clear_cpu(cid, mm_cidmask(mm));
+ raw_spin_unlock(&mm->cid_lock);
+}
+
+static inline int mm_cid_get(struct mm_struct *mm)
+{
+ int ret;
+
+ lockdep_assert_irqs_disabled();
+ raw_spin_lock(&mm->cid_lock);
+ ret = __mm_cid_get(mm);
+ raw_spin_unlock(&mm->cid_lock);
+ return ret;
+}
+
+static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next)
+{
+ if (prev->mm_cid_active) {
+ if (next->mm_cid_active && next->mm == prev->mm) {
+ /*
+ * Context switch between threads in same mm, hand over
+ * the mm_cid from prev to next.
+ */
+ next->mm_cid = prev->mm_cid;
+ prev->mm_cid = -1;
+ return;
+ }
+ mm_cid_put(prev->mm, prev->mm_cid);
+ prev->mm_cid = -1;
+ }
+ if (next->mm_cid_active)
+ next->mm_cid = mm_cid_get(next->mm);
+}
+
+#else
+static inline void switch_mm_cid(struct task_struct *prev, struct task_struct *next) { }
+#endif
+
#endif /* _KERNEL_SCHED_SCHED_H */
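The allocator above is a per-mm first-zero-bit bitmap; as a standalone C analogue of its semantics (illustration only, no locking, not kernel code):

	#include <stdio.h>

	#define NR_IDS 8
	static unsigned char cid_used[NR_IDS];

	/* Hand out the lowest free ID, as __mm_cid_get() does via
	 * cpumask_first_zero(). */
	static int cid_get(void)
	{
		for (int i = 0; i < NR_IDS; i++) {
			if (!cid_used[i]) {
				cid_used[i] = 1;
				return i;
			}
		}
		return -1;
	}

	static void cid_put(int cid)
	{
		if (cid >= 0)
			cid_used[cid] = 0;
	}

	int main(void)
	{
		int a = cid_get(), b = cid_get();	/* 0, 1 */
		cid_put(a);
		printf("%d %d %d\n", a, b, cid_get());	/* 0 1 0: freed IDs are reused */
		return 0;
	}

The compactness is the point: because switch_mm_cid() hands the ID over between threads of the same mm and frees it otherwise, concurrency IDs are meant to stay bounded by the number of threads actually running concurrently in the mm, not by the number of possible CPUs, which makes them usable as a dense per-thread index.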
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8739c2a5a54e..d93c3379e901 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -578,7 +578,7 @@ out:
*/
struct root_domain def_root_domain;
-void init_defrootdomain(void)
+void __init init_defrootdomain(void)
{
init_rootdomain(&def_root_domain);
@@ -2451,7 +2451,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
* Set up scheduler domains and groups. For now this just excludes isolated
* CPUs, but could be used to exclude other special cases in the future.
*/
-int sched_init_domains(const struct cpumask *cpu_map)
+int __init sched_init_domains(const struct cpumask *cpu_map)
{
int err;
diff --git a/kernel/signal.c b/kernel/signal.c
index ae26da61c4d9..8cb28f1df294 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2951,6 +2951,7 @@ void exit_signals(struct task_struct *tsk)
cgroup_threadgroup_change_begin(tsk);
if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+ sched_mm_cid_exit_signals(tsk);
tsk->flags |= PF_EXITING;
cgroup_threadgroup_change_end(tsk);
return;
@@ -2961,6 +2962,7 @@ void exit_signals(struct task_struct *tsk)
* From now this task is not visible for group-wide signals,
* see wants_signal(), do_signal_stop().
*/
+ sched_mm_cid_exit_signals(tsk);
tsk->flags |= PF_EXITING;
cgroup_threadgroup_change_end(tsk);
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 797eb93103ad..e28f9210f8a1 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -56,25 +56,20 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
* hrtimer callback function is currently running, then
* hrtimer_start() cannot move it and the timer stays on the CPU on
* which it is assigned at the moment.
+ */
+ hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
+ /*
+ * The core tick broadcast mode expects bc->bound_on to be set
+ * correctly to prevent a CPU which has the broadcast hrtimer
+ * armed from going deep idle.
*
- * As this can be called from idle code, the hrtimer_start()
- * invocation has to be wrapped with RCU_NONIDLE() as
- * hrtimer_start() can call into tracing.
+ * As tick_broadcast_lock is held, nothing can change the cpu
+ * base which was just established in hrtimer_start() above. So
+ * the below access is safe even without holding the hrtimer
+ * base lock.
*/
- RCU_NONIDLE( {
- hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
- /*
- * The core tick broadcast mode expects bc->bound_on to be set
- * correctly to prevent a CPU which has the broadcast hrtimer
- * armed from going deep idle.
- *
- * As tick_broadcast_lock is held, nothing can change the cpu
- * base which was just established in hrtimer_start() above. So
- * the below access is safe even without holding the hrtimer
- * base lock.
- */
- bc->bound_on = bctimer.base->cpu_base->cpu;
- } );
+ bc->bound_on = bctimer.base->cpu_base->cpu;
+
return 0;
}
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f7fe6fe36173..93bf2b4e47e5 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -622,9 +622,13 @@ struct cpumask *tick_get_broadcast_oneshot_mask(void)
* to avoid a deep idle transition as we are about to get the
* broadcast IPI right away.
*/
-int tick_check_broadcast_expired(void)
+noinstr int tick_check_broadcast_expired(void)
{
+#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
+ return arch_test_bit(smp_processor_id(), cpumask_bits(tick_broadcast_force_mask));
+#else
return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
+#endif
}
/*
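The ifdef above works around bitops instrumentation in a noinstr path: with the generic instrumented bitops header, cpumask_test_cpu() resolves through test_bit(), which issues an instrumentation call before the raw access and is therefore not safe to run from noinstr code. A sketch of the shape of that header (based on asm-generic/bitops/instrumented-non-atomic.h; treat the details as an assumption):

	static __always_inline bool
	test_bit(long nr, const volatile unsigned long *addr)
	{
		/* KASAN/KCSAN hook -- this call is what noinstr forbids. */
		instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
		return arch_test_bit(nr, addr);
	}

Calling arch_test_bit() directly performs the same bit test while skipping the hook.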
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c9e40f692650..54a163ae4815 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3128,6 +3128,9 @@ void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
return;
}
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
+ return;
+
/*
* When an NMI triggers, RCU is enabled via ct_nmi_enter(),
* but if the above rcu_is_watching() failed, then the NMI
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 1e130da1b742..e37446f7916e 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -15,6 +15,20 @@
#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>
+/*
+ * Use regular trace points on architectures that implement noinstr
+ * tooling: these calls will only happen with RCU enabled, which can
+ * use a regular tracepoint.
+ *
+ * On older architectures, use the rcuidle tracing methods (which
+ * aren't NMI-safe - so exclude NMI contexts):
+ */
+#ifdef CONFIG_ARCH_WANTS_NO_INSTR
+#define trace(point) trace_##point
+#else
+#define trace(point) if (!in_nmi()) trace_##point##_rcuidle
+#endif
+
#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);
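At a call site, the wrapper expands as follows (illustration only):

	trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);

	/* with CONFIG_ARCH_WANTS_NO_INSTR: */
	trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);

	/* without it: */
	if (!in_nmi())
		trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);

which is why the explicit in_nmi() checks can be dropped from the call sites below.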
@@ -28,8 +42,7 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
void trace_hardirqs_on_prepare(void)
{
if (this_cpu_read(tracing_irq_cpu)) {
- if (!in_nmi())
- trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+ trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
this_cpu_write(tracing_irq_cpu, 0);
}
@@ -40,8 +53,7 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
void trace_hardirqs_on(void)
{
if (this_cpu_read(tracing_irq_cpu)) {
- if (!in_nmi())
- trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+ trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
this_cpu_write(tracing_irq_cpu, 0);
}
@@ -63,8 +75,7 @@ void trace_hardirqs_off_finish(void)
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
- if (!in_nmi())
- trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+ trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
}
}
@@ -78,56 +89,24 @@ void trace_hardirqs_off(void)
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
- if (!in_nmi())
- trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+ trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
-
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
-{
- if (this_cpu_read(tracing_irq_cpu)) {
- if (!in_nmi())
- trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
- tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
- this_cpu_write(tracing_irq_cpu, 0);
- }
-
- lockdep_hardirqs_on_prepare();
- lockdep_hardirqs_on(caller_addr);
-}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
-
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
-{
- lockdep_hardirqs_off(caller_addr);
-
- if (!this_cpu_read(tracing_irq_cpu)) {
- this_cpu_write(tracing_irq_cpu, 1);
- tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
- if (!in_nmi())
- trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
- }
-}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
-NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_TRACE_IRQFLAGS */
#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
- if (!in_nmi())
- trace_preempt_enable_rcuidle(a0, a1);
+ trace(preempt_enable)(a0, a1);
tracer_preempt_on(a0, a1);
}
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
- if (!in_nmi())
- trace_preempt_disable_rcuidle(a0, a1);
+ trace(preempt_disable)(a0, a1);
tracer_preempt_off(a0, a1);
}
#endif
diff --git a/lib/bug.c b/lib/bug.c
index c223a2575b72..e0ff21989990 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,6 +47,7 @@
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/ftrace.h>
+#include <linux/context_tracking.h>
extern struct bug_entry __start___bug_table[], __stop___bug_table[];
@@ -153,7 +154,7 @@ struct bug_entry *find_bug(unsigned long bugaddr)
return module_find_bug(bugaddr);
}
-enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+static enum bug_trap_type __report_bug(unsigned long bugaddr, struct pt_regs *regs)
{
struct bug_entry *bug;
const char *file;
@@ -209,6 +210,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
return BUG_TRAP_TYPE_BUG;
}
+enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+{
+ enum bug_trap_type ret;
+ bool rcu = false;
+
+ rcu = warn_rcu_enter();
+ ret = __report_bug(bugaddr, regs);
+ warn_rcu_exit(rcu);
+
+ return ret;
+}
+
static void clear_once_table(struct bug_entry *start, struct bug_entry *end)
{
struct bug_entry *bug;
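The wrapper establishes a reusable bracket pattern: a report path that may fire from a context where RCU is not watching records whether RCU had to be forced on, and undoes exactly that on the way out. A sketch, assuming warn_rcu_enter() returns that state:

	bool rcu = warn_rcu_enter();		/* make RCU watch if it wasn't */
	ret = __report_bug(bugaddr, regs);	/* may trace/printk safely */
	warn_rcu_exit(rcu);			/* restore the previous state */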
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 60c7099857a0..4d39e0babb98 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -339,9 +339,10 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
{
struct invalid_value_data *data = _data;
char val_str[VALUE_LENGTH];
+ unsigned long ua_flags = user_access_save();
if (suppress_report(&data->location))
- return;
+ goto out;
ubsan_prologue(&data->location, "invalid-load");
@@ -351,6 +352,8 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
val_str, data->type->type_name);
ubsan_epilogue();
+out:
+ user_access_restore(ua_flags);
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index ea8cf1310b1e..71c15438afcf 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -618,6 +618,10 @@ void __asan_set_shadow_f3(const void *addr, size_t size);
void __asan_set_shadow_f5(const void *addr, size_t size);
void __asan_set_shadow_f8(const void *addr, size_t size);
+void *__asan_memset(void *addr, int c, size_t len);
+void *__asan_memmove(void *dest, const void *src, size_t len);
+void *__asan_memcpy(void *dest, const void *src, size_t len);
+
void __hwasan_load1_noabort(unsigned long addr);
void __hwasan_store1_noabort(unsigned long addr);
void __hwasan_load2_noabort(unsigned long addr);
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 15cfb34d16a1..3703983a8e55 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -38,6 +38,12 @@ bool __kasan_check_write(const volatile void *p, unsigned int size)
}
EXPORT_SYMBOL(__kasan_check_write);
+#ifndef CONFIG_GENERIC_ENTRY
+/*
+ * CONFIG_GENERIC_ENTRY relies on compiler emitted mem*() calls to not be
+ * instrumented. KASAN enabled toolchains should emit __asan_mem*() functions
+ * for the sites they want to instrument.
+ */
#undef memset
void *memset(void *addr, int c, size_t len)
{
@@ -68,6 +74,38 @@ void *memcpy(void *dest, const void *src, size_t len)
return __memcpy(dest, src, len);
}
+#endif
+
+void *__asan_memset(void *addr, int c, size_t len)
+{
+ if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_))
+ return NULL;
+
+ return __memset(addr, c, len);
+}
+EXPORT_SYMBOL(__asan_memset);
+
+#ifdef __HAVE_ARCH_MEMMOVE
+void *__asan_memmove(void *dest, const void *src, size_t len)
+{
+ if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
+ !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
+ return NULL;
+
+ return __memmove(dest, src, len);
+}
+EXPORT_SYMBOL(__asan_memmove);
+#endif
+
+void *__asan_memcpy(void *dest, const void *src, size_t len)
+{
+ if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) ||
+ !kasan_check_range((unsigned long)dest, len, true, _RET_IP_))
+ return NULL;
+
+ return __memcpy(dest, src, len);
+}
+EXPORT_SYMBOL(__asan_memcpy);
void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
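The effect on instrumented code, assuming a toolchain that emits prefixed memintrinsics for KASAN builds:

	/* written in instrumented C code: */
	memcpy(dst, src, len);

	/* emitted by the compiler: */
	__asan_memcpy(dst, src, len);

__asan_memcpy() validates both ranges with kasan_check_range() and then falls through to the uninstrumented __memcpy(), so non-instrumented (e.g. noinstr) code calling plain memcpy() no longer picks up KASAN checks through a global override.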
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 4b7c8b33069e..b118f588cd2b 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -376,6 +376,7 @@ static int decode_instructions(struct objtool_file *file)
if (!strcmp(sec->name, ".noinstr.text") ||
!strcmp(sec->name, ".entry.text") ||
+ !strcmp(sec->name, ".cpuidle.text") ||
!strncmp(sec->name, ".text.__x86.", 12))
sec->noinstr = true;
@@ -1224,6 +1225,7 @@ static const char *uaccess_safe_builtin[] = {
"__ubsan_handle_type_mismatch",
"__ubsan_handle_type_mismatch_v1",
"__ubsan_handle_shift_out_of_bounds",
+ "__ubsan_handle_load_invalid_value",
/* misc */
"csum_partial_copy_generic",
"copy_mc_fragile",
@@ -3375,6 +3377,12 @@ static inline bool noinstr_call_dest(struct objtool_file *file,
return true;
/*
+ * If the symbol is a static_call trampoline, we can't tell.
+ */
+ if (func->static_call_tramp)
+ return true;
+
+ /*
* The __ubsan_handle_*() calls are like WARN(), they only happen when
* something 'BAD' happened. At the risk of taking the machine down,
* let them proceed to get the message out.
@@ -4171,6 +4179,12 @@ static int validate_noinstr_sections(struct objtool_file *file)
warnings += validate_unwind_hints(file, sec);
}
+ sec = find_section_by_name(file->elf, ".cpuidle.text");
+ if (sec) {
+ warnings += validate_section(file, sec);
+ warnings += validate_unwind_hints(file, sec);
+ }
+
return warnings;
}
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index 3045fdf9bdf5..f74e76d03b7e 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -41,18 +41,6 @@ static void guest_code(void)
GUEST_SYNC(0);
}
-/*
- * We have to perform direct system call for getcpu() because it's
- * not available until glibc 2.29.
- */
-static void sys_getcpu(unsigned *cpu)
-{
- int r;
-
- r = syscall(__NR_getcpu, cpu, NULL, NULL);
- TEST_ASSERT(!r, "getcpu failed, errno = %d (%s)", errno, strerror(errno));
-}
-
static int next_cpu(int cpu)
{
/*
@@ -249,7 +237,9 @@ int main(int argc, char *argv[])
* across the seq_cnt reads.
*/
smp_rmb();
- sys_getcpu(&cpu);
+ r = sys_getcpu(&cpu, NULL);
+ TEST_ASSERT(!r, "getcpu failed, errno = %d (%s)",
+ errno, strerror(errno));
rseq_cpu = rseq_current_cpu_raw();
smp_rmb();
} while (snapshot != atomic_read(&seq_cnt));
diff --git a/tools/testing/selftests/membarrier/membarrier_test_impl.h b/tools/testing/selftests/membarrier/membarrier_test_impl.h
index 186be69f0a59..af89855adb7b 100644
--- a/tools/testing/selftests/membarrier/membarrier_test_impl.h
+++ b/tools/testing/selftests/membarrier/membarrier_test_impl.h
@@ -9,11 +9,38 @@
#include "../kselftest.h"
+static int registrations;
+
static int sys_membarrier(int cmd, int flags)
{
return syscall(__NR_membarrier, cmd, flags);
}
+static int test_membarrier_get_registrations(int cmd)
+{
+ int ret, flags = 0;
+ const char *test_name =
+ "sys membarrier MEMBARRIER_CMD_GET_REGISTRATIONS";
+
+ registrations |= cmd;
+
+ ret = sys_membarrier(MEMBARRIER_CMD_GET_REGISTRATIONS, 0);
+ if (ret < 0) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d, errno = %d\n",
+ test_name, flags, errno);
+ } else if (ret != registrations) {
+ ksft_exit_fail_msg(
+ "%s test: flags = %d, ret = %d, registrations = %d\n",
+ test_name, flags, ret, registrations);
+ }
+ ksft_test_result_pass(
+ "%s test: flags = %d, ret = %d, registrations = %d\n",
+ test_name, flags, ret, registrations);
+
+ return 0;
+}
+
static int test_membarrier_cmd_fail(void)
{
int cmd = -1, flags = 0;
@@ -113,6 +140,8 @@ static int test_membarrier_register_private_expedited_success(void)
ksft_test_result_pass(
"%s test: flags = %d\n",
test_name, flags);
+
+ test_membarrier_get_registrations(cmd);
return 0;
}
@@ -170,6 +199,8 @@ static int test_membarrier_register_private_expedited_sync_core_success(void)
ksft_test_result_pass(
"%s test: flags = %d\n",
test_name, flags);
+
+ test_membarrier_get_registrations(cmd);
return 0;
}
@@ -204,6 +235,8 @@ static int test_membarrier_register_global_expedited_success(void)
ksft_test_result_pass(
"%s test: flags = %d\n",
test_name, flags);
+
+ test_membarrier_get_registrations(cmd);
return 0;
}
diff --git a/tools/testing/selftests/membarrier/membarrier_test_multi_thread.c b/tools/testing/selftests/membarrier/membarrier_test_multi_thread.c
index ac5613e5b0eb..a9cc17facfb3 100644
--- a/tools/testing/selftests/membarrier/membarrier_test_multi_thread.c
+++ b/tools/testing/selftests/membarrier/membarrier_test_multi_thread.c
@@ -62,7 +62,7 @@ static int test_mt_membarrier(void)
int main(int argc, char **argv)
{
ksft_print_header();
- ksft_set_plan(13);
+ ksft_set_plan(16);
test_membarrier_query();
diff --git a/tools/testing/selftests/membarrier/membarrier_test_single_thread.c b/tools/testing/selftests/membarrier/membarrier_test_single_thread.c
index c1c963902854..4cdc8b1d124c 100644
--- a/tools/testing/selftests/membarrier/membarrier_test_single_thread.c
+++ b/tools/testing/selftests/membarrier/membarrier_test_single_thread.c
@@ -12,7 +12,9 @@
int main(int argc, char **argv)
{
ksft_print_header();
- ksft_set_plan(13);
+ ksft_set_plan(18);
+
+ test_membarrier_get_registrations(/*cmd=*/0);
test_membarrier_query();
@@ -20,5 +22,7 @@ int main(int argc, char **argv)
test_membarrier_success();
+ test_membarrier_get_registrations(/*cmd=*/0);
+
return ksft_exit_pass();
}
diff --git a/tools/testing/selftests/rseq/.gitignore b/tools/testing/selftests/rseq/.gitignore
index 5910888ebfe1..16496de5f6ce 100644
--- a/tools/testing/selftests/rseq/.gitignore
+++ b/tools/testing/selftests/rseq/.gitignore
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
basic_percpu_ops_test
+basic_percpu_ops_mm_cid_test
basic_test
basic_rseq_op_test
param_test
param_test_benchmark
param_test_compare_twice
+param_test_mm_cid
+param_test_mm_cid_benchmark
+param_test_mm_cid_compare_twice
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index 215e1067f037..82a52810a649 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -12,8 +12,9 @@ LDLIBS += -lpthread -ldl
# still track changes to header files and depend on shared object.
OVERRIDE_TARGETS = 1
-TEST_GEN_PROGS = basic_test basic_percpu_ops_test param_test \
- param_test_benchmark param_test_compare_twice
+TEST_GEN_PROGS = basic_test basic_percpu_ops_test basic_percpu_ops_mm_cid_test param_test \
+ param_test_benchmark param_test_compare_twice param_test_mm_cid \
+ param_test_mm_cid_benchmark param_test_mm_cid_compare_twice
TEST_GEN_PROGS_EXTENDED = librseq.so
@@ -29,6 +30,9 @@ $(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h
$(OUTPUT)/%: %.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
$(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@
+$(OUTPUT)/basic_percpu_ops_mm_cid_test: basic_percpu_ops_test.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h
+	$(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID $< $(LDLIBS) -lrseq -o $@
+
$(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
rseq.h rseq-*.h
$(CC) $(CFLAGS) -DBENCHMARK $< $(LDLIBS) -lrseq -o $@
@@ -36,3 +40,15 @@ $(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
$(OUTPUT)/param_test_compare_twice: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
rseq.h rseq-*.h
$(CC) $(CFLAGS) -DRSEQ_COMPARE_TWICE $< $(LDLIBS) -lrseq -o $@
+
+$(OUTPUT)/param_test_mm_cid: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
+ rseq.h rseq-*.h
+ $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID $< $(LDLIBS) -lrseq -o $@
+
+$(OUTPUT)/param_test_mm_cid_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
+ rseq.h rseq-*.h
+ $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID -DBENCHMARK $< $(LDLIBS) -lrseq -o $@
+
+$(OUTPUT)/param_test_mm_cid_compare_twice: param_test.c $(TEST_GEN_PROGS_EXTENDED) \
+ rseq.h rseq-*.h
+ $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID -DRSEQ_COMPARE_TWICE $< $(LDLIBS) -lrseq -o $@
diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
index 517756afc2a4..887542961968 100644
--- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c
+++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c
@@ -12,6 +12,32 @@
#include "../kselftest.h"
#include "rseq.h"
+#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
+# define RSEQ_PERCPU RSEQ_PERCPU_MM_CID
+static
+int get_current_cpu_id(void)
+{
+ return rseq_current_mm_cid();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+ return rseq_mm_cid_available();
+}
+#else
+# define RSEQ_PERCPU RSEQ_PERCPU_CPU_ID
+static
+int get_current_cpu_id(void)
+{
+ return rseq_cpu_start();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+ return rseq_current_cpu_raw() >= 0;
+}
+#endif
+
struct percpu_lock_entry {
intptr_t v;
} __attribute__((aligned(128)));
@@ -51,9 +77,9 @@ int rseq_this_cpu_lock(struct percpu_lock *lock)
for (;;) {
int ret;
- cpu = rseq_cpu_start();
- ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
- 0, 1, cpu);
+ cpu = get_current_cpu_id();
+ ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ &lock->c[cpu].v, 0, 1, cpu);
if (rseq_likely(!ret))
break;
/* Retry if comparison fails or rseq aborts. */
@@ -141,13 +167,14 @@ void this_cpu_list_push(struct percpu_list *list,
intptr_t *targetptr, newval, expect;
int ret;
- cpu = rseq_cpu_start();
+ cpu = get_current_cpu_id();
/* Load list->c[cpu].head with single-copy atomicity. */
expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
newval = (intptr_t)node;
targetptr = (intptr_t *)&list->c[cpu].head;
node->next = (struct percpu_list_node *)expect;
- ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
+ ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ targetptr, expect, newval, cpu);
if (rseq_likely(!ret))
break;
/* Retry if comparison fails or rseq aborts. */
@@ -170,12 +197,13 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
long offset;
int ret, cpu;
- cpu = rseq_cpu_start();
+ cpu = get_current_cpu_id();
targetptr = (intptr_t *)&list->c[cpu].head;
expectnot = (intptr_t)NULL;
offset = offsetof(struct percpu_list_node, next);
load = (intptr_t *)&head;
- ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
+ ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ targetptr, expectnot,
offset, load, cpu);
if (rseq_likely(!ret)) {
if (_cpu)
@@ -295,6 +323,10 @@ int main(int argc, char **argv)
errno, strerror(errno));
goto error;
}
+ if (!rseq_validate_cpu_id()) {
+ fprintf(stderr, "Error: cpu id getter unavailable\n");
+ goto error;
+ }
printf("spinlock\n");
test_percpu_spinlock();
printf("percpu_list\n");
diff --git a/tools/testing/selftests/rseq/basic_test.c b/tools/testing/selftests/rseq/basic_test.c
index d8efbfb89193..295eea16466f 100644
--- a/tools/testing/selftests/rseq/basic_test.c
+++ b/tools/testing/selftests/rseq/basic_test.c
@@ -22,6 +22,8 @@ void test_cpu_pointer(void)
CPU_ZERO(&test_affinity);
for (i = 0; i < CPU_SETSIZE; i++) {
if (CPU_ISSET(i, &affinity)) {
+ int node;
+
CPU_SET(i, &test_affinity);
sched_setaffinity(0, sizeof(test_affinity),
&test_affinity);
@@ -29,6 +31,8 @@ void test_cpu_pointer(void)
assert(rseq_current_cpu() == i);
assert(rseq_current_cpu_raw() == i);
assert(rseq_cpu_start() == i);
+ node = rseq_fallback_current_node();
+ assert(rseq_current_node_id() == node);
CPU_CLR(i, &test_affinity);
}
}
diff --git a/tools/testing/selftests/rseq/compiler.h b/tools/testing/selftests/rseq/compiler.h
index 876eb6a7f75b..f47092bddeba 100644
--- a/tools/testing/selftests/rseq/compiler.h
+++ b/tools/testing/selftests/rseq/compiler.h
@@ -27,4 +27,10 @@
*/
#define rseq_after_asm_goto() asm volatile ("" : : : "memory")
+/* Combine two tokens. */
+#define RSEQ__COMBINE_TOKENS(_tokena, _tokenb) \
+ _tokena##_tokenb
+#define RSEQ_COMBINE_TOKENS(_tokena, _tokenb) \
+ RSEQ__COMBINE_TOKENS(_tokena, _tokenb)
+
#endif /* RSEQ_COMPILER_H_ */
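The two-level paste is the standard idiom for letting macro arguments expand before concatenation; for example, if a template header defines the suffix (hypothetical names, since that header is not shown here):

	#define RSEQ_TEMPLATE_SUFFIX		_relaxed_cpu_id
	#define RSEQ_TEMPLATE_IDENTIFIER(x)	RSEQ_COMBINE_TOKENS(x, RSEQ_TEMPLATE_SUFFIX)

	RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)
	/* -> RSEQ_COMBINE_TOKENS(rseq_addv, _relaxed_cpu_id)
	 * -> rseq_addv_relaxed_cpu_id
	 */

With the single-level RSEQ__COMBINE_TOKENS alone, the argument would be pasted literally, yielding rseq_addvRSEQ_TEMPLATE_SUFFIX.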
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index ef29bc16f358..bf951a490bb4 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -16,6 +16,7 @@
#include <signal.h>
#include <errno.h>
#include <stddef.h>
+#include <stdbool.h>
static inline pid_t rseq_gettid(void)
{
@@ -36,13 +37,9 @@ static int opt_modulo, verbose;
static int opt_yield, opt_signal, opt_sleep,
opt_disable_rseq, opt_threads = 200,
- opt_disable_mod = 0, opt_test = 's', opt_mb = 0;
+ opt_disable_mod = 0, opt_test = 's';
-#ifndef RSEQ_SKIP_FASTPATH
static long long opt_reps = 5000;
-#else
-static long long opt_reps = 100;
-#endif
static __thread __attribute__((tls_model("initial-exec")))
unsigned int signals_delivered;
@@ -268,6 +265,63 @@ unsigned int yield_mod_cnt, nr_abort;
#include "rseq.h"
+static enum rseq_mo opt_mo = RSEQ_MO_RELAXED;
+
+#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+#define TEST_MEMBARRIER
+
+static int sys_membarrier(int cmd, int flags, int cpu_id)
+{
+ return syscall(__NR_membarrier, cmd, flags, cpu_id);
+}
+#endif
+
+#ifdef BUILDOPT_RSEQ_PERCPU_MM_CID
+# define RSEQ_PERCPU RSEQ_PERCPU_MM_CID
+static
+int get_current_cpu_id(void)
+{
+ return rseq_current_mm_cid();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+ return rseq_mm_cid_available();
+}
+# ifdef TEST_MEMBARRIER
+/*
+ * Membarrier does not currently support targeting a mm_cid, so
+ * issue the barrier on all cpus.
+ */
+static
+int rseq_membarrier_expedited(int cpu)
+{
+ return sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
+ 0, 0);
+}
+# endif /* TEST_MEMBARRIER */
+#else
+# define RSEQ_PERCPU RSEQ_PERCPU_CPU_ID
+static
+int get_current_cpu_id(void)
+{
+ return rseq_cpu_start();
+}
+static
+bool rseq_validate_cpu_id(void)
+{
+ return rseq_current_cpu_raw() >= 0;
+}
+# ifdef TEST_MEMBARRIER
+static
+int rseq_membarrier_expedited(int cpu)
+{
+ return sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
+ MEMBARRIER_CMD_FLAG_CPU, cpu);
+}
+# endif /* TEST_MEMBARRIER */
+#endif
+
struct percpu_lock_entry {
intptr_t v;
} __attribute__((aligned(128)));
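With the wrappers in place, per-ID data structures are indexed identically regardless of the ID source; a sketch of the increment loop used by the tests (struct and field names follow the per-cpu count test in this file):

	struct percpu_count_entry {
		intptr_t count;
	} __attribute__((aligned(128)));

	struct percpu_count {
		struct percpu_count_entry c[CPU_SETSIZE];
	};

	static void percpu_count_inc(struct percpu_count *data)
	{
		int ret;

		do {
			int idx = get_current_cpu_id();	/* cpu_id or mm_cid */

			ret = rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU,
					&data->c[idx].count, 1, idx);
		} while (rseq_unlikely(ret));	/* retry on abort/migration */
	}

Built with -DBUILDOPT_RSEQ_PERCPU_MM_CID the index is the compact concurrency ID; otherwise it is the raw CPU number.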
@@ -355,8 +409,14 @@ static int rseq_this_cpu_lock(struct percpu_lock *lock)
for (;;) {
int ret;
- cpu = rseq_cpu_start();
- ret = rseq_cmpeqv_storev(&lock->c[cpu].v,
+ cpu = get_current_cpu_id();
+ if (cpu < 0) {
+ fprintf(stderr, "pid: %d: tid: %d, cpu: %d: cid: %d\n",
+ getpid(), (int) rseq_gettid(), rseq_current_cpu_raw(), cpu);
+ abort();
+ }
+ ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ &lock->c[cpu].v,
0, 1, cpu);
if (rseq_likely(!ret))
break;
@@ -473,8 +533,9 @@ void *test_percpu_inc_thread(void *arg)
do {
int cpu;
- cpu = rseq_cpu_start();
- ret = rseq_addv(&data->c[cpu].count, 1, cpu);
+ cpu = get_current_cpu_id();
+ ret = rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ &data->c[cpu].count, 1, cpu);
} while (rseq_unlikely(ret));
#ifndef BENCHMARK
if (i != 0 && !(i % (reps / 10)))
@@ -543,13 +604,14 @@ void this_cpu_list_push(struct percpu_list *list,
intptr_t *targetptr, newval, expect;
int ret;
- cpu = rseq_cpu_start();
+ cpu = get_current_cpu_id();
/* Load list->c[cpu].head with single-copy atomicity. */
expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head);
newval = (intptr_t)node;
targetptr = (intptr_t *)&list->c[cpu].head;
node->next = (struct percpu_list_node *)expect;
- ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu);
+ ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ targetptr, expect, newval, cpu);
if (rseq_likely(!ret))
break;
/* Retry if comparison fails or rseq aborts. */
@@ -575,13 +637,14 @@ struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list,
long offset;
int ret;
- cpu = rseq_cpu_start();
+ cpu = get_current_cpu_id();
targetptr = (intptr_t *)&list->c[cpu].head;
expectnot = (intptr_t)NULL;
offset = offsetof(struct percpu_list_node, next);
load = (intptr_t *)&head;
- ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot,
- offset, load, cpu);
+ ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ targetptr, expectnot,
+ offset, load, cpu);
if (rseq_likely(!ret)) {
node = head;
break;
@@ -719,7 +782,7 @@ bool this_cpu_buffer_push(struct percpu_buffer *buffer,
intptr_t offset;
int ret;
- cpu = rseq_cpu_start();
+ cpu = get_current_cpu_id();
offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
if (offset == buffer->c[cpu].buflen)
break;
@@ -727,14 +790,9 @@ bool this_cpu_buffer_push(struct percpu_buffer *buffer,
targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset];
newval_final = offset + 1;
targetptr_final = &buffer->c[cpu].offset;
- if (opt_mb)
- ret = rseq_cmpeqv_trystorev_storev_release(
- targetptr_final, offset, targetptr_spec,
- newval_spec, newval_final, cpu);
- else
- ret = rseq_cmpeqv_trystorev_storev(targetptr_final,
- offset, targetptr_spec, newval_spec,
- newval_final, cpu);
+ ret = rseq_cmpeqv_trystorev_storev(opt_mo, RSEQ_PERCPU,
+ targetptr_final, offset, targetptr_spec,
+ newval_spec, newval_final, cpu);
if (rseq_likely(!ret)) {
result = true;
break;
@@ -757,7 +815,7 @@ struct percpu_buffer_node *this_cpu_buffer_pop(struct percpu_buffer *buffer,
intptr_t offset;
int ret;
- cpu = rseq_cpu_start();
+ cpu = get_current_cpu_id();
/* Load offset with single-copy atomicity. */
offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
if (offset == 0) {
@@ -767,7 +825,8 @@ struct percpu_buffer_node *this_cpu_buffer_pop(struct percpu_buffer *buffer,
head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]);
newval = offset - 1;
targetptr = (intptr_t *)&buffer->c[cpu].offset;
- ret = rseq_cmpeqv_cmpeqv_storev(targetptr, offset,
+ ret = rseq_cmpeqv_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ targetptr, offset,
(intptr_t *)&buffer->c[cpu].array[offset - 1],
(intptr_t)head, newval, cpu);
if (rseq_likely(!ret))
@@ -924,7 +983,7 @@ bool this_cpu_memcpy_buffer_push(struct percpu_memcpy_buffer *buffer,
size_t copylen;
int ret;
- cpu = rseq_cpu_start();
+ cpu = get_current_cpu_id();
/* Load offset with single-copy atomicity. */
offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
if (offset == buffer->c[cpu].buflen)
@@ -935,15 +994,11 @@ bool this_cpu_memcpy_buffer_push(struct percpu_memcpy_buffer *buffer,
copylen = sizeof(item);
newval_final = offset + 1;
targetptr_final = &buffer->c[cpu].offset;
- if (opt_mb)
- ret = rseq_cmpeqv_trymemcpy_storev_release(
- targetptr_final, offset,
- destptr, srcptr, copylen,
- newval_final, cpu);
- else
- ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final,
- offset, destptr, srcptr, copylen,
- newval_final, cpu);
+ ret = rseq_cmpeqv_trymemcpy_storev(
+ opt_mo, RSEQ_PERCPU,
+ targetptr_final, offset,
+ destptr, srcptr, copylen,
+ newval_final, cpu);
if (rseq_likely(!ret)) {
result = true;
break;
@@ -968,7 +1023,7 @@ bool this_cpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer,
size_t copylen;
int ret;
- cpu = rseq_cpu_start();
+ cpu = get_current_cpu_id();
/* Load offset with single-copy atomicity. */
offset = RSEQ_READ_ONCE(buffer->c[cpu].offset);
if (offset == 0)
@@ -979,8 +1034,8 @@ bool this_cpu_memcpy_buffer_pop(struct percpu_memcpy_buffer *buffer,
copylen = sizeof(*item);
newval_final = offset - 1;
targetptr_final = &buffer->c[cpu].offset;
- ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final,
- offset, destptr, srcptr, copylen,
+ ret = rseq_cmpeqv_trymemcpy_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ targetptr_final, offset, destptr, srcptr, copylen,
newval_final, cpu);
if (rseq_likely(!ret)) {
result = true;
@@ -1155,7 +1210,7 @@ static int set_signal_handler(void)
}
/* Test MEMBARRIER_CMD_PRIVATE_RESTART_RSEQ_ON_CPU membarrier command. */
-#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+#ifdef TEST_MEMBARRIER
struct test_membarrier_thread_args {
int stop;
intptr_t percpu_list_ptr;
@@ -1182,9 +1237,10 @@ void *test_membarrier_worker_thread(void *arg)
int ret;
do {
- int cpu = rseq_cpu_start();
+ int cpu = get_current_cpu_id();
- ret = rseq_offset_deref_addv(&args->percpu_list_ptr,
+ ret = rseq_offset_deref_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU,
+ &args->percpu_list_ptr,
sizeof(struct percpu_list_entry) * cpu, 1, cpu);
} while (rseq_unlikely(ret));
}
@@ -1221,11 +1277,6 @@ void test_membarrier_free_percpu_list(struct percpu_list *list)
free(list->c[i].head);
}
-static int sys_membarrier(int cmd, int flags, int cpu_id)
-{
- return syscall(__NR_membarrier, cmd, flags, cpu_id);
-}
-
/*
* The manager thread swaps per-cpu lists that worker threads see,
* and validates that there are no unexpected modifications.
@@ -1264,8 +1315,7 @@ void *test_membarrier_manager_thread(void *arg)
/* Make list_b "active". */
atomic_store(&args->percpu_list_ptr, (intptr_t)&list_b);
- if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
- MEMBARRIER_CMD_FLAG_CPU, cpu_a) &&
+ if (rseq_membarrier_expedited(cpu_a) &&
errno != ENXIO /* missing CPU */) {
perror("sys_membarrier");
abort();
@@ -1288,8 +1338,7 @@ void *test_membarrier_manager_thread(void *arg)
/* Make list_a "active". */
atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a);
- if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
- MEMBARRIER_CMD_FLAG_CPU, cpu_b) &&
+ if (rseq_membarrier_expedited(cpu_b) &&
errno != ENXIO /* missing CPU */) {
perror("sys_membarrier");
abort();
@@ -1360,7 +1409,7 @@ void test_membarrier(void)
abort();
}
}
-#else /* RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV */
+#else /* TEST_MEMBARRIER */
void test_membarrier(void)
{
fprintf(stderr, "rseq_offset_deref_addv is not implemented on this architecture. "
@@ -1517,7 +1566,7 @@ int main(int argc, char **argv)
verbose = 1;
break;
case 'M':
- opt_mb = 1;
+ opt_mo = RSEQ_MO_RELEASE;
break;
default:
show_usage(argc, argv);
@@ -1537,6 +1586,10 @@ int main(int argc, char **argv)
if (!opt_disable_rseq && rseq_register_current_thread())
goto error;
+ if (!opt_disable_rseq && !rseq_validate_cpu_id()) {
+ fprintf(stderr, "Error: cpu id getter unavailable\n");
+ goto error;
+ }
switch (opt_test) {
case 's':
printf_verbose("spinlock\n");
diff --git a/tools/testing/selftests/rseq/rseq-abi.h b/tools/testing/selftests/rseq/rseq-abi.h
index a8c44d9af71f..fb4ec8a75dd4 100644
--- a/tools/testing/selftests/rseq/rseq-abi.h
+++ b/tools/testing/selftests/rseq/rseq-abi.h
@@ -146,6 +146,28 @@ struct rseq_abi {
* this thread.
*/
__u32 flags;
+
+ /*
+ * Restartable sequences node_id field. Updated by the kernel. Read by
+ * user-space with single-copy atomicity semantics. This field should
+ * only be read by the thread which registered this data structure.
+ * Aligned on 32-bit. Contains the current NUMA node ID.
+ */
+ __u32 node_id;
+
+ /*
+ * Restartable sequences mm_cid field. Updated by the kernel. Read by
+ * user-space with single-copy atomicity semantics. This field should
+ * only be read by the thread which registered this data structure.
+ * Aligned on 32-bit. Contains the current thread's concurrency ID
+ * (allocated uniquely within a memory map).
+ */
+ __u32 mm_cid;
+
+ /*
+ * Flexible array member at end of structure, after last feature field.
+ */
+ char end[];
} __attribute__((aligned(4 * sizeof(__u64))));
#endif /* _RSEQ_ABI_H */
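Reading the new fields from user space follows the same single-copy-atomicity convention as cpu_id; a sketch in the selftests' own idiom (rseq_get_abi() and RSEQ_READ_ONCE() as used elsewhere in this patch):

	static inline uint32_t my_rseq_node_id(void)
	{
		/* Updated by the kernel; a plain once-load suffices. */
		return RSEQ_READ_ONCE(rseq_get_abi()->node_id);
	}

	static inline uint32_t my_rseq_mm_cid(void)
	{
		return RSEQ_READ_ONCE(rseq_get_abi()->mm_cid);
	}

The my_ prefix marks these as illustrative helpers, not the library's API.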
diff --git a/tools/testing/selftests/rseq/rseq-arm-bits.h b/tools/testing/selftests/rseq/rseq-arm-bits.h
new file mode 100644
index 000000000000..4f03cb395462
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-arm-bits.h
@@ -0,0 +1,505 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-arm-bits.h
+ *
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "rseq-bits-template.h"
+
+#if defined(RSEQ_TEMPLATE_MO_RELAXED) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[error2]\n\t"
+#endif
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpnev_storeoffp_load)(intptr_t *v, intptr_t expectnot,
+ long voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expectnot], r0\n\t"
+ "beq %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expectnot], r0\n\t"
+ "beq %l[error2]\n\t"
+#endif
+ "str r0, %[load]\n\t"
+ "add r0, %[voffp]\n\t"
+ "ldr r0, [r0]\n\t"
+ /* final store */
+ "str r0, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "Ir" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ "ldr r0, %[v]\n\t"
+ "add r0, %[count]\n\t"
+ /* final store */
+ "str r0, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [count] "Ir" (count)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_cmpeqv_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ "ldr r0, %[v2]\n\t"
+ "cmp %[expect2], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[error2]\n\t"
+ "ldr r0, %[v2]\n\t"
+ "cmp %[expect2], r0\n\t"
+ "bne %l[error3]\n\t"
+#endif
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_after_asm_goto();
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+#endif /* #if defined(RSEQ_TEMPLATE_MO_RELAXED) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trystorev_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne %l[error2]\n\t"
+#endif
+ /* try store */
+ "str %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ "dmb\n\t" /* full mb provides store-release */
+#endif
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trymemcpy_storev)(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint32_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ "str %[src], %[rseq_scratch0]\n\t"
+ "str %[dst], %[rseq_scratch1]\n\t"
+ "str %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ "ldr r0, %[v]\n\t"
+ "cmp %[expect], r0\n\t"
+ "bne 7f\n\t"
+#endif
+ /* try memcpy */
+ "cmp %[len], #0\n\t" \
+ "beq 333f\n\t" \
+ "222:\n\t" \
+ "ldrb %%r0, [%[src]]\n\t" \
+ "strb %%r0, [%[dst]]\n\t" \
+ "adds %[src], #1\n\t" \
+ "adds %[dst], #1\n\t" \
+ "subs %[len], #1\n\t" \
+ "bne 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ "dmb\n\t" /* full mb provides store-release */
+#endif
+ /* final store */
+ "str %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t"
+ "b 8f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ abort, 1b, 2b, 4f)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ /* teardown */
+ "ldr %[len], %[rseq_scratch2]\n\t"
+ "ldr %[dst], %[rseq_scratch1]\n\t"
+ "ldr %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ "8:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "r0", "memory", "cc"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* #if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#include "rseq-bits-reset.h"
diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
index 893a11eca9d5..8414fc3eac15 100644
--- a/tools/testing/selftests/rseq/rseq-arm.h
+++ b/tools/testing/selftests/rseq/rseq-arm.h
@@ -2,7 +2,7 @@
/*
* rseq-arm.h
*
- * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
/*
@@ -79,10 +79,6 @@ do { \
RSEQ_WRITE_ONCE(*p, v); \
} while (0)
-#ifdef RSEQ_SKIP_FASTPATH
-#include "rseq-skip.h"
-#else /* !RSEQ_SKIP_FASTPATH */
-
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
post_commit_offset, abort_ip) \
".pushsection __rseq_cs, \"aw\"\n\t" \
@@ -147,681 +143,34 @@ do { \
teardown \
"b %l[" __rseq_str(cmpfail_label) "]\n\t"
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne %l[error2]\n\t"
-#endif
- /* final store */
- "str %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "r0", "memory", "cc"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- long voffp, intptr_t *load, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- "ldr r0, %[v]\n\t"
- "cmp %[expectnot], r0\n\t"
- "beq %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- "ldr r0, %[v]\n\t"
- "cmp %[expectnot], r0\n\t"
- "beq %l[error2]\n\t"
-#endif
- "str r0, %[load]\n\t"
- "add r0, %[voffp]\n\t"
- "ldr r0, [r0]\n\t"
- /* final store */
- "str r0, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expectnot] "r" (expectnot),
- [voffp] "Ir" (voffp),
- [load] "m" (*load)
- RSEQ_INJECT_INPUT
- : "r0", "memory", "cc"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
-#endif
- "ldr r0, %[v]\n\t"
- "add r0, %[count]\n\t"
- /* final store */
- "str r0, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(4)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [count] "Ir" (count)
- RSEQ_INJECT_INPUT
- : "r0", "memory", "cc"
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne %l[error2]\n\t"
-#endif
- /* try store */
- "str %[newv2], %[v2]\n\t"
- RSEQ_INJECT_ASM(5)
- /* final store */
- "str %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "r0", "memory", "cc"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
+/* Per-cpu-id indexing. */
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne %l[error2]\n\t"
-#endif
- /* try store */
- "str %[newv2], %[v2]\n\t"
- RSEQ_INJECT_ASM(5)
- "dmb\n\t" /* full mb provides store-release */
- /* final store */
- "str %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "r0", "memory", "cc"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_CPU_ID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-arm-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-arm-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_CPU_ID
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
- "ldr r0, %[v2]\n\t"
- "cmp %[expect2], r0\n\t"
- "bne %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(5)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne %l[error2]\n\t"
- "ldr r0, %[v2]\n\t"
- "cmp %[expect2], r0\n\t"
- "bne %l[error3]\n\t"
-#endif
- /* final store */
- "str %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* cmp2 input */
- [v2] "m" (*v2),
- [expect2] "r" (expect2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "r0", "memory", "cc"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2, error3
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("1st expected value comparison failed");
-error3:
- rseq_after_asm_goto();
- rseq_bug("2nd expected value comparison failed");
-#endif
-}
+/* Per-mm-cid indexing. */
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- uint32_t rseq_scratch[3];
+#define RSEQ_TEMPLATE_MM_CID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-arm-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-arm-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_MM_CID
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- "str %[src], %[rseq_scratch0]\n\t"
- "str %[dst], %[rseq_scratch1]\n\t"
- "str %[len], %[rseq_scratch2]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne 5f\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne 7f\n\t"
-#endif
- /* try memcpy */
- "cmp %[len], #0\n\t" \
- "beq 333f\n\t" \
- "222:\n\t" \
- "ldrb %%r0, [%[src]]\n\t" \
- "strb %%r0, [%[dst]]\n\t" \
- "adds %[src], #1\n\t" \
- "adds %[dst], #1\n\t" \
- "subs %[len], #1\n\t" \
- "bne 222b\n\t" \
- "333:\n\t" \
- RSEQ_INJECT_ASM(5)
- /* final store */
- "str %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t"
- "b 8f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4,
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t",
- abort, 1b, 2b, 4f)
- RSEQ_ASM_DEFINE_CMPFAIL(5,
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t",
- cmpfail)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_CMPFAIL(6,
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t",
- error1)
- RSEQ_ASM_DEFINE_CMPFAIL(7,
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t",
- error2)
-#endif
- "8:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len),
- [rseq_scratch0] "m" (rseq_scratch[0]),
- [rseq_scratch1] "m" (rseq_scratch[1]),
- [rseq_scratch2] "m" (rseq_scratch[2])
- RSEQ_INJECT_INPUT
- : "r0", "memory", "cc"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- uint32_t rseq_scratch[3];
-
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- "str %[src], %[rseq_scratch0]\n\t"
- "str %[dst], %[rseq_scratch1]\n\t"
- "str %[len], %[rseq_scratch2]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne 5f\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
- "ldr r0, %[v]\n\t"
- "cmp %[expect], r0\n\t"
- "bne 7f\n\t"
-#endif
- /* try memcpy */
- "cmp %[len], #0\n\t" \
- "beq 333f\n\t" \
- "222:\n\t" \
- "ldrb %%r0, [%[src]]\n\t" \
- "strb %%r0, [%[dst]]\n\t" \
- "adds %[src], #1\n\t" \
- "adds %[dst], #1\n\t" \
- "subs %[len], #1\n\t" \
- "bne 222b\n\t" \
- "333:\n\t" \
- RSEQ_INJECT_ASM(5)
- "dmb\n\t" /* full mb provides store-release */
- /* final store */
- "str %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t"
- "b 8f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4,
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t",
- abort, 1b, 2b, 4f)
- RSEQ_ASM_DEFINE_CMPFAIL(5,
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t",
- cmpfail)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_CMPFAIL(6,
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t",
- error1)
- RSEQ_ASM_DEFINE_CMPFAIL(7,
- /* teardown */
- "ldr %[len], %[rseq_scratch2]\n\t"
- "ldr %[dst], %[rseq_scratch1]\n\t"
- "ldr %[src], %[rseq_scratch0]\n\t",
- error2)
-#endif
- "8:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len),
- [rseq_scratch0] "m" (rseq_scratch[0]),
- [rseq_scratch1] "m" (rseq_scratch[1]),
- [rseq_scratch2] "m" (rseq_scratch[2])
- RSEQ_INJECT_INPUT
- : "r0", "memory", "cc"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+/* APIs which are not based on cpu ids. */
-#endif /* !RSEQ_SKIP_FASTPATH */
+#define RSEQ_TEMPLATE_CPU_ID_NONE
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-arm-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+#undef RSEQ_TEMPLATE_CPU_ID_NONE
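
The rseq-arm.h change above replaces six hand-expanded critical-section functions with repeated inclusions of rseq-arm-bits.h under different RSEQ_TEMPLATE_* settings, with the generated names produced by token pasting. A minimal self-contained sketch of that idiom follows; it compresses the repeated #include into a macro for brevity, and every identifier in it (COMBINE, DEFINE_ADD_VARIANT, add_*) is hypothetical, not part of the selftests:

#include <stdio.h>

#define COMBINE1(x, y)	x##y
#define COMBINE(x, y)	COMBINE1(x, y)

/*
 * Stand-in for one "bits" template body: each expansion emits a
 * distinctly suffixed copy of the same function, just as each
 * #include "rseq-arm-bits.h" emits one suffixed set of rseq ops.
 */
#define DEFINE_ADD_VARIANT(suffix, step)		\
	static int COMBINE(add, suffix)(int v)		\
	{						\
		return v + (step);			\
	}

DEFINE_ADD_VARIANT(_relaxed_cpu_id, 1)	/* defines add_relaxed_cpu_id() */
DEFINE_ADD_VARIANT(_relaxed_mm_cid, 2)	/* defines add_relaxed_mm_cid() */

int main(void)
{
	printf("%d %d\n", add_relaxed_cpu_id(40), add_relaxed_mm_cid(40));
	return 0;
}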
diff --git a/tools/testing/selftests/rseq/rseq-arm64-bits.h b/tools/testing/selftests/rseq/rseq-arm64-bits.h
new file mode 100644
index 000000000000..cc7226b1efe1
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-arm64-bits.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-arm64-bits.h
+ *
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2018 - Will Deacon <will.deacon@arm.com>
+ */
+
+#include "rseq-bits-template.h"
+
+#if defined(RSEQ_TEMPLATE_MO_RELAXED) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "Qo" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpnev_storeoffp_load)(intptr_t *v, intptr_t expectnot,
+ long voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2])
+#endif
+ RSEQ_ASM_OP_R_LOAD(v)
+ RSEQ_ASM_OP_R_STORE(load)
+ RSEQ_ASM_OP_R_LOAD_OFF(voffp)
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "Qo" (*v),
+ [expectnot] "r" (expectnot),
+ [load] "Qo" (*load),
+ [voffp] "r" (voffp)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ RSEQ_ASM_OP_R_LOAD(v)
+ RSEQ_ASM_OP_R_ADD(count)
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "Qo" (*v),
+ [count] "r" (count)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_cmpeqv_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error3])
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail])
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3])
+#endif
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "Qo" (*v),
+ [expect] "r" (expect),
+ [v2] "Qo" (*v2),
+ [expect2] "r" (expect2),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+error3:
+ rseq_after_asm_goto();
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+#endif /* #if defined(RSEQ_TEMPLATE_MO_RELAXED) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trystorev_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ RSEQ_ASM_OP_STORE(newv2, v2)
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
+#else
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+#endif
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [expect] "r" (expect),
+ [v] "Qo" (*v),
+ [newv] "r" (newv),
+ [v2] "Qo" (*v2),
+ [newv2] "r" (newv2)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trymemcpy_storev)(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
+#else
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+#endif
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "Qo" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [expect] "r" (expect),
+ [v] "Qo" (*v),
+ [newv] "r" (newv),
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* #if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#include "rseq-bits-reset.h"
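
The bits header above selects the commit instruction at expansion time: under RSEQ_TEMPLATE_MO_RELEASE the final store is RSEQ_ASM_OP_FINAL_STORE_RELEASE (stlr on arm64), otherwise the plain RSEQ_ASM_OP_FINAL_STORE; 32-bit arm gets the same effect with a dmb before the store, and MIPS with sync. As a rough C11 analogy — illustrative only, since the real commit store must additionally be the last instruction of the rseq critical section:

#include <stdatomic.h>
#include <stdint.h>

/* Relaxed commit: no ordering against the preceding "try" store. */
static inline void final_store_relaxed(atomic_intptr_t *v, intptr_t newv)
{
	atomic_store_explicit(v, newv, memory_order_relaxed);
}

/*
 * Release commit: stores done earlier in the critical section (to v2,
 * or to the memcpy destination) become visible no later than the
 * commit store to v, which is what stlr / dmb+str / sync+store give
 * the per-architecture templates.
 */
static inline void final_store_release(atomic_intptr_t *v, intptr_t newv)
{
	atomic_store_explicit(v, newv, memory_order_release);
}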
diff --git a/tools/testing/selftests/rseq/rseq-arm64.h b/tools/testing/selftests/rseq/rseq-arm64.h
index cbe190a4d005..85b90977e7e6 100644
--- a/tools/testing/selftests/rseq/rseq-arm64.h
+++ b/tools/testing/selftests/rseq/rseq-arm64.h
@@ -2,7 +2,7 @@
/*
* rseq-arm64.h
*
- * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* (C) Copyright 2018 - Will Deacon <will.deacon@arm.com>
*/
@@ -85,10 +85,6 @@ do { \
} \
} while (0)
-#ifdef RSEQ_SKIP_FASTPATH
-#include "rseq-skip.h"
-#else /* !RSEQ_SKIP_FASTPATH */
-
#define RSEQ_ASM_TMP_REG32 "w15"
#define RSEQ_ASM_TMP_REG "x15"
#define RSEQ_ASM_TMP_REG_2 "x14"
@@ -204,492 +200,34 @@ do { \
" cbnz " RSEQ_ASM_TMP_REG_2 ", 222b\n" \
"333:\n"
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "Qo" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- long voffp, intptr_t *load, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2])
-#endif
- RSEQ_ASM_OP_R_LOAD(v)
- RSEQ_ASM_OP_R_STORE(load)
- RSEQ_ASM_OP_R_LOAD_OFF(voffp)
- RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "Qo" (*v),
- [expectnot] "r" (expectnot),
- [load] "Qo" (*load),
- [voffp] "r" (voffp)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+/* Per-cpu-id indexing. */
-static inline __attribute__((always_inline))
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_CPU_ID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-arm64-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
-#endif
- RSEQ_ASM_OP_R_LOAD(v)
- RSEQ_ASM_OP_R_ADD(count)
- RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "Qo" (*v),
- [count] "r" (count)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- RSEQ_ASM_OP_STORE(newv2, v2)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [expect] "r" (expect),
- [v] "Qo" (*v),
- [newv] "r" (newv),
- [v2] "Qo" (*v2),
- [newv2] "r" (newv2)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- RSEQ_ASM_OP_STORE(newv2, v2)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [expect] "r" (expect),
- [v] "Qo" (*v),
- [newv] "r" (newv),
- [v2] "Qo" (*v2),
- [newv2] "r" (newv2)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error3])
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail])
- RSEQ_INJECT_ASM(5)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
- RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3])
-#endif
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "Qo" (*v),
- [expect] "r" (expect),
- [v2] "Qo" (*v2),
- [expect2] "r" (expect2),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2, error3
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-error3:
- rseq_after_asm_goto();
- rseq_bug("2nd expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [expect] "r" (expect),
- [v] "Qo" (*v),
- [newv] "r" (newv),
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, %l[error2])
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "Qo" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [expect] "r" (expect),
- [v] "Qo" (*v),
- [newv] "r" (newv),
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG, RSEQ_ASM_TMP_REG_2
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-arm64-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_CPU_ID
+
+/* Per-mm-cid indexing. */
+
+#define RSEQ_TEMPLATE_MM_CID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-arm64-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-arm64-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_MM_CID
+
+/* APIs which are not based on cpu ids. */
-#endif /* !RSEQ_SKIP_FASTPATH */
+#define RSEQ_TEMPLATE_CPU_ID_NONE
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-arm64-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+#undef RSEQ_TEMPLATE_CPU_ID_NONE
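
With the arm64 header reduced to template inclusions, callers reach the generated variants under suffixed names such as rseq_addv_relaxed_cpu_id(). A hypothetical caller sketch — assuming the selftests' rseq.h provides rseq_current_cpu_raw() and rseq_unlikely() as in mainline, and that the thread has already registered with rseq:

#include <stdint.h>
#include "rseq.h"

/*
 * Increment this CPU's slot of a per-CPU counter array, retrying when
 * the critical section aborts (returns -1) after preemption, signal
 * delivery, or migration to another CPU.
 */
static inline int percpu_counter_inc(intptr_t *counters)
{
	int cpu, ret;

	do {
		cpu = rseq_current_cpu_raw();
		ret = rseq_addv_relaxed_cpu_id(&counters[cpu], 1, cpu);
	} while (rseq_unlikely(ret));
	return cpu;
}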
diff --git a/tools/testing/selftests/rseq/rseq-bits-reset.h b/tools/testing/selftests/rseq/rseq-bits-reset.h
new file mode 100644
index 000000000000..e8655089f9cb
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-bits-reset.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-bits-reset.h
+ *
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef RSEQ_TEMPLATE_IDENTIFIER
+#undef RSEQ_TEMPLATE_CPU_ID_FIELD
+#undef RSEQ_TEMPLATE_CPU_ID_OFFSET
+#undef RSEQ_TEMPLATE_SUFFIX
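
rseq-bits-reset.h exists because each pass over a bits header defines per-configuration macros that the next pass must redefine, and C only permits redefining a macro to a different replacement list after an intervening #undef. A two-pass illustration (not taken from the patch):

#define RSEQ_TEMPLATE_SUFFIX _relaxed_cpu_id	/* set by rseq-bits-template.h */
/* ... bits header expands functions carrying this suffix ... */
#undef RSEQ_TEMPLATE_SUFFIX			/* done by rseq-bits-reset.h */
#define RSEQ_TEMPLATE_SUFFIX _release_cpu_id	/* second pass is now well formed */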
diff --git a/tools/testing/selftests/rseq/rseq-bits-template.h b/tools/testing/selftests/rseq/rseq-bits-template.h
new file mode 100644
index 000000000000..65698d6a6cf9
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-bits-template.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-bits-template.h
+ *
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifdef RSEQ_TEMPLATE_CPU_ID
+# define RSEQ_TEMPLATE_CPU_ID_OFFSET RSEQ_CPU_ID_OFFSET
+# define RSEQ_TEMPLATE_CPU_ID_FIELD cpu_id
+# ifdef RSEQ_TEMPLATE_MO_RELEASE
+# define RSEQ_TEMPLATE_SUFFIX _release_cpu_id
+# elif defined (RSEQ_TEMPLATE_MO_RELAXED)
+# define RSEQ_TEMPLATE_SUFFIX _relaxed_cpu_id
+# else
+# error "Never use <rseq-bits-template.h> directly; include <rseq.h> instead."
+# endif
+#elif defined(RSEQ_TEMPLATE_MM_CID)
+# define RSEQ_TEMPLATE_CPU_ID_OFFSET RSEQ_MM_CID_OFFSET
+# define RSEQ_TEMPLATE_CPU_ID_FIELD mm_cid
+# ifdef RSEQ_TEMPLATE_MO_RELEASE
+# define RSEQ_TEMPLATE_SUFFIX _release_mm_cid
+# elif defined (RSEQ_TEMPLATE_MO_RELAXED)
+# define RSEQ_TEMPLATE_SUFFIX _relaxed_mm_cid
+# else
+# error "Never use <rseq-bits-template.h> directly; include <rseq.h> instead."
+# endif
+#elif defined (RSEQ_TEMPLATE_CPU_ID_NONE)
+# ifdef RSEQ_TEMPLATE_MO_RELEASE
+# define RSEQ_TEMPLATE_SUFFIX _release
+# elif defined (RSEQ_TEMPLATE_MO_RELAXED)
+# define RSEQ_TEMPLATE_SUFFIX _relaxed
+# else
+# error "Never use <rseq-bits-template.h> directly; include <rseq.h> instead."
+# endif
+#else
+# error "Never use <rseq-bits-template.h> directly; include <rseq.h> instead."
+#endif
+
+#define RSEQ_TEMPLATE_IDENTIFIER(x) RSEQ_COMBINE_TOKENS(x, RSEQ_TEMPLATE_SUFFIX)
+
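Combining the preprocessor guards in the bits headers with the suffixes defined above, each templated function materialises under a predictable set of names. For example:

/*
 * rseq_cmpeqv_trystorev_storev, guarded by
 * (MO_RELAXED || MO_RELEASE) && (CPU_ID || MM_CID), expands to:
 *
 *   rseq_cmpeqv_trystorev_storev_relaxed_cpu_id
 *   rseq_cmpeqv_trystorev_storev_release_cpu_id
 *   rseq_cmpeqv_trystorev_storev_relaxed_mm_cid
 *   rseq_cmpeqv_trystorev_storev_release_mm_cid
 *
 * while rseq_cmpeqv_storev, guarded by MO_RELAXED && (CPU_ID || MM_CID),
 * expands only to:
 *
 *   rseq_cmpeqv_storev_relaxed_cpu_id
 *   rseq_cmpeqv_storev_relaxed_mm_cid
 */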
diff --git a/tools/testing/selftests/rseq/rseq-mips-bits.h b/tools/testing/selftests/rseq/rseq-mips-bits.h
new file mode 100644
index 000000000000..6c48af4d0944
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-mips-bits.h
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Author: Paul Burton <paul.burton@mips.com>
+ * (C) Copyright 2018 MIPS Tech LLC
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "rseq-bits-template.h"
+
+#if defined(RSEQ_TEMPLATE_MO_RELAXED) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpnev_storeoffp_load)(intptr_t *v, intptr_t expectnot,
+ long voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "beq $4, %[expectnot], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "beq $4, %[expectnot], %l[error2]\n\t"
+#endif
+ LONG_S " $4, %[load]\n\t"
+ LONG_ADDI " $4, %[voffp]\n\t"
+ LONG_L " $4, 0($4)\n\t"
+ /* final store */
+ LONG_S " $4, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "Ir" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ LONG_L " $4, %[v]\n\t"
+ LONG_ADDI " $4, %[count]\n\t"
+ /* final store */
+ LONG_S " $4, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [count] "Ir" (count)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_cmpeqv_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ LONG_L " $4, %[v2]\n\t"
+ "bne $4, %[expect2], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+ LONG_L " $4, %[v2]\n\t"
+ "bne $4, %[expect2], %l[error3]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+#endif /* #if defined(RSEQ_TEMPLATE_MO_RELAXED) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trystorev_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+#endif
+ /* try store */
+ LONG_S " %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ "sync\n\t" /* full sync provides store-release */
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trymemcpy_storev)(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uintptr_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ LONG_S " %[src], %[rseq_scratch0]\n\t"
+ LONG_S " %[dst], %[rseq_scratch1]\n\t"
+ LONG_S " %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 7f\n\t"
+#endif
+ /* try memcpy */
+ "beqz %[len], 333f\n\t" \
+ "222:\n\t" \
+ "lb $4, 0(%[src])\n\t" \
+ "sb $4, 0(%[dst])\n\t" \
+ LONG_ADDI " %[src], 1\n\t" \
+ LONG_ADDI " %[dst], 1\n\t" \
+ LONG_ADDI " %[len], -1\n\t" \
+ "bnez %[len], 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ "sync\n\t" /* full sync provides store-release */
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t"
+ "b 8f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ abort, 1b, 2b, 4f)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ "8:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* #if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#include "rseq-bits-reset.h"
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
index 878739fae2fd..50b950cf9585 100644
--- a/tools/testing/selftests/rseq/rseq-mips.h
+++ b/tools/testing/selftests/rseq/rseq-mips.h
@@ -2,9 +2,7 @@
/*
* Author: Paul Burton <paul.burton@mips.com>
* (C) Copyright 2018 MIPS Tech LLC
- *
- * Based on rseq-arm.h:
- * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
/*
@@ -60,10 +58,6 @@ do { \
RSEQ_WRITE_ONCE(*p, v); \
} while (0)
-#ifdef RSEQ_SKIP_FASTPATH
-#include "rseq-skip.h"
-#else /* !RSEQ_SKIP_FASTPATH */
-
#if _MIPS_SZLONG == 64
# define LONG ".dword"
# define LONG_LA "dla"
@@ -154,624 +148,34 @@ do { \
teardown \
"b %l[" __rseq_str(cmpfail_label) "]\n\t"
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], %l[error2]\n\t"
-#endif
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "$4", "memory"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- long voffp, intptr_t *load, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_L " $4, %[v]\n\t"
- "beq $4, %[expectnot], %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_L " $4, %[v]\n\t"
- "beq $4, %[expectnot], %l[error2]\n\t"
-#endif
- LONG_S " $4, %[load]\n\t"
- LONG_ADDI " $4, %[voffp]\n\t"
- LONG_L " $4, 0($4)\n\t"
- /* final store */
- LONG_S " $4, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expectnot] "r" (expectnot),
- [voffp] "Ir" (voffp),
- [load] "m" (*load)
- RSEQ_INJECT_INPUT
- : "$4", "memory"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
+/* Per-cpu-id indexing. */
-static inline __attribute__((always_inline))
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_CPU_ID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-mips-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
-#endif
- LONG_L " $4, %[v]\n\t"
- LONG_ADDI " $4, %[count]\n\t"
- /* final store */
- LONG_S " $4, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(4)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [count] "Ir" (count)
- RSEQ_INJECT_INPUT
- : "$4", "memory"
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-mips-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_CPU_ID
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
+/* Per-mm-cid indexing. */
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], %l[error2]\n\t"
-#endif
- /* try store */
- LONG_S " %[newv2], %[v2]\n\t"
- RSEQ_INJECT_ASM(5)
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "$4", "memory"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_MM_CID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-mips-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-mips-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_MM_CID
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], %l[error2]\n\t"
-#endif
- /* try store */
- LONG_S " %[newv2], %[v2]\n\t"
- RSEQ_INJECT_ASM(5)
- "sync\n\t" /* full sync provides store-release */
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "$4", "memory"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
- LONG_L " $4, %[v2]\n\t"
- "bne $4, %[expect2], %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(5)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], %l[error2]\n\t"
- LONG_L " $4, %[v2]\n\t"
- "bne $4, %[expect2], %l[error3]\n\t"
-#endif
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- "b 5f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
- "5:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* cmp2 input */
- [v2] "m" (*v2),
- [expect2] "r" (expect2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "$4", "memory"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2, error3
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("1st expected value comparison failed");
-error3:
- rseq_bug("2nd expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- uintptr_t rseq_scratch[3];
-
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- LONG_S " %[src], %[rseq_scratch0]\n\t"
- LONG_S " %[dst], %[rseq_scratch1]\n\t"
- LONG_S " %[len], %[rseq_scratch2]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], 5f\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], 7f\n\t"
-#endif
- /* try memcpy */
- "beqz %[len], 333f\n\t" \
- "222:\n\t" \
- "lb $4, 0(%[src])\n\t" \
- "sb $4, 0(%[dst])\n\t" \
- LONG_ADDI " %[src], 1\n\t" \
- LONG_ADDI " %[dst], 1\n\t" \
- LONG_ADDI " %[len], -1\n\t" \
- "bnez %[len], 222b\n\t" \
- "333:\n\t" \
- RSEQ_INJECT_ASM(5)
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t"
- "b 8f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4,
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- abort, 1b, 2b, 4f)
- RSEQ_ASM_DEFINE_CMPFAIL(5,
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- cmpfail)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_CMPFAIL(6,
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- error1)
- RSEQ_ASM_DEFINE_CMPFAIL(7,
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- error2)
-#endif
- "8:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len),
- [rseq_scratch0] "m" (rseq_scratch[0]),
- [rseq_scratch1] "m" (rseq_scratch[1]),
- [rseq_scratch2] "m" (rseq_scratch[2])
- RSEQ_INJECT_INPUT
- : "$4", "memory"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- uintptr_t rseq_scratch[3];
-
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(9, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- LONG_S " %[src], %[rseq_scratch0]\n\t"
- LONG_S " %[dst], %[rseq_scratch1]\n\t"
- LONG_S " %[len], %[rseq_scratch2]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], 5f\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
- LONG_L " $4, %[v]\n\t"
- "bne $4, %[expect], 7f\n\t"
-#endif
- /* try memcpy */
- "beqz %[len], 333f\n\t" \
- "222:\n\t" \
- "lb $4, 0(%[src])\n\t" \
- "sb $4, 0(%[dst])\n\t" \
- LONG_ADDI " %[src], 1\n\t" \
- LONG_ADDI " %[dst], 1\n\t" \
- LONG_ADDI " %[len], -1\n\t" \
- "bnez %[len], 222b\n\t" \
- "333:\n\t" \
- RSEQ_INJECT_ASM(5)
- "sync\n\t" /* full sync provides store-release */
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t"
- "b 8f\n\t"
- RSEQ_ASM_DEFINE_ABORT(3, 4,
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- abort, 1b, 2b, 4f)
- RSEQ_ASM_DEFINE_CMPFAIL(5,
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- cmpfail)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_CMPFAIL(6,
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- error1)
- RSEQ_ASM_DEFINE_CMPFAIL(7,
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- error2)
-#endif
- "8:\n\t"
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len),
- [rseq_scratch0] "m" (rseq_scratch[0]),
- [rseq_scratch1] "m" (rseq_scratch[1]),
- [rseq_scratch2] "m" (rseq_scratch[2])
- RSEQ_INJECT_INPUT
- : "$4", "memory"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
+/* APIs which are not based on cpu ids. */
-#endif /* !RSEQ_SKIP_FASTPATH */
+#define RSEQ_TEMPLATE_CPU_ID_NONE
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-mips-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+#undef RSEQ_TEMPLATE_CPU_ID_NONE
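
Each #include of rseq-mips-bits.h above re-expands the guarded function bodies under a different identifier suffix. Assuming the suffixing convention supplied by rseq-bits-template.h (memory order plus indexing appended to the name; the macro itself is outside this hunk), the passes emit variants along these lines:

/* RSEQ_TEMPLATE_CPU_ID + RSEQ_TEMPLATE_MO_RELAXED */
int rseq_cmpeqv_trystorev_storev_relaxed_cpu_id(intptr_t *v, intptr_t expect,
						intptr_t *v2, intptr_t newv2,
						intptr_t newv, int cpu);
/* RSEQ_TEMPLATE_CPU_ID + RSEQ_TEMPLATE_MO_RELEASE */
int rseq_cmpeqv_trystorev_storev_release_cpu_id(intptr_t *v, intptr_t expect,
						intptr_t *v2, intptr_t newv2,
						intptr_t newv, int cpu);
/* RSEQ_TEMPLATE_MM_CID + RSEQ_TEMPLATE_MO_RELAXED */
int rseq_cmpeqv_trystorev_storev_relaxed_mm_cid(intptr_t *v, intptr_t expect,
						intptr_t *v2, intptr_t newv2,
						intptr_t newv, int cpu);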
diff --git a/tools/testing/selftests/rseq/rseq-ppc-bits.h b/tools/testing/selftests/rseq/rseq-ppc-bits.h
new file mode 100644
index 000000000000..98e69eae1e62
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-ppc-bits.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-ppc-bits.h
+ *
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2016-2018 - Boqun Feng <boqun.feng@gmail.com>
+ */
+
+#include "rseq-bits-template.h"
+
+#if defined(RSEQ_TEMPLATE_MO_RELAXED) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
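
This helper is the basic building block: a cpu-checked compare-and-store. Ignoring restartability, its contract reduces to the sketch below (current_cpu is shorthand for the RSEQ_TEMPLATE_CPU_ID_FIELD read; the asm version additionally aborts whenever the thread is preempted, migrated, or signalled inside the section):

	if (current_cpu != cpu)
		return -1;	/* abort, taken via the 4: handler */
	if (*v != expect)
		return 1;	/* cmpfail */
	*v = newv;		/* single-store commit */
	return 0;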
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpnev_storeoffp_load)(intptr_t *v, intptr_t expectnot,
+ long voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v not equal to @expectnot */
+ RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v not equal to @expectnot */
+ RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2])
+#endif
+ /* load the value of @v */
+ RSEQ_ASM_OP_R_LOAD(v)
+ /* store it in @load */
+ RSEQ_ASM_OP_R_STORE(load)
+ /* dereference voffp(v) */
+ RSEQ_ASM_OP_R_LOADX(voffp)
+ /* final store: the value at voffp(v) */
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 2)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "b" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
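
The load/store/loadx chain implements a pointer-chasing pop: *v holds a pointer and voffp is a byte offset to the next link. A hedged C rendition of the committed effect (names are illustrative):

	intptr_t head = *v;
	if (head == expectnot)				/* e.g. empty-list sentinel */
		return 1;				/* cmpfail */
	*load = head;					/* hand the popped element back */
	*v = *(intptr_t *)((uintptr_t)head + voffp);	/* commit: v = head->next */
	return 0;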
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ /* load the value of @v */
+ RSEQ_ASM_OP_R_LOAD(v)
+ /* add @count to it */
+ RSEQ_ASM_OP_R_ADD(count)
+ /* final store */
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 2)
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "r" (count)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
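
rseq_addv() is the canonical per-cpu counter update. A typical caller retries on the -1 abort return (a sketch: counters[] is an illustrative per-cpu array, rseq_current_cpu_raw() is the selftests' accessor for the current cpu number, and the suffixed name assumes the relaxed per-cpu-id instantiation of this template):

	for (;;) {
		int cpu = rseq_current_cpu_raw();

		if (!rseq_addv_relaxed_cpu_id(&counters[cpu], 1, cpu))
			break;	/* committed */
		/* -1: aborted by migration/preemption/signal; retry */
	}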
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_cmpeqv_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+ /* cmp @v2 equal to @expect2 */
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail])
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+ /* cmp @v2 equal to @expect2 */
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3])
+#endif
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_after_asm_goto();
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+#endif /* #if defined(RSEQ_TEMPLATE_MO_RELAXED) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trystorev_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ /* try store */
+ RSEQ_ASM_OP_STORE(newv2, v2)
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ /* for 'release' */
+ "lwsync\n\t"
+#endif
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
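
In the RSEQ_TEMPLATE_MO_RELEASE expansion, the lwsync between the try store and the commit gives the final store release semantics. In C11 atomics terms the committed effect is roughly (a loose analogue, not a drop-in replacement, since the rseq version is also tied to the cpu check and abort machinery):

	*v2 = newv2;					/* try store */
	__atomic_store_n(v, newv, __ATOMIC_RELEASE);	/* lwsync + final store */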
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trymemcpy_storev)(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* setup for memcpy */
+ "mr %%r19, %[len]\n\t"
+ "mr %%r20, %[src]\n\t"
+ "mr %%r21, %[dst]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ /* cmp cpuid */
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ /* cmp @v equal to @expect */
+ RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
+#endif
+ /* try memcpy */
+ RSEQ_ASM_OP_R_MEMCPY()
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ /* for 'release' */
+ "lwsync\n\t"
+#endif
+ /* final store */
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r17", "r18", "r19", "r20", "r21"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
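
The "setup for memcpy" register copies exist because the copy loop consumes its length/source/destination operands, while "r"-constrained asm-goto inputs must keep their values; the loop therefore runs on the r19/r20/r21 copies, which are listed as clobbers. In spirit, RSEQ_ASM_OP_R_MEMCPY() performs (a sketch of the expansion, not its literal text):

	/* r19 = len copy, r20 = src copy (byte pointer), r21 = dst copy (byte pointer) */
	while (r19--)
		*r21++ = *r20++;	/* byte-at-a-time, restartable at any point */

The MIPS variant earlier in this series solves the same input-preservation problem with a stack scratch array instead of spare registers.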
+
+#endif /* #if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#include "rseq-bits-reset.h"
diff --git a/tools/testing/selftests/rseq/rseq-ppc.h b/tools/testing/selftests/rseq/rseq-ppc.h
index bab8e0b9fb11..dc9190facee9 100644
--- a/tools/testing/selftests/rseq/rseq-ppc.h
+++ b/tools/testing/selftests/rseq/rseq-ppc.h
@@ -2,7 +2,7 @@
/*
* rseq-ppc.h
*
- * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* (C) Copyright 2016-2018 - Boqun Feng <boqun.feng@gmail.com>
*/
@@ -36,10 +36,6 @@ do { \
RSEQ_WRITE_ONCE(*p, v); \
} while (0)
-#ifdef RSEQ_SKIP_FASTPATH
-#include "rseq-skip.h"
-#else /* !RSEQ_SKIP_FASTPATH */
-
/*
* The __rseq_cs_ptr_array and __rseq_cs sections can be used by debuggers to
* better handle single-stepping through the restartable critical sections.
@@ -209,583 +205,34 @@ do { \
RSEQ_STORE_LONG(var) "%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n\t" \
__rseq_str(post_commit_label) ":\n\t"
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- /* final store */
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r17"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- long voffp, intptr_t *load, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- /* cmp @v not equal to @expectnot */
- RSEQ_ASM_OP_CMPNE(v, expectnot, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- /* cmp @v not equal to @expectnot */
- RSEQ_ASM_OP_CMPNE(v, expectnot, %l[error2])
-#endif
- /* load the value of @v */
- RSEQ_ASM_OP_R_LOAD(v)
- /* store it in @load */
- RSEQ_ASM_OP_R_STORE(load)
- /* dereference voffp(v) */
- RSEQ_ASM_OP_R_LOADX(voffp)
- /* final store the value at voffp(v) */
- RSEQ_ASM_OP_R_FINAL_STORE(v, 2)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expectnot] "r" (expectnot),
- [voffp] "b" (voffp),
- [load] "m" (*load)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r17"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
-#endif
- /* load the value of @v */
- RSEQ_ASM_OP_R_LOAD(v)
- /* add @count to it */
- RSEQ_ASM_OP_R_ADD(count)
- /* final store */
- RSEQ_ASM_OP_R_FINAL_STORE(v, 2)
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [count] "r" (count)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r17"
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- /* try store */
- RSEQ_ASM_OP_STORE(newv2, v2)
- RSEQ_INJECT_ASM(5)
- /* final store */
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r17"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- /* try store */
- RSEQ_ASM_OP_STORE(newv2, v2)
- RSEQ_INJECT_ASM(5)
- /* for 'release' */
- "lwsync\n\t"
- /* final store */
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r17"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
- /* cmp @v2 equal to @expct2 */
- RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[cmpfail])
- RSEQ_INJECT_ASM(5)
-#ifdef RSEQ_COMPARE_TWICE
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
- /* cmp @v2 equal to @expct2 */
- RSEQ_ASM_OP_CMPEQ(v2, expect2, %l[error3])
-#endif
- /* final store */
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* cmp2 input */
- [v2] "m" (*v2),
- [expect2] "r" (expect2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r17"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2, error3
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("1st expected value comparison failed");
-error3:
- rseq_after_asm_goto();
- rseq_bug("2nd expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* setup for mempcy */
- "mr %%r19, %[len]\n\t"
- "mr %%r20, %[src]\n\t"
- "mr %%r21, %[dst]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- /* try memcpy */
- RSEQ_ASM_OP_R_MEMCPY()
- RSEQ_INJECT_ASM(5)
- /* final store */
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
- RSEQ_INJECT_ASM(6)
- /* teardown */
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r17", "r18", "r19", "r20", "r21"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* setup for mempcy */
- "mr %%r19, %[len]\n\t"
- "mr %%r20, %[src]\n\t"
- "mr %%r21, %[dst]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[cmpfail])
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- /* cmp cpuid */
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- /* cmp @v equal to @expect */
- RSEQ_ASM_OP_CMPEQ(v, expect, %l[error2])
-#endif
- /* try memcpy */
- RSEQ_ASM_OP_R_MEMCPY()
- RSEQ_INJECT_ASM(5)
- /* for 'release' */
- "lwsync\n\t"
- /* final store */
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 2)
- RSEQ_INJECT_ASM(6)
- /* teardown */
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r17", "r18", "r19", "r20", "r21"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-#endif /* !RSEQ_SKIP_FASTPATH */
+/* Per-cpu-id indexing. */
+
+#define RSEQ_TEMPLATE_CPU_ID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-ppc-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-ppc-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_CPU_ID
+
+/* Per-mm-cid indexing. */
+
+#define RSEQ_TEMPLATE_MM_CID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-ppc-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-ppc-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_MM_CID
+
+/* APIs which are not based on cpu ids. */
+
+#define RSEQ_TEMPLATE_CPU_ID_NONE
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-ppc-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+#undef RSEQ_TEMPLATE_CPU_ID_NONE
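
The three instantiation families differ only in which struct rseq field the cpu check reads. Presumably (the plumbing lives in rseq-bits-template.h, outside this hunk) the template header maps the field along these lines:

/* Assumed shape of the rseq-bits-template.h field selection: */
#ifdef RSEQ_TEMPLATE_CPU_ID
# define RSEQ_TEMPLATE_CPU_ID_FIELD	cpu_id	/* classic per-cpu indexing */
#elif defined(RSEQ_TEMPLATE_MM_CID)
# define RSEQ_TEMPLATE_CPU_ID_FIELD	mm_cid	/* per-memory-map concurrency id */
#endif

so the same asm bodies compare against rseq_get_abi()->cpu_id or rseq_get_abi()->mm_cid depending on the pass, while the RSEQ_TEMPLATE_CPU_ID_NONE pass matches neither guard in rseq-ppc-bits.h and is reserved for helpers that need no cpu check.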
diff --git a/tools/testing/selftests/rseq/rseq-riscv-bits.h b/tools/testing/selftests/rseq/rseq-riscv-bits.h
new file mode 100644
index 000000000000..de31a0143139
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-riscv-bits.h
@@ -0,0 +1,410 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#include "rseq-bits-template.h"
+
+#if defined(RSEQ_TEMPLATE_MO_RELAXED) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __always_inline
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+ RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
+#endif
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG_1
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __always_inline
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpnev_storeoffp_load)(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPNE(v, expectnot, "%l[cmpfail]")
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+ RSEQ_ASM_OP_CMPNE(v, expectnot, "%l[error2]")
+#endif
+ RSEQ_ASM_OP_R_LOAD(v)
+ RSEQ_ASM_OP_R_STORE(load)
+ RSEQ_ASM_OP_R_LOAD_OFF(voffp)
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [load] "m" (*load),
+ [voffp] "r" (voffp)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG_1
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __always_inline
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+#endif
+ RSEQ_ASM_OP_R_LOAD(v)
+ RSEQ_ASM_OP_R_ADD(count)
+ RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [count] "r" (count)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG_1
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __always_inline
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_cmpeqv_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error3]")
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, "%l[cmpfail]")
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+ RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
+ RSEQ_ASM_OP_CMPEQ(v2, expect2, "%l[error3]")
+#endif
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG_1
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+#define RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+
+/*
+ * pval = *(ptr+off);
+ * *pval += inc;
+ */
+static inline __always_inline
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, intptr_t inc, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+#endif
+ RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3)
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [ptr] "r" (ptr),
+ [off] "er" (off),
+ [inc] "er" (inc)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG_1
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
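
Per the comment above it, RSEQ_ASM_OP_R_DEREF_ADDV() is a double-indirect fetch-and-add. Spelled out in C (a sketch of the committed effect: off is a byte offset, and the read-modify-write is protected by the critical section rather than by atomic instructions):

	intptr_t *pval = *(intptr_t **)((uintptr_t)ptr + off);	/* pval = *(ptr + off) */
	*pval += inc;						/* committed by the final store */
	return 0;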
+
+#endif /* #if defined(RSEQ_TEMPLATE_MO_RELAXED) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __always_inline
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trystorev_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+ RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
+#endif
+ RSEQ_ASM_OP_STORE(newv2, v2)
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
+#else
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+#endif
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [expect] "r" (expect),
+ [v] "m" (*v),
+ [newv] "r" (newv),
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG_1
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __always_inline
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trymemcpy_storev)(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+ __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
+ RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
+#endif
+ RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
+ RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
+#endif
+ RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
+#else
+ RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
+#endif
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [expect] "r" (expect),
+ [v] "m" (*v),
+ [newv] "r" (newv),
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len)
+ RSEQ_INJECT_INPUT
+ : "memory", RSEQ_ASM_TMP_REG_1, RSEQ_ASM_TMP_REG_2,
+ RSEQ_ASM_TMP_REG_3, RSEQ_ASM_TMP_REG_4
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* #if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#include "rseq-bits-reset.h"
diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
index 3a391c9bf468..17932a79e066 100644
--- a/tools/testing/selftests/rseq/rseq-riscv.h
+++ b/tools/testing/selftests/rseq/rseq-riscv.h
@@ -49,10 +49,6 @@ do { \
RSEQ_WRITE_ONCE(*(p), v); \
} while (0)
-#ifdef RSEQ_SKIP_FASTPATH
-#include "rseq-skip.h"
-#else /* !RSEQ_SKIP_FASTPATH */
-
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
post_commit_offset, abort_ip) \
".pushsection __rseq_cs, \"aw\"\n" \
@@ -169,509 +165,34 @@ do { \
RSEQ_ASM_OP_R_ADD(inc) \
__rseq_str(post_commit_label) ":\n"
-static inline __always_inline
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
+/* Per-cpu-id indexing. */
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
-#endif
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
-
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __always_inline
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- off_t voffp, intptr_t *load, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPNE(v, expectnot, "%l[cmpfail]")
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
- RSEQ_ASM_OP_CMPNE(v, expectnot, "%l[error2]")
-#endif
- RSEQ_ASM_OP_R_LOAD(v)
- RSEQ_ASM_OP_R_STORE(load)
- RSEQ_ASM_OP_R_LOAD_OFF(voffp)
- RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [expectnot] "r" (expectnot),
- [load] "m" (*load),
- [voffp] "r" (voffp)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_CPU_ID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-riscv-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
-static inline __always_inline
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-riscv-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_CPU_ID
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
-#endif
- RSEQ_ASM_OP_R_LOAD(v)
- RSEQ_ASM_OP_R_ADD(count)
- RSEQ_ASM_OP_R_FINAL_STORE(v, 3)
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [count] "r" (count)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-#endif
-}
-
-static inline __always_inline
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
-#endif
- RSEQ_ASM_OP_STORE(newv2, v2)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [expect] "r" (expect),
- [v] "m" (*v),
- [newv] "r" (newv),
- [v2] "m" (*v2),
- [newv2] "r" (newv2)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
-
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __always_inline
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
-#endif
- RSEQ_ASM_OP_STORE(newv2, v2)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [expect] "r" (expect),
- [v] "m" (*v),
- [newv] "r" (newv),
- [v2] "m" (*v2),
- [newv2] "r" (newv2)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
-
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __always_inline
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error3]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_OP_CMPEQ(v2, expect2, "%l[cmpfail]")
- RSEQ_INJECT_ASM(5)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
- RSEQ_ASM_OP_CMPEQ(v2, expect2, "%l[error3]")
-#endif
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [expect] "r" (expect),
- [v2] "m" (*v2),
- [expect2] "r" (expect2),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2, error3
-#endif
- );
-
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-error3:
- rseq_bug("2nd expected value comparison failed");
-#endif
-}
-
-static inline __always_inline
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
-#endif
- RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_OP_FINAL_STORE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [expect] "r" (expect),
- [v] "m" (*v),
- [newv] "r" (newv),
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1, RSEQ_ASM_TMP_REG_2,
- RSEQ_ASM_TMP_REG_3, RSEQ_ASM_TMP_REG_4
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
-
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __always_inline
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[cmpfail]")
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error2]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[cmpfail]")
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
- RSEQ_ASM_OP_CMPEQ(v, expect, "%l[error2]")
-#endif
- RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_OP_FINAL_STORE_RELEASE(newv, v, 3)
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [expect] "r" (expect),
- [v] "m" (*v),
- [newv] "r" (newv),
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1, RSEQ_ASM_TMP_REG_2,
- RSEQ_ASM_TMP_REG_3, RSEQ_ASM_TMP_REG_4
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
-
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_bug("expected value comparison failed");
-#endif
-}
+/* Per-mm-cid indexing. */
-#define RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+#define RSEQ_TEMPLATE_MM_CID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-riscv-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
-/*
- * pval = *(ptr+off)
- * *pval += inc;
- */
-static inline __always_inline
-int rseq_offset_deref_addv(intptr_t *ptr, off_t off, intptr_t inc, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto(RSEQ_ASM_DEFINE_TABLE(1, 2f, 3f, 4f)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(2f, "%l[error1]")
-#endif
- RSEQ_ASM_STORE_RSEQ_CS(2, 1b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
-#endif
- RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3)
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_DEFINE_ABORT(4, abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [ptr] "r" (ptr),
- [off] "er" (off),
- [inc] "er" (inc)
- RSEQ_INJECT_INPUT
- : "memory", RSEQ_ASM_TMP_REG_1
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-riscv-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_MM_CID
+
+/* APIs which are not based on cpu ids. */
-#endif /* !RSEQ_SKIP_FASTPATH */
+#define RSEQ_TEMPLATE_CPU_ID_NONE
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-riscv-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+#undef RSEQ_TEMPLATE_CPU_ID_NONE
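
With the expansions above, a caller picks a generated variant and retries on abort (-1) or compare failure (1). An illustrative retry loop for an rseq-registered thread, assuming the _relaxed_cpu_id suffix and the rseq_current_cpu_raw()/RSEQ_READ_ONCE helpers from the selftests' rseq.h:

	static void percpu_counter_inc(intptr_t *counters)	/* one slot per cpu */
	{
		int ret;

		do {
			int cpu = rseq_current_cpu_raw();
			intptr_t old = RSEQ_READ_ONCE(counters[cpu]);

			ret = rseq_cmpeqv_storev_relaxed_cpu_id(&counters[cpu],
								old, old + 1, cpu);
		} while (ret);	/* -1: aborted/migrated, 1: value changed under us */
	}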
diff --git a/tools/testing/selftests/rseq/rseq-s390-bits.h b/tools/testing/selftests/rseq/rseq-s390-bits.h
new file mode 100644
index 000000000000..0cf17d9f170a
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-s390-bits.h
@@ -0,0 +1,474 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+
+#include "rseq-bits-template.h"
+
+#if defined(RSEQ_TEMPLATE_MO_RELAXED) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load @v
+ * into @load, and store the content of *@v + voffp into @v.
+ */
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpnev_storeoffp_load)(intptr_t *v, intptr_t expectnot,
+ long voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " %%r1, %[v]\n\t"
+ LONG_CMP_R " %%r1, %[expectnot]\n\t"
+ "je %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " %%r1, %[v]\n\t"
+ LONG_CMP_R " %%r1, %[expectnot]\n\t"
+ "je %l[error2]\n\t"
+#endif
+ LONG_S " %%r1, %[load]\n\t"
+ LONG_ADD_R " %%r1, %[voffp]\n\t"
+ LONG_L " %%r1, 0(%%r1)\n\t"
+ /* final store */
+ LONG_S " %%r1, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "r" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0", "r1"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
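
/*
 * Editor sketch: in plain, non-restartable C the sequence above amounts
 * to the following (voffp taken as a byte offset, as in the LONG_ADD_R
 * step; the helper name is illustrative):
 */
static int cmpnev_storeoffp_load_sketch(intptr_t *v, intptr_t expectnot,
					long voffp, intptr_t *load)
{
	if (*v == expectnot)
		return 1;			/* cmpfail */
	*load = *v;				/* keep the old value */
	*v = *(intptr_t *)(*v + voffp);		/* v = *(old + voffp) */
	return 0;
}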
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ LONG_L " %%r0, %[v]\n\t"
+ LONG_ADD_R " %%r0, %[count]\n\t"
+ /* final store */
+ LONG_S " %%r0, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "r" (count)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_cmpeqv_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ LONG_CMP " %[expect2], %[v2]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+ LONG_CMP " %[expect2], %[v2]\n\t"
+ "jnz %l[error3]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_after_asm_goto();
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+#endif /* #if defined(RSEQ_TEMPLATE_MO_RELAXED) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+/* s390 is TSO. */
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trystorev_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* try store */
+ LONG_S " %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* s390 is TSO. */
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trymemcpy_storev)(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint64_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ LONG_S " %[src], %[rseq_scratch0]\n\t"
+ LONG_S " %[dst], %[rseq_scratch1]\n\t"
+ LONG_S " %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ LONG_CMP " %[expect], %[v]\n\t"
+ "jnz 7f\n\t"
+#endif
+ /* try memcpy */
+ LONG_LT_R " %[len], %[len]\n\t"
+ "jz 333f\n\t"
+ "222:\n\t"
+ "ic %%r0,0(%[src])\n\t"
+ "stc %%r0,0(%[dst])\n\t"
+ LONG_ADDI " %[src], 1\n\t"
+ LONG_ADDI " %[dst], 1\n\t"
+ LONG_ADDI " %[len], -1\n\t"
+ "jnz 222b\n\t"
+ "333:\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t"
+ RSEQ_ASM_DEFINE_ABORT(4,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ abort)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
+ [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "memory", "cc", "r0"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* #if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#include "rseq-bits-reset.h"
diff --git a/tools/testing/selftests/rseq/rseq-s390.h b/tools/testing/selftests/rseq/rseq-s390.h
index 4e6dc5f0cb42..46c92598acc7 100644
--- a/tools/testing/selftests/rseq/rseq-s390.h
+++ b/tools/testing/selftests/rseq/rseq-s390.h
@@ -28,10 +28,6 @@ do { \
RSEQ_WRITE_ONCE(*p, v); \
} while (0)
-#ifdef RSEQ_SKIP_FASTPATH
-#include "rseq-skip.h"
-#else /* !RSEQ_SKIP_FASTPATH */
-
#ifdef __s390x__
#define LONG_L "lg"
@@ -134,477 +130,34 @@ do { \
"jg %l[" __rseq_str(cmpfail_label) "]\n\t" \
".popsection\n\t"
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
+/* Per-cpu-id indexing. */
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_CMP " %[expect], %[v]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_CMP " %[expect], %[v]\n\t"
- "jnz %l[error2]\n\t"
-#endif
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r0"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_CPU_ID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-s390-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
-/*
- * Compare @v against @expectnot. When it does _not_ match, load @v
- * into @load, and store the content of *@v + voffp into @v.
- */
-static inline __attribute__((always_inline))
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- long voffp, intptr_t *load, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_L " %%r1, %[v]\n\t"
- LONG_CMP_R " %%r1, %[expectnot]\n\t"
- "je %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_L " %%r1, %[v]\n\t"
- LONG_CMP_R " %%r1, %[expectnot]\n\t"
- "je %l[error2]\n\t"
-#endif
- LONG_S " %%r1, %[load]\n\t"
- LONG_ADD_R " %%r1, %[voffp]\n\t"
- LONG_L " %%r1, 0(%%r1)\n\t"
- /* final store */
- LONG_S " %%r1, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expectnot] "r" (expectnot),
- [voffp] "r" (voffp),
- [load] "m" (*load)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r0", "r1"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-s390-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_CPU_ID
-static inline __attribute__((always_inline))
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- RSEQ_INJECT_C(9)
+/* Per-mm-cid indexing. */
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
-#endif
- LONG_L " %%r0, %[v]\n\t"
- LONG_ADD_R " %%r0, %[count]\n\t"
- /* final store */
- LONG_S " %%r0, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [count] "r" (count)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r0"
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_MM_CID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-s390-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-s390-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_MM_CID
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_CMP " %[expect], %[v]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_CMP " %[expect], %[v]\n\t"
- "jnz %l[error2]\n\t"
-#endif
- /* try store */
- LONG_S " %[newv2], %[v2]\n\t"
- RSEQ_INJECT_ASM(5)
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r0"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-/* s390 is TSO. */
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu);
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_CMP " %[expect], %[v]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
- LONG_CMP " %[expect2], %[v2]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(5)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
- LONG_CMP " %[expect], %[v]\n\t"
- "jnz %l[error2]\n\t"
- LONG_CMP " %[expect2], %[v2]\n\t"
- "jnz %l[error3]\n\t"
-#endif
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* cmp2 input */
- [v2] "m" (*v2),
- [expect2] "r" (expect2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r0"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2, error3
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("1st expected value comparison failed");
-error3:
- rseq_after_asm_goto();
- rseq_bug("2nd expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- uint64_t rseq_scratch[3];
-
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- LONG_S " %[src], %[rseq_scratch0]\n\t"
- LONG_S " %[dst], %[rseq_scratch1]\n\t"
- LONG_S " %[len], %[rseq_scratch2]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, rseq_cs)
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
- RSEQ_INJECT_ASM(3)
- LONG_CMP " %[expect], %[v]\n\t"
- "jnz 5f\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
- LONG_CMP " %[expect], %[v]\n\t"
- "jnz 7f\n\t"
-#endif
- /* try memcpy */
- LONG_LT_R " %[len], %[len]\n\t"
- "jz 333f\n\t"
- "222:\n\t"
- "ic %%r0,0(%[src])\n\t"
- "stc %%r0,0(%[dst])\n\t"
- LONG_ADDI " %[src], 1\n\t"
- LONG_ADDI " %[dst], 1\n\t"
- LONG_ADDI " %[len], -1\n\t"
- "jnz 222b\n\t"
- "333:\n\t"
- RSEQ_INJECT_ASM(5)
- /* final store */
- LONG_S " %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- /* teardown */
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t"
- RSEQ_ASM_DEFINE_ABORT(4,
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- abort)
- RSEQ_ASM_DEFINE_CMPFAIL(5,
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- cmpfail)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_CMPFAIL(6,
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- error1)
- RSEQ_ASM_DEFINE_CMPFAIL(7,
- LONG_L " %[len], %[rseq_scratch2]\n\t"
- LONG_L " %[dst], %[rseq_scratch1]\n\t"
- LONG_L " %[src], %[rseq_scratch0]\n\t",
- error2)
-#endif
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [current_cpu_id] "m" (rseq_get_abi()->cpu_id),
- [rseq_cs] "m" (rseq_get_abi()->rseq_cs.arch.ptr),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len),
- [rseq_scratch0] "m" (rseq_scratch[0]),
- [rseq_scratch1] "m" (rseq_scratch[1]),
- [rseq_scratch2] "m" (rseq_scratch[2])
- RSEQ_INJECT_INPUT
- : "memory", "cc", "r0"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+/* APIs which are not based on cpu ids. */
-/* s390 is TSO. */
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- return rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
- newv, cpu);
-}
-#endif /* !RSEQ_SKIP_FASTPATH */
+#define RSEQ_TEMPLATE_CPU_ID_NONE
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-s390-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+#undef RSEQ_TEMPLATE_CPU_ID_NONE
diff --git a/tools/testing/selftests/rseq/rseq-skip.h b/tools/testing/selftests/rseq/rseq-skip.h
deleted file mode 100644
index 7b53dac1fcdd..000000000000
--- a/tools/testing/selftests/rseq/rseq-skip.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
-/*
- * rseq-skip.h
- *
- * (C) Copyright 2017-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- return -1;
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- long voffp, intptr_t *load, int cpu)
-{
- return -1;
-}
-
-static inline __attribute__((always_inline))
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- return -1;
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- return -1;
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- return -1;
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- return -1;
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- return -1;
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- return -1;
-}
diff --git a/tools/testing/selftests/rseq/rseq-x86-bits.h b/tools/testing/selftests/rseq/rseq-x86-bits.h
new file mode 100644
index 000000000000..8a9431eec467
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-x86-bits.h
@@ -0,0 +1,993 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * rseq-x86-bits.h
+ *
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "rseq-bits-template.h"
+
+#ifdef __x86_64__
+
+#if defined(RSEQ_TEMPLATE_MO_RELAXED) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* final store */
+ "movq %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load @v
+ * into @load, and store the content of *@v + voffp into @v.
+ */
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpnev_storeoffp_load)(intptr_t *v, intptr_t expectnot,
+ long voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "movq %[v], %%rbx\n\t"
+ "cmpq %%rbx, %[expectnot]\n\t"
+ "je %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+ "movq %[v], %%rbx\n\t"
+ "cmpq %%rbx, %[expectnot]\n\t"
+ "je %l[error2]\n\t"
+#endif
+ "movq %%rbx, %[load]\n\t"
+ "addq %[voffp], %%rbx\n\t"
+ "movq (%%rbx), %%rbx\n\t"
+ /* final store */
+ "movq %%rbx, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "er" (voffp),
+ [load] "m" (*load)
+ : "memory", "cc", "rax", "rbx"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+#endif
+ /* final store */
+ "addq %[count], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "er" (count)
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+#define RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+
+/*
+ * pval = *(ptr+off)
+ * *pval += inc;
+ */
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, long off, intptr_t inc, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+#endif
+ /* get p+v */
+ "movq %[ptr], %%rbx\n\t"
+ "addq %[off], %%rbx\n\t"
+ /* get pv */
+ "movq (%%rbx), %%rcx\n\t"
+ /* *pv += inc */
+ "addq %[inc], (%%rcx)\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* final store input */
+ [ptr] "m" (*ptr),
+ [off] "er" (off),
+ [inc] "er" (inc)
+ : "memory", "cc", "rax", "rbx", "rcx"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ return 0;
+abort:
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
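
/*
 * Editor sketch: unrolled into plain, non-restartable C, the asm above
 * performs the following (byte offsets; helper name is illustrative):
 */
static int offset_deref_addv_sketch(intptr_t *ptr, long off, intptr_t inc)
{
	intptr_t p = *ptr;				/* movq %[ptr], %%rbx */
	intptr_t *pval = *(intptr_t **)(p + off);	/* get p+v, then get pv */
	*pval += inc;					/* addq %[inc], (%%rcx) */
	return 0;
}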
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_cmpeqv_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ "cmpq %[v2], %[expect2]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+ "cmpq %[v2], %[expect2]\n\t"
+ "jnz %l[error3]\n\t"
+#endif
+ /* final store */
+ "movq %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_after_asm_goto();
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+#endif /* #if defined(RSEQ_TEMPLATE_MO_RELAXED) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trystorev_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+ "cmpq %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* try store */
+ "movq %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ "movq %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
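+
+/*
+ * Editor's note: no fence is emitted here when RSEQ_TEMPLATE_MO_RELEASE
+ * is defined, unlike the i386 variant later in this file: x86-64 is
+ * TSO, so the try store above is already ordered before the final
+ * store (the rseq-x86.h release wrappers removed below noted
+ * "x86-64 is TSO").
+ */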
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trymemcpy_storev)(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint64_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ "movq %[src], %[rseq_scratch0]\n\t"
+ "movq %[dst], %[rseq_scratch1]\n\t"
+ "movq %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 6f)
+ "cmpq %[v], %[expect]\n\t"
+ "jnz 7f\n\t"
+#endif
+ /* try memcpy */
+		"test %[len], %[len]\n\t"
+		"jz 333f\n\t"
+		"222:\n\t"
+		"movb (%[src]), %%al\n\t"
+		"movb %%al, (%[dst])\n\t"
+		"inc %[src]\n\t"
+		"inc %[dst]\n\t"
+		"dec %[len]\n\t"
+		"jnz 222b\n\t"
+		"333:\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ "movq %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t"
+ RSEQ_ASM_DEFINE_ABORT(4,
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t",
+ abort)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ "movq %[rseq_scratch2], %[len]\n\t"
+ "movq %[rseq_scratch1], %[dst]\n\t"
+ "movq %[rseq_scratch0], %[src]\n\t",
+ error2)
+#endif
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ : "memory", "cc", "rax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
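+
+/*
+ * Editor's note: @src, @dst and @len are declared as inputs yet are
+ * advanced/decremented by the byte-copy loop, so they are spilled to
+ * rseq_scratch[] before the critical section and restored on every
+ * exit path (commit, abort, cmpfail) above. The same scheme is used
+ * by the i386 variant later in this file.
+ */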
+
+#endif /* #if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#elif defined(__i386__)
+
+#if defined(RSEQ_TEMPLATE_MO_RELAXED) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_storev)(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* final store */
+ "movl %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load the
+ * current value of @v into @load, and store the value read from
+ * (*@v + voffp) into @v.
+ */
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpnev_storeoffp_load)(intptr_t *v, intptr_t expectnot,
+ long voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "movl %[v], %%ebx\n\t"
+ "cmpl %%ebx, %[expectnot]\n\t"
+ "je %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+ "movl %[v], %%ebx\n\t"
+ "cmpl %%ebx, %[expectnot]\n\t"
+ "je %l[error2]\n\t"
+#endif
+ "movl %%ebx, %[load]\n\t"
+ "addl %[voffp], %%ebx\n\t"
+ "movl (%%ebx), %%ebx\n\t"
+ /* final store */
+ "movl %%ebx, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "ir" (voffp),
+ [load] "m" (*load)
+ : "memory", "cc", "eax", "ebx"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_addv)(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+#endif
+ /* final store */
+ "addl %[count], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* final store input */
+ [v] "m" (*v),
+ [count] "ir" (count)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_cmpeqv_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ "cmpl %[expect2], %[v2]\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+ "cmpl %[v], %[expect]\n\t"
+ "jnz %l[error2]\n\t"
+ "cmpl %[expect2], %[v2]\n\t"
+ "jnz %l[error3]\n\t"
+#endif
+ "movl %[newv], %%eax\n\t"
+ /* final store */
+ "movl %%eax, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "m" (newv)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_after_asm_goto();
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+#endif /* #if defined(RSEQ_TEMPLATE_MO_RELAXED) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) && \
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID))
+
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trystorev_storev)(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "movl %[expect], %%eax\n\t"
+ "cmpl %[v], %%eax\n\t"
+ "jnz %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
+ "movl %[expect], %%eax\n\t"
+ "cmpl %[v], %%eax\n\t"
+ "jnz %l[error2]\n\t"
+#endif
+ /* try store */
+ "movl %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ "lock; addl $0,-128(%%esp)\n\t"
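+		/*
+		 * Editor's note: a lock-prefixed add of zero is a full
+		 * barrier that leaves memory unchanged; the -128(%esp)
+		 * target lies below the stack pointer, presumably to
+		 * keep the RMW off live stack data. It provides the
+		 * release ordering this template variant advertises
+		 * (also used by the memcpy variant below).
+		 */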
+#endif
+ /* final store */
+ "movl %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ RSEQ_ASM_DEFINE_ABORT(4, "", abort)
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "m" (expect),
+ [newv] "r" (newv)
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+/* TODO: implement a faster memcpy. */
+static inline __attribute__((always_inline))
+int RSEQ_TEMPLATE_IDENTIFIER(rseq_cmpeqv_trymemcpy_storev)(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uint32_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
+ RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
+#endif
+ "movl %[src], %[rseq_scratch0]\n\t"
+ "movl %[dst], %[rseq_scratch1]\n\t"
+ "movl %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 4f)
+ RSEQ_INJECT_ASM(3)
+ "movl %[expect], %%eax\n\t"
+ "cmpl %%eax, %[v]\n\t"
+ "jnz 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_TEMPLATE_CPU_ID_OFFSET(%[rseq_offset]), 6f)
+ "movl %[expect], %%eax\n\t"
+ "cmpl %%eax, %[v]\n\t"
+ "jnz 7f\n\t"
+#endif
+ /* try memcpy */
+		"test %[len], %[len]\n\t"
+		"jz 333f\n\t"
+		"222:\n\t"
+		"movb (%[src]), %%al\n\t"
+		"movb %%al, (%[dst])\n\t"
+		"inc %[src]\n\t"
+		"inc %[dst]\n\t"
+		"dec %[len]\n\t"
+		"jnz 222b\n\t"
+		"333:\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_TEMPLATE_MO_RELEASE
+ "lock; addl $0,-128(%%esp)\n\t"
+#endif
+ "movl %[newv], %%eax\n\t"
+ /* final store */
+ "movl %%eax, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t"
+ RSEQ_ASM_DEFINE_ABORT(4,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ abort)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ "movl %[rseq_scratch2], %[len]\n\t"
+ "movl %[rseq_scratch1], %[dst]\n\t"
+ "movl %[rseq_scratch0], %[src]\n\t",
+ error2)
+#endif
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [rseq_offset] "r" (rseq_offset),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "m" (expect),
+ [newv] "m" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ : "memory", "cc", "eax"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_after_asm_goto();
+ return 0;
+abort:
+ rseq_after_asm_goto();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_after_asm_goto();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_after_asm_goto();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_after_asm_goto();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* #if (defined(RSEQ_TEMPLATE_MO_RELAXED) || defined(RSEQ_TEMPLATE_MO_RELEASE)) &&
+ (defined(RSEQ_TEMPLATE_CPU_ID) || defined(RSEQ_TEMPLATE_MM_CID)) */
+
+#endif
+
+#include "rseq-bits-reset.h"
diff --git a/tools/testing/selftests/rseq/rseq-x86.h b/tools/testing/selftests/rseq/rseq-x86.h
index bd01dc41ca13..fb65ef54b0fb 100644
--- a/tools/testing/selftests/rseq/rseq-x86.h
+++ b/tools/testing/selftests/rseq/rseq-x86.h
@@ -2,9 +2,13 @@
/*
* rseq-x86.h
*
- * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * (C) Copyright 2016-2022 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
+#ifndef RSEQ_H
+#error "Never use <rseq-x86.h> directly; include <rseq.h> instead."
+#endif
+
#include <stdint.h>
/*
@@ -22,9 +26,10 @@
* address through a "r" input operand.
*/
-/* Offset of cpu_id and rseq_cs fields in struct rseq. */
+/* Offset of cpu_id, rseq_cs, and mm_cid fields in struct rseq. */
#define RSEQ_CPU_ID_OFFSET 4
#define RSEQ_CS_OFFSET 8
+#define RSEQ_MM_CID_OFFSET 24
#ifdef __x86_64__
@@ -50,10 +55,6 @@ do { \
RSEQ_WRITE_ONCE(*p, v); \
} while (0)
-#ifdef RSEQ_SKIP_FASTPATH
-#include "rseq-skip.h"
-#else /* !RSEQ_SKIP_FASTPATH */
-
#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
".pushsection __rseq_cs, \"aw\"\n\t" \
@@ -112,525 +113,6 @@ do { \
"jmp %l[" __rseq_str(cmpfail_label) "]\n\t" \
".popsection\n\t"
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "cmpq %[v], %[expect]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "cmpq %[v], %[expect]\n\t"
- "jnz %l[error2]\n\t"
-#endif
- /* final store */
- "movq %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- : "memory", "cc", "rax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-/*
- * Compare @v against @expectnot. When it does _not_ match, load @v
- * into @load, and store the content of *@v + voffp into @v.
- */
-static inline __attribute__((always_inline))
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- long voffp, intptr_t *load, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "movq %[v], %%rbx\n\t"
- "cmpq %%rbx, %[expectnot]\n\t"
- "je %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "movq %[v], %%rbx\n\t"
- "cmpq %%rbx, %[expectnot]\n\t"
- "je %l[error2]\n\t"
-#endif
- "movq %%rbx, %[load]\n\t"
- "addq %[voffp], %%rbx\n\t"
- "movq (%%rbx), %%rbx\n\t"
- /* final store */
- "movq %%rbx, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* final store input */
- [v] "m" (*v),
- [expectnot] "r" (expectnot),
- [voffp] "er" (voffp),
- [load] "m" (*load)
- : "memory", "cc", "rax", "rbx"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
-#endif
- /* final store */
- "addq %[count], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* final store input */
- [v] "m" (*v),
- [count] "er" (count)
- : "memory", "cc", "rax"
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-#endif
-}
-
-#define RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
-
-/*
- * pval = *(ptr+off)
- * *pval += inc;
- */
-static inline __attribute__((always_inline))
-int rseq_offset_deref_addv(intptr_t *ptr, long off, intptr_t inc, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
-#endif
- /* get p+v */
- "movq %[ptr], %%rbx\n\t"
- "addq %[off], %%rbx\n\t"
- /* get pv */
- "movq (%%rbx), %%rcx\n\t"
- /* *pv += inc */
- "addq %[inc], (%%rcx)\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* final store input */
- [ptr] "m" (*ptr),
- [off] "er" (off),
- [inc] "er" (inc)
- : "memory", "cc", "rax", "rbx", "rcx"
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- return 0;
-abort:
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_bug("cpu_id comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "cmpq %[v], %[expect]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "cmpq %[v], %[expect]\n\t"
- "jnz %l[error2]\n\t"
-#endif
- /* try store */
- "movq %[newv2], %[v2]\n\t"
- RSEQ_INJECT_ASM(5)
- /* final store */
- "movq %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- : "memory", "cc", "rax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-/* x86-64 is TSO. */
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- return rseq_cmpeqv_trystorev_storev(v, expect, v2, newv2, newv, cpu);
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "cmpq %[v], %[expect]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
- "cmpq %[v2], %[expect2]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(5)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "cmpq %[v], %[expect]\n\t"
- "jnz %l[error2]\n\t"
- "cmpq %[v2], %[expect2]\n\t"
- "jnz %l[error3]\n\t"
-#endif
- /* final store */
- "movq %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* cmp2 input */
- [v2] "m" (*v2),
- [expect2] "r" (expect2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- : "memory", "cc", "rax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2, error3
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("1st expected value comparison failed");
-error3:
- rseq_after_asm_goto();
- rseq_bug("2nd expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- uint64_t rseq_scratch[3];
-
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- "movq %[src], %[rseq_scratch0]\n\t"
- "movq %[dst], %[rseq_scratch1]\n\t"
- "movq %[len], %[rseq_scratch2]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "cmpq %[v], %[expect]\n\t"
- "jnz 5f\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f)
- "cmpq %[v], %[expect]\n\t"
- "jnz 7f\n\t"
-#endif
- /* try memcpy */
- "test %[len], %[len]\n\t" \
- "jz 333f\n\t" \
- "222:\n\t" \
- "movb (%[src]), %%al\n\t" \
- "movb %%al, (%[dst])\n\t" \
- "inc %[src]\n\t" \
- "inc %[dst]\n\t" \
- "dec %[len]\n\t" \
- "jnz 222b\n\t" \
- "333:\n\t" \
- RSEQ_INJECT_ASM(5)
- /* final store */
- "movq %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- /* teardown */
- "movq %[rseq_scratch2], %[len]\n\t"
- "movq %[rseq_scratch1], %[dst]\n\t"
- "movq %[rseq_scratch0], %[src]\n\t"
- RSEQ_ASM_DEFINE_ABORT(4,
- "movq %[rseq_scratch2], %[len]\n\t"
- "movq %[rseq_scratch1], %[dst]\n\t"
- "movq %[rseq_scratch0], %[src]\n\t",
- abort)
- RSEQ_ASM_DEFINE_CMPFAIL(5,
- "movq %[rseq_scratch2], %[len]\n\t"
- "movq %[rseq_scratch1], %[dst]\n\t"
- "movq %[rseq_scratch0], %[src]\n\t",
- cmpfail)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_CMPFAIL(6,
- "movq %[rseq_scratch2], %[len]\n\t"
- "movq %[rseq_scratch1], %[dst]\n\t"
- "movq %[rseq_scratch0], %[src]\n\t",
- error1)
- RSEQ_ASM_DEFINE_CMPFAIL(7,
- "movq %[rseq_scratch2], %[len]\n\t"
- "movq %[rseq_scratch1], %[dst]\n\t"
- "movq %[rseq_scratch0], %[src]\n\t",
- error2)
-#endif
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len),
- [rseq_scratch0] "m" (rseq_scratch[0]),
- [rseq_scratch1] "m" (rseq_scratch[1]),
- [rseq_scratch2] "m" (rseq_scratch[2])
- : "memory", "cc", "rax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-/* x86-64 is TSO. */
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- return rseq_cmpeqv_trymemcpy_storev(v, expect, dst, src, len,
- newv, cpu);
-}
-
-#endif /* !RSEQ_SKIP_FASTPATH */
-
#elif defined(__i386__)
#define RSEQ_ASM_TP_SEGMENT %%gs
@@ -657,10 +139,6 @@ do { \
RSEQ_WRITE_ONCE(*p, v); \
} while (0)
-#ifdef RSEQ_SKIP_FASTPATH
-#include "rseq-skip.h"
-#else /* !RSEQ_SKIP_FASTPATH */
-
/*
* Use eax as scratch register and take memory operands as input to
* lessen register pressure. Especially needed when compiling in O0.
@@ -721,645 +199,36 @@ do { \
"jmp %l[" __rseq_str(cmpfail_label) "]\n\t" \
".popsection\n\t"
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "cmpl %[v], %[expect]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "cmpl %[v], %[expect]\n\t"
- "jnz %l[error2]\n\t"
-#endif
- /* final store */
- "movl %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- : "memory", "cc", "eax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-/*
- * Compare @v against @expectnot. When it does _not_ match, load @v
- * into @load, and store the content of *@v + voffp into @v.
- */
-static inline __attribute__((always_inline))
-int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
- long voffp, intptr_t *load, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "movl %[v], %%ebx\n\t"
- "cmpl %%ebx, %[expectnot]\n\t"
- "je %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "movl %[v], %%ebx\n\t"
- "cmpl %%ebx, %[expectnot]\n\t"
- "je %l[error2]\n\t"
-#endif
- "movl %%ebx, %[load]\n\t"
- "addl %[voffp], %%ebx\n\t"
- "movl (%%ebx), %%ebx\n\t"
- /* final store */
- "movl %%ebx, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(5)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* final store input */
- [v] "m" (*v),
- [expectnot] "r" (expectnot),
- [voffp] "ir" (voffp),
- [load] "m" (*load)
- : "memory", "cc", "eax", "ebx"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_addv(intptr_t *v, intptr_t count, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
-#endif
- /* final store */
- "addl %[count], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(4)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* final store input */
- [v] "m" (*v),
- [count] "ir" (count)
- : "memory", "cc", "eax"
- RSEQ_INJECT_CLOBBER
- : abort
-#ifdef RSEQ_COMPARE_TWICE
- , error1
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "cmpl %[v], %[expect]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "cmpl %[v], %[expect]\n\t"
- "jnz %l[error2]\n\t"
-#endif
- /* try store */
- "movl %[newv2], %%eax\n\t"
- "movl %%eax, %[v2]\n\t"
- RSEQ_INJECT_ASM(5)
- /* final store */
- "movl %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "m" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "r" (newv)
- : "memory", "cc", "eax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t newv2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "movl %[expect], %%eax\n\t"
- "cmpl %[v], %%eax\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "movl %[expect], %%eax\n\t"
- "cmpl %[v], %%eax\n\t"
- "jnz %l[error2]\n\t"
-#endif
- /* try store */
- "movl %[newv2], %[v2]\n\t"
- RSEQ_INJECT_ASM(5)
- "lock; addl $0,-128(%%esp)\n\t"
- /* final store */
- "movl %[newv], %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* try store input */
- [v2] "m" (*v2),
- [newv2] "r" (newv2),
- /* final store input */
- [v] "m" (*v),
- [expect] "m" (expect),
- [newv] "r" (newv)
- : "memory", "cc", "eax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-
-}
-
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
- intptr_t *v2, intptr_t expect2,
- intptr_t newv, int cpu)
-{
- RSEQ_INJECT_C(9)
-
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error3])
-#endif
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "cmpl %[v], %[expect]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(4)
- "cmpl %[expect2], %[v2]\n\t"
- "jnz %l[cmpfail]\n\t"
- RSEQ_INJECT_ASM(5)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), %l[error1])
- "cmpl %[v], %[expect]\n\t"
- "jnz %l[error2]\n\t"
- "cmpl %[expect2], %[v2]\n\t"
- "jnz %l[error3]\n\t"
#endif
- "movl %[newv], %%eax\n\t"
- /* final store */
- "movl %%eax, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- RSEQ_ASM_DEFINE_ABORT(4, "", abort)
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* cmp2 input */
- [v2] "m" (*v2),
- [expect2] "r" (expect2),
- /* final store input */
- [v] "m" (*v),
- [expect] "r" (expect),
- [newv] "m" (newv)
- : "memory", "cc", "eax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2, error3
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("1st expected value comparison failed");
-error3:
- rseq_after_asm_goto();
- rseq_bug("2nd expected value comparison failed");
-#endif
-}
-/* TODO: implement a faster memcpy. */
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- uint32_t rseq_scratch[3];
+/* Per-cpu-id indexing. */
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_CPU_ID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-x86-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- "movl %[src], %[rseq_scratch0]\n\t"
- "movl %[dst], %[rseq_scratch1]\n\t"
- "movl %[len], %[rseq_scratch2]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "movl %[expect], %%eax\n\t"
- "cmpl %%eax, %[v]\n\t"
- "jnz 5f\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f)
- "movl %[expect], %%eax\n\t"
- "cmpl %%eax, %[v]\n\t"
- "jnz 7f\n\t"
-#endif
- /* try memcpy */
- "test %[len], %[len]\n\t" \
- "jz 333f\n\t" \
- "222:\n\t" \
- "movb (%[src]), %%al\n\t" \
- "movb %%al, (%[dst])\n\t" \
- "inc %[src]\n\t" \
- "inc %[dst]\n\t" \
- "dec %[len]\n\t" \
- "jnz 222b\n\t" \
- "333:\n\t" \
- RSEQ_INJECT_ASM(5)
- "movl %[newv], %%eax\n\t"
- /* final store */
- "movl %%eax, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- /* teardown */
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t"
- RSEQ_ASM_DEFINE_ABORT(4,
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t",
- abort)
- RSEQ_ASM_DEFINE_CMPFAIL(5,
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t",
- cmpfail)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_CMPFAIL(6,
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t",
- error1)
- RSEQ_ASM_DEFINE_CMPFAIL(7,
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t",
- error2)
-#endif
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* final store input */
- [v] "m" (*v),
- [expect] "m" (expect),
- [newv] "m" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len),
- [rseq_scratch0] "m" (rseq_scratch[0]),
- [rseq_scratch1] "m" (rseq_scratch[1]),
- [rseq_scratch2] "m" (rseq_scratch[2])
- : "memory", "cc", "eax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-x86-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_CPU_ID
-/* TODO: implement a faster memcpy. */
-static inline __attribute__((always_inline))
-int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
- void *dst, void *src, size_t len,
- intptr_t newv, int cpu)
-{
- uint32_t rseq_scratch[3];
+/* Per-mm-cid indexing. */
- RSEQ_INJECT_C(9)
+#define RSEQ_TEMPLATE_MM_CID
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-x86-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
- __asm__ __volatile__ goto (
- RSEQ_ASM_DEFINE_TABLE(3, 1f, 2f, 4f) /* start, commit, abort */
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[cmpfail])
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error1])
- RSEQ_ASM_DEFINE_EXIT_POINT(1f, %l[error2])
-#endif
- "movl %[src], %[rseq_scratch0]\n\t"
- "movl %[dst], %[rseq_scratch1]\n\t"
- "movl %[len], %[rseq_scratch2]\n\t"
- /* Start rseq by storing table entry pointer into rseq_cs. */
- RSEQ_ASM_STORE_RSEQ_CS(1, 3b, RSEQ_ASM_TP_SEGMENT:RSEQ_CS_OFFSET(%[rseq_offset]))
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 4f)
- RSEQ_INJECT_ASM(3)
- "movl %[expect], %%eax\n\t"
- "cmpl %%eax, %[v]\n\t"
- "jnz 5f\n\t"
- RSEQ_INJECT_ASM(4)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_CMP_CPU_ID(cpu_id, RSEQ_ASM_TP_SEGMENT:RSEQ_CPU_ID_OFFSET(%[rseq_offset]), 6f)
- "movl %[expect], %%eax\n\t"
- "cmpl %%eax, %[v]\n\t"
- "jnz 7f\n\t"
-#endif
- /* try memcpy */
- "test %[len], %[len]\n\t" \
- "jz 333f\n\t" \
- "222:\n\t" \
- "movb (%[src]), %%al\n\t" \
- "movb %%al, (%[dst])\n\t" \
- "inc %[src]\n\t" \
- "inc %[dst]\n\t" \
- "dec %[len]\n\t" \
- "jnz 222b\n\t" \
- "333:\n\t" \
- RSEQ_INJECT_ASM(5)
- "lock; addl $0,-128(%%esp)\n\t"
- "movl %[newv], %%eax\n\t"
- /* final store */
- "movl %%eax, %[v]\n\t"
- "2:\n\t"
- RSEQ_INJECT_ASM(6)
- /* teardown */
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t"
- RSEQ_ASM_DEFINE_ABORT(4,
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t",
- abort)
- RSEQ_ASM_DEFINE_CMPFAIL(5,
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t",
- cmpfail)
-#ifdef RSEQ_COMPARE_TWICE
- RSEQ_ASM_DEFINE_CMPFAIL(6,
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t",
- error1)
- RSEQ_ASM_DEFINE_CMPFAIL(7,
- "movl %[rseq_scratch2], %[len]\n\t"
- "movl %[rseq_scratch1], %[dst]\n\t"
- "movl %[rseq_scratch0], %[src]\n\t",
- error2)
-#endif
- : /* gcc asm goto does not allow outputs */
- : [cpu_id] "r" (cpu),
- [rseq_offset] "r" (rseq_offset),
- /* final store input */
- [v] "m" (*v),
- [expect] "m" (expect),
- [newv] "m" (newv),
- /* try memcpy input */
- [dst] "r" (dst),
- [src] "r" (src),
- [len] "r" (len),
- [rseq_scratch0] "m" (rseq_scratch[0]),
- [rseq_scratch1] "m" (rseq_scratch[1]),
- [rseq_scratch2] "m" (rseq_scratch[2])
- : "memory", "cc", "eax"
- RSEQ_INJECT_CLOBBER
- : abort, cmpfail
-#ifdef RSEQ_COMPARE_TWICE
- , error1, error2
-#endif
- );
- rseq_after_asm_goto();
- return 0;
-abort:
- rseq_after_asm_goto();
- RSEQ_INJECT_FAILED
- return -1;
-cmpfail:
- rseq_after_asm_goto();
- return 1;
-#ifdef RSEQ_COMPARE_TWICE
-error1:
- rseq_after_asm_goto();
- rseq_bug("cpu_id comparison failed");
-error2:
- rseq_after_asm_goto();
- rseq_bug("expected value comparison failed");
-#endif
-}
+#define RSEQ_TEMPLATE_MO_RELEASE
+#include "rseq-x86-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELEASE
+#undef RSEQ_TEMPLATE_MM_CID
-#endif /* !RSEQ_SKIP_FASTPATH */
+/* APIs which are not based on cpu ids. */
-#endif
+#define RSEQ_TEMPLATE_CPU_ID_NONE
+#define RSEQ_TEMPLATE_MO_RELAXED
+#include "rseq-x86-bits.h"
+#undef RSEQ_TEMPLATE_MO_RELAXED
+#undef RSEQ_TEMPLATE_CPU_ID_NONE
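+
+/*
+ * Editor's note: each pass over rseq-x86-bits.h above stamps out the
+ * ops with a suffix derived from the active template defines; e.g. the
+ * relaxed per-cpu-id pass produces (sketch, cf. the dispatch helpers
+ * in rseq.h):
+ *
+ *	int rseq_cmpeqv_storev_relaxed_cpu_id(intptr_t *v, intptr_t expect,
+ *					      intptr_t newv, int cpu);
+ *
+ * while the per-mm-cid pass produces rseq_cmpeqv_storev_relaxed_mm_cid().
+ */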
diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c
index 4177f9507bbe..4e4aa006004c 100644
--- a/tools/testing/selftests/rseq/rseq.c
+++ b/tools/testing/selftests/rseq/rseq.c
@@ -28,6 +28,8 @@
#include <limits.h>
#include <dlfcn.h>
#include <stddef.h>
+#include <sys/auxv.h>
+#include <linux/auxvec.h>
#include "../kselftest.h"
#include "rseq.h"
@@ -36,20 +38,38 @@ static const ptrdiff_t *libc_rseq_offset_p;
static const unsigned int *libc_rseq_size_p;
static const unsigned int *libc_rseq_flags_p;
-/* Offset from the thread pointer to the rseq area. */
+/* Offset from the thread pointer to the rseq area. */
ptrdiff_t rseq_offset;
-/* Size of the registered rseq area. 0 if the registration was
- unsuccessful. */
+/*
+ * Size of the registered rseq area. 0 if the registration was
+ * unsuccessful.
+ */
unsigned int rseq_size = -1U;
/* Flags used during rseq registration. */
unsigned int rseq_flags;
+/*
+ * rseq feature size supported by the kernel. 0 if the registration was
+ * unsuccessful.
+ */
+unsigned int rseq_feature_size = -1U;
+
static int rseq_ownership;
+static int rseq_reg_success;	/* At least one rseq registration has succeeded. */
+
+/* Allocate a large area for the TLS. */
+#define RSEQ_THREAD_AREA_ALLOC_SIZE 1024
+
+/* Original struct rseq feature size is 20 bytes. */
+#define ORIG_RSEQ_FEATURE_SIZE 20
+
+/* Original struct rseq allocation size is 32 bytes. */
+#define ORIG_RSEQ_ALLOC_SIZE 32
static
-__thread struct rseq_abi __rseq_abi __attribute__((tls_model("initial-exec"))) = {
+__thread struct rseq_abi __rseq_abi __attribute__((tls_model("initial-exec"), aligned(RSEQ_THREAD_AREA_ALLOC_SIZE))) = {
.cpu_id = RSEQ_ABI_CPU_ID_UNINITIALIZED,
};
@@ -59,6 +79,11 @@ static int sys_rseq(struct rseq_abi *rseq_abi, uint32_t rseq_len,
return syscall(__NR_rseq, rseq_abi, rseq_len, flags, sig);
}
+static int sys_getcpu(unsigned *cpu, unsigned *node)
+{
+ return syscall(__NR_getcpu, cpu, node, NULL);
+}
+
int rseq_available(void)
{
int rc;
@@ -84,10 +109,16 @@ int rseq_register_current_thread(void)
/* Treat libc's ownership as a successful registration. */
return 0;
}
- rc = sys_rseq(&__rseq_abi, sizeof(struct rseq_abi), 0, RSEQ_SIG);
- if (rc)
+ rc = sys_rseq(&__rseq_abi, rseq_size, 0, RSEQ_SIG);
+ if (rc) {
+ if (RSEQ_READ_ONCE(rseq_reg_success)) {
+ /* Incoherent success/failure within process. */
+ abort();
+ }
return -1;
+ }
assert(rseq_current_cpu_raw() >= 0);
+ RSEQ_WRITE_ONCE(rseq_reg_success, 1);
return 0;
}
@@ -99,12 +130,28 @@ int rseq_unregister_current_thread(void)
/* Treat libc's ownership as a successful unregistration. */
return 0;
}
- rc = sys_rseq(&__rseq_abi, sizeof(struct rseq_abi), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
+ rc = sys_rseq(&__rseq_abi, rseq_size, RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG);
if (rc)
return -1;
return 0;
}
+static
+unsigned int get_rseq_feature_size(void)
+{
+ unsigned long auxv_rseq_feature_size, auxv_rseq_align;
+
+ auxv_rseq_align = getauxval(AT_RSEQ_ALIGN);
+ assert(!auxv_rseq_align || auxv_rseq_align <= RSEQ_THREAD_AREA_ALLOC_SIZE);
+
+ auxv_rseq_feature_size = getauxval(AT_RSEQ_FEATURE_SIZE);
+ assert(!auxv_rseq_feature_size || auxv_rseq_feature_size <= RSEQ_THREAD_AREA_ALLOC_SIZE);
+ if (auxv_rseq_feature_size)
+ return auxv_rseq_feature_size;
+ else
+ return ORIG_RSEQ_FEATURE_SIZE;
+}
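+
+/*
+ * Editor's note: getauxval() yields 0 when the kernel does not provide
+ * AT_RSEQ_FEATURE_SIZE, so on older kernels this falls back to the
+ * original 20-byte feature size (the fields up to and including flags).
+ */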
+
static __attribute__((constructor))
void rseq_init(void)
{
@@ -117,14 +164,24 @@ void rseq_init(void)
rseq_offset = *libc_rseq_offset_p;
rseq_size = *libc_rseq_size_p;
rseq_flags = *libc_rseq_flags_p;
+ rseq_feature_size = get_rseq_feature_size();
+ if (rseq_feature_size > rseq_size)
+ rseq_feature_size = rseq_size;
return;
}
- if (!rseq_available())
- return;
rseq_ownership = 1;
+ if (!rseq_available()) {
+ rseq_size = 0;
+ rseq_feature_size = 0;
+ return;
+ }
rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer();
- rseq_size = sizeof(struct rseq_abi);
rseq_flags = 0;
+ rseq_feature_size = get_rseq_feature_size();
+ if (rseq_feature_size == ORIG_RSEQ_FEATURE_SIZE)
+ rseq_size = ORIG_RSEQ_ALLOC_SIZE;
+ else
+ rseq_size = RSEQ_THREAD_AREA_ALLOC_SIZE;
}
static __attribute__((destructor))
@@ -134,6 +191,7 @@ void rseq_exit(void)
return;
rseq_offset = 0;
rseq_size = -1U;
+ rseq_feature_size = -1U;
rseq_ownership = 0;
}
@@ -148,3 +206,16 @@ int32_t rseq_fallback_current_cpu(void)
}
return cpu;
}
+
+int32_t rseq_fallback_current_node(void)
+{
+ uint32_t cpu_id, node_id;
+ int ret;
+
+ ret = sys_getcpu(&cpu_id, &node_id);
+ if (ret) {
+ perror("sys_getcpu()");
+ return ret;
+ }
+ return (int32_t) node_id;
+}
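+
+/*
+ * Editor's sketch (hypothetical caller): intended for when the
+ * registered rseq area is too small to expose node_id, e.g.:
+ *
+ *	int32_t node = rseq_node_id_available() ?
+ *		       (int32_t) rseq_current_node_id() :
+ *		       rseq_fallback_current_node();
+ */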
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index 6f7513384bf5..d7364ea4d201 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -20,6 +20,15 @@
#include "rseq-abi.h"
#include "compiler.h"
+#ifndef rseq_sizeof_field
+#define rseq_sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+
+#ifndef rseq_offsetofend
+#define rseq_offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + rseq_sizeof_field(TYPE, MEMBER))
+#endif
+
/*
* Empty code injection macros, override when testing.
* It is important to consider that the ASM injection macros need to be
@@ -47,14 +56,38 @@
#include "rseq-thread-pointer.h"
-/* Offset from the thread pointer to the rseq area.  */
+/* Offset from the thread pointer to the rseq area. */
extern ptrdiff_t rseq_offset;
-/* Size of the registered rseq area.  0 if the registration was
-   unsuccessful.  */
+
+/*
+ * Size of the registered rseq area. 0 if the registration was
+ * unsuccessful.
+ */
extern unsigned int rseq_size;
-/* Flags used during rseq registration.  */
+
+/* Flags used during rseq registration. */
extern unsigned int rseq_flags;
+/*
+ * rseq feature size supported by the kernel. 0 if the registration was
+ * unsuccessful.
+ */
+extern unsigned int rseq_feature_size;
+
+enum rseq_mo {
+ RSEQ_MO_RELAXED = 0,
+ RSEQ_MO_CONSUME = 1, /* Unused */
+ RSEQ_MO_ACQUIRE = 2, /* Unused */
+ RSEQ_MO_RELEASE = 3,
+ RSEQ_MO_ACQ_REL = 4, /* Unused */
+ RSEQ_MO_SEQ_CST = 5, /* Unused */
+};
+
+enum rseq_percpu_mode {
+ RSEQ_PERCPU_CPU_ID = 0,
+ RSEQ_PERCPU_MM_CID = 1,
+};
+
static inline struct rseq_abi *rseq_get_abi(void)
{
return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
@@ -119,6 +152,11 @@ int rseq_unregister_current_thread(void);
int32_t rseq_fallback_current_cpu(void);
/*
+ * Restartable sequence fallback for reading the current node number.
+ */
+int32_t rseq_fallback_current_node(void);
+
+/*
* Values returned can be either the current CPU number, -1 (rseq is
* uninitialized), or -2 (rseq initialization has failed).
*/
@@ -153,6 +191,30 @@ static inline uint32_t rseq_current_cpu(void)
return cpu;
}
+static inline bool rseq_node_id_available(void)
+{
+ return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, node_id);
+}
+
+/*
+ * Current NUMA node number.
+ */
+static inline uint32_t rseq_current_node_id(void)
+{
+ assert(rseq_node_id_available());
+ return RSEQ_ACCESS_ONCE(rseq_get_abi()->node_id);
+}
+
+static inline bool rseq_mm_cid_available(void)
+{
+ return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, mm_cid);
+}
+
+static inline uint32_t rseq_current_mm_cid(void)
+{
+ return RSEQ_ACCESS_ONCE(rseq_get_abi()->mm_cid);
+}
+
static inline void rseq_clear_rseq_cs(void)
{
RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0);
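
[Editor's note] rseq_offsetofend() is what turns the kernel's advertised feature size into a per-field availability test: a field is usable exactly when the feature size reaches one byte past its end. A toy sketch of the arithmetic (illustrative only; struct demo and the demo_* macros are hypothetical stand-ins, though the 20/24-byte offsets match node_id's placement in struct rseq_abi):

#include <stddef.h>
#include <stdio.h>

struct demo {
	char a[20];	/* stands in for the original 20-byte feature set */
	int node_id;	/* extended field starting at offset 20 */
};

#define demo_sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define demo_offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + demo_sizeof_field(TYPE, MEMBER))

int main(void)
{
	unsigned int feature_size = 20;	/* pretend-kernel feature size */

	/* offsetofend(node_id) is 24 > 20, so the field is unavailable. */
	printf("offsetofend(node_id) = %zu, available = %d\n",
	       demo_offsetofend(struct demo, node_id),
	       feature_size >= demo_offsetofend(struct demo, node_id));
	return 0;
}
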
@@ -174,4 +236,149 @@ static inline void rseq_prepare_unload(void)
rseq_clear_rseq_cs();
}
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+ intptr_t *v, intptr_t expect,
+ intptr_t newv, int cpu)
+{
+ if (rseq_mo != RSEQ_MO_RELAXED)
+ return -1;
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_cmpeqv_storev_relaxed_cpu_id(v, expect, newv, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_cmpeqv_storev_relaxed_mm_cid(v, expect, newv, cpu);
+ }
+ return -1;
+}
+
+/*
+ * Compare @v against @expectnot. When it does _not_ match, load @v
+ * into @load, and store the content of *@v + voffp into @v.
+ */
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+ intptr_t *v, intptr_t expectnot, long voffp, intptr_t *load,
+ int cpu)
+{
+ if (rseq_mo != RSEQ_MO_RELAXED)
+ return -1;
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_cmpnev_storeoffp_load_relaxed_cpu_id(v, expectnot, voffp, load, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_cmpnev_storeoffp_load_relaxed_mm_cid(v, expectnot, voffp, load, cpu);
+ }
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+ intptr_t *v, intptr_t count, int cpu)
+{
+ if (rseq_mo != RSEQ_MO_RELAXED)
+ return -1;
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_addv_relaxed_cpu_id(v, count, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_addv_relaxed_mm_cid(v, count, cpu);
+ }
+ return -1;
+}
+
+#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
+/*
+ * pval = *(ptr+off)
+ * *pval += inc;
+ */
+static inline __attribute__((always_inline))
+int rseq_offset_deref_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+ intptr_t *ptr, long off, intptr_t inc, int cpu)
+{
+ if (rseq_mo != RSEQ_MO_RELAXED)
+ return -1;
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_offset_deref_addv_relaxed_cpu_id(ptr, off, inc, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_offset_deref_addv_relaxed_mm_cid(ptr, off, inc, cpu);
+ }
+ return -1;
+}
+#endif
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+ intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ switch (rseq_mo) {
+ case RSEQ_MO_RELAXED:
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_cmpeqv_trystorev_storev_relaxed_cpu_id(v, expect, v2, newv2, newv, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_cmpeqv_trystorev_storev_relaxed_mm_cid(v, expect, v2, newv2, newv, cpu);
+ }
+ return -1;
+ case RSEQ_MO_RELEASE:
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_cmpeqv_trystorev_storev_release_cpu_id(v, expect, v2, newv2, newv, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_cmpeqv_trystorev_storev_release_mm_cid(v, expect, v2, newv2, newv, cpu);
+ }
+ return -1;
+ default:
+ return -1;
+ }
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+ intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ if (rseq_mo != RSEQ_MO_RELAXED)
+ return -1;
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_cmpeqv_cmpeqv_storev_relaxed_cpu_id(v, expect, v2, expect2, newv, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_cmpeqv_cmpeqv_storev_relaxed_mm_cid(v, expect, v2, expect2, newv, cpu);
+ }
+ return -1;
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
+ intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ switch (rseq_mo) {
+ case RSEQ_MO_RELAXED:
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_cmpeqv_trymemcpy_storev_relaxed_cpu_id(v, expect, dst, src, len, newv, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_cmpeqv_trymemcpy_storev_relaxed_mm_cid(v, expect, dst, src, len, newv, cpu);
+ }
+ return -1;
+ case RSEQ_MO_RELEASE:
+ switch (percpu_mode) {
+ case RSEQ_PERCPU_CPU_ID:
+ return rseq_cmpeqv_trymemcpy_storev_release_cpu_id(v, expect, dst, src, len, newv, cpu);
+ case RSEQ_PERCPU_MM_CID:
+ return rseq_cmpeqv_trymemcpy_storev_release_mm_cid(v, expect, dst, src, len, newv, cpu);
+ }
+ return -1;
+ default:
+ return -1;
+ }
+}
+
#endif /* RSEQ_H_ */
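
[Editor's note] The dispatch wrappers above select an arch-specific critical section from a memory-ordering and per-CPU-indexing mode. A hedged usage sketch (not part of the patch) incrementing a per-mm-CID counter: rseq_addv() returns non-zero on abort or CPU/CID mismatch, so the loop re-reads the current concurrency id and retries. MAX_CIDS is an assumed bound for this sketch; concurrency ids are dense and bounded by min(nr_threads, nr_cpus), so a real caller would size the array from that.

#include <stdint.h>
#include <stdio.h>
#include "rseq.h"

#define MAX_CIDS 256	/* assumed upper bound for this sketch */
static intptr_t counters[MAX_CIDS];

int main(void)
{
	if (rseq_register_current_thread())
		return 1;
	if (!rseq_mm_cid_available()) {
		fprintf(stderr, "mm_cid not supported by this kernel\n");
		return 1;
	}
	for (;;) {
		int cid = (int) rseq_current_mm_cid();

		/* Non-zero means abort or mismatch: re-read the cid, retry. */
		if (!rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_MM_CID,
			       &counters[cid], 1, cid))
			break;
	}
	printf("incremented a per-mm-cid counter\n");
	return 0;
}
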
diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh
index f51bc83c9e41..8d31426ab41f 100755
--- a/tools/testing/selftests/rseq/run_param_test.sh
+++ b/tools/testing/selftests/rseq/run_param_test.sh
@@ -42,6 +42,11 @@ function do_tests()
./param_test ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1
echo "Running compare-twice test ${TEST_NAME[$i]}"
./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1
+
+ echo "Running mm_cid test ${TEST_NAME[$i]}"
+ ./param_test_mm_cid ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1
+ echo "Running mm_cid compare-twice test ${TEST_NAME[$i]}"
+ ./param_test_mm_cid_compare_twice ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1
let "i++"
done
}
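
[Editor's note] The new param_test_mm_cid binaries exercise the RSEQ_PERCPU_MM_CID variants of the wrappers above. A natural consumer of the new node id API is a consistency check like the following sketch (illustrative only, not the selftests' actual code): when rseq reports the node_id field available, it should agree with the sys_getcpu()-based fallback. Note that a migration between the two reads can make even correct values disagree, so a strict test would pin the thread first.

#include <stdint.h>
#include <stdio.h>
#include "rseq.h"

int main(void)
{
	uint32_t node;

	if (rseq_register_current_thread())
		return 1;
	if (!rseq_node_id_available()) {
		printf("rseq node_id not available, skipping\n");
		return 0;
	}
	node = rseq_current_node_id();
	/*
	 * A migration between the read above and the fallback below
	 * could make correct values disagree.
	 */
	if ((int32_t) node != rseq_fallback_current_node())
		fprintf(stderr, "node id mismatch (or migration)\n");
	printf("current NUMA node: %u\n", node);
	return 0;
}
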