author     Nicholas Piggin <npiggin@gmail.com>       2022-11-26 19:59:23 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>     2022-12-02 17:48:49 +1100
commit     bd48287b2cf4cd6e95576db3a94fd2a7cdf9832d (patch)
tree       533571715d41dceb7f80df1cc23ceeb8424deb58 /arch/powerpc/lib/qspinlock.c
parent     085f03311bcede99550e08a1f7cad41bf758b460 (diff)
powerpc/qspinlock: implement option to yield to previous node
Queued waiters which are not at the head of the queue don't spin on the lock word but on their own qnode lock word, waiting for the previous queued CPU to release them. Add an option which allows these waiters to yield to the previous CPU if its vCPU is preempted.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-9-npiggin@gmail.com
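A note on the mechanism (illustrative, not part of the patch): the yield_count parity test in yield_to_prev() below relies on the shared-processor convention that the hypervisor bumps a per-vCPU dispatch count, so an odd value means that vCPU is currently preempted. A minimal sketch of that check, using a hypothetical helper name and kernel-style types:

/*
 * Hypothetical helper, sketching the convention assumed by yield_to_prev():
 * the hypervisor-maintained dispatch count for a vCPU is odd while the vCPU
 * is preempted and even while it is running.
 */
static inline bool prev_vcpu_is_preempted(u32 yield_count)
{
	return (yield_count & 1) != 0;	/* odd => preempted, worth yielding */
}

Only when node->locked is still clear and the previous vCPU is preempted in this sense does the waiter call yield_to_preempted() (a directed yield to the hypervisor) rather than spinning in cpu_relax(); the new qspl_pv_yield_prev debugfs file allows this behaviour to be switched off at runtime.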
Diffstat (limited to 'arch/powerpc/lib/qspinlock.c')
-rw-r--r--  arch/powerpc/lib/qspinlock.c  46
1 file changed, 45 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 18e21574e6c5..41afd8e68918 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -26,6 +26,7 @@ static bool maybe_stealers __read_mostly = true;
static int head_spins __read_mostly = (1 << 8);
static bool pv_yield_owner __read_mostly = true;
+static bool pv_yield_prev __read_mostly = true;
static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
@@ -195,6 +196,32 @@ relax:
cpu_relax();
}
+static __always_inline void yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
+{
+ int prev_cpu = decode_tail_cpu(val);
+ u32 yield_count;
+
+ if (!paravirt)
+ goto relax;
+
+ if (!pv_yield_prev)
+ goto relax;
+
+ yield_count = yield_count_of(prev_cpu);
+ if ((yield_count & 1) == 0)
+ goto relax; /* owner vcpu is running */
+
+ smp_rmb(); /* See yield_to_locked_owner comment */
+
+ if (!node->locked) {
+ yield_to_preempted(prev_cpu, yield_count);
+ return;
+ }
+
+relax:
+ cpu_relax();
+}
+
static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
{
@@ -269,7 +296,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
/* Wait for mcs node lock to be released */
while (!node->locked)
- cpu_relax();
+ yield_to_prev(lock, node, old, paravirt);
smp_rmb(); /* acquire barrier for the mcs lock */
}
@@ -417,12 +444,29 @@ static int pv_yield_owner_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n");
+static int pv_yield_prev_set(void *data, u64 val)
+{
+ pv_yield_prev = !!val;
+
+ return 0;
+}
+
+static int pv_yield_prev_get(void *data, u64 *val)
+{
+ *val = pv_yield_prev;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, pv_yield_prev_set, "%llu\n");
+
static __init int spinlock_debugfs_init(void)
{
debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
if (is_shared_processor()) {
debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
+ debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
}
return 0;