author	Linus Torvalds <torvalds@linux-foundation.org>	2023-10-21 18:46:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-10-21 18:46:47 -0700
commit	1acfd2bd3f0d9dc34ea1871a445c554220945d9f (patch)
tree	7891a7513ebc18345e4e4c37cad1be5ee102b352 /arch
parent	d537ae43f8a107761fb5a85c3f0cfce5ca79bcb1 (diff)
parent	f9bc9bbe8afdf83412728f0b464979a72a3b9ec2 (diff)
Merge tag 'powerpc-6.6-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:

 - Fix stale propagated yield_cpu in qspinlocks leading to lockups

 - Fix broken hugepages on some configs due to ARCH_FORCE_MAX_ORDER

 - Fix a spurious warning when copros are in use at exit time

Thanks to Nicholas Piggin, Christophe Leroy, Nysal Jan K.A, Sachin Sant,
and Shrikanth Hegde.

* tag 'powerpc-6.6-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/qspinlock: Fix stale propagated yield_cpu
  powerpc/64s/radix: Don't warn on copros in radix__tlb_flush()
  powerpc/mm: Allow ARCH_FORCE_MAX_ORDER up to 12
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig                   2
-rw-r--r--  arch/powerpc/lib/qspinlock.c           3
-rw-r--r--  arch/powerpc/mm/book3s64/radix_tlb.c   9
3 files changed, 5 insertions, 9 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3aaadfd2c8eb..d5d5388973ac 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -910,7 +910,7 @@ config ARCH_FORCE_MAX_ORDER
default "6" if PPC32 && PPC_64K_PAGES
range 4 10 if PPC32 && PPC_256K_PAGES
default "4" if PPC32 && PPC_256K_PAGES
- range 10 10
+ range 10 12
default "10"
help
The kernel page allocator limits the size of maximal physically
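The one-line change above only widens the permitted range of ARCH_FORCE_MAX_ORDER from 10..10 to 10..12; the default stays 10. Since the largest physically contiguous block the page allocator can hand out is the base page size shifted left by that order, order 12 is what, for example, a 16 MiB hugepage needs on a 4 KiB-page config (16 MiB / 4 KiB = 2^12 pages). A small stand-alone sketch of that arithmetic, written as a hypothetical user-space program rather than kernel code:

#include <stdio.h>

/*
 * Illustrative only, not kernel code: size of the largest physically
 * contiguous block the buddy allocator can hand out for a given base
 * page size and maximum order.
 */
static unsigned long max_contig_bytes(unsigned long page_size, unsigned int order)
{
	return page_size << order;
}

int main(void)
{
	/* 4 KiB base pages: order 10 caps allocations at 4 MiB, order 12 at 16 MiB */
	printf("order 10: %lu KiB\n", max_contig_bytes(4096, 10) >> 10);
	printf("order 12: %lu KiB\n", max_contig_bytes(4096, 12) >> 10);
	return 0;
}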
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 253620979d0c..6dd2f46bd3ef 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -406,6 +406,9 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
 	if ((yield_count & 1) == 0)
 		goto yield_prev; /* owner vcpu is running */
 
+	if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
+		goto yield_prev; /* re-sample lock owner */
+
 	spin_end();
 
 	preempted = true;
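The three added lines above are the whole yield_cpu fix: yield_cpu is a hint propagated through the waiter queue from an earlier sample of the lock word, so by the time a waiter acts on it the recorded CPU may no longer hold the lock. The new check re-reads lock->val and, if the owner has changed, jumps back to re-sample instead of yielding to the wrong vCPU. A minimal stand-alone sketch of that pattern of validating a cached hint against a fresh read before acting on it, using hypothetical toy_* names rather than the kernel's qspinlock internals:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical lock word layout: the low 16 bits hold the owner CPU. */
struct toy_lock {
	_Atomic uint32_t val;
};

static int toy_owner_cpu(uint32_t val)
{
	return (int)(val & 0xffff);
}

/*
 * Yield to hint_cpu only if a fresh read of the lock word still names it
 * as the owner; otherwise report that the hint went stale so the caller
 * can go back and re-sample, mirroring the "re-sample lock owner" bailout
 * added in the hunk above.
 */
static bool toy_yield_if_still_owner(struct toy_lock *lock, int hint_cpu)
{
	uint32_t val = atomic_load_explicit(&lock->val, memory_order_relaxed);

	if (toy_owner_cpu(val) != hint_cpu)
		return false;	/* stale hint: caller should re-sample */

	/* a real implementation would yield the vCPU to hint_cpu here */
	return true;
}

int main(void)
{
	struct toy_lock lock = { .val = 3 };	/* CPU 3 currently owns the lock */

	/* The cached hint says CPU 5, which is stale, so no yield happens. */
	return toy_yield_if_still_owner(&lock, 5) ? 1 : 0;
}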
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 39acc2cbab4c..9e1f6558d026 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1212,14 +1212,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
 			smp_mb(); /* see radix__flush_tlb_mm */
 			exit_flush_lazy_tlbs(mm);
-			_tlbiel_pid(mm->context.id, RIC_FLUSH_ALL);
-
-			/*
-			 * It should not be possible to have coprocessors still
-			 * attached here.
-			 */
-			if (WARN_ON_ONCE(atomic_read(&mm->context.copros) > 0))
-				__flush_all_mm(mm, true);
+			__flush_all_mm(mm, true);
 
 			preempt_enable();
 		} else {
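The removed WARN_ON_ONCE() assumed no coprocessor (copro) contexts could still be attached to the mm during this final exit-time flush, but that can happen, which is the spurious warning mentioned in the merge description. The fix drops the CPU-only _tlbiel_pid() call and the warn-and-fallback, and unconditionally uses __flush_all_mm(mm, true), the same call the old fallback already made, so coprocessor translations are covered without any warning. A rough stand-alone sketch of that simplification, with hypothetical toy_* helpers standing in for the real flush primitives:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical per-address-space state: count of attached coprocessor contexts. */
struct toy_mm {
	_Atomic int copros;
};

static void flush_cpu_only(struct toy_mm *mm)
{
	(void)mm;
	printf("CPU-local TLB flush\n");	/* stand-in for _tlbiel_pid() */
}

static void flush_cpu_and_copros(struct toy_mm *mm)
{
	(void)mm;
	printf("global flush, coprocessors included\n");	/* stand-in for __flush_all_mm() */
}

/* Before: CPU-only fast path, plus a warn-and-fallback when copros were attached. */
static void final_flush_old(struct toy_mm *mm)
{
	flush_cpu_only(mm);
	if (atomic_load(&mm->copros) > 0)	/* this is the condition that used to WARN */
		flush_cpu_and_copros(mm);
}

/* After: always take the variant that covers both CPUs and coprocessors. */
static void final_flush_new(struct toy_mm *mm)
{
	flush_cpu_and_copros(mm);
}

int main(void)
{
	struct toy_mm mm = { .copros = 1 };	/* a coprocessor is still attached at exit */

	final_flush_old(&mm);	/* would have warned before falling back */
	final_flush_new(&mm);	/* just does the full flush */
	return 0;
}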