Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--  arch/powerpc/include/asm/8xx_immap.h | 2
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/asm/book3s/32/kup.h | 123
-rw-r--r--  arch/powerpc/include/asm/book3s/32/pgtable.h | 77
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-pkey.h | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/kup.h | 54
-rw-r--r--  arch/powerpc/include/asm/book3s/64/mmu.h | 7
-rw-r--r--  arch/powerpc/include/asm/bug.h | 1
-rw-r--r--  arch/powerpc/include/asm/cpm2.h | 3
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 2
-rw-r--r--  arch/powerpc/include/asm/dtl.h | 1
-rw-r--r--  arch/powerpc/include/asm/feature-fixups.h | 1
-rw-r--r--  arch/powerpc/include/asm/fs_pd.h | 22
-rw-r--r--  arch/powerpc/include/asm/ftrace.h | 24
-rw-r--r--  arch/powerpc/include/asm/hw_breakpoint.h | 1
-rw-r--r--  arch/powerpc/include/asm/ibmebus.h | 2
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 3
-rw-r--r--  arch/powerpc/include/asm/kfence.h | 2
-rw-r--r--  arch/powerpc/include/asm/kup.h | 91
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 37
-rw-r--r--  arch/powerpc/include/asm/macio.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 9
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 1
-rw-r--r--  arch/powerpc/include/asm/module.h | 4
-rw-r--r--  arch/powerpc/include/asm/mpc8260.h | 22
-rw-r--r--  arch/powerpc/include/asm/nohash/32/kup-8xx.h | 62
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h | 2
-rw-r--r--  arch/powerpc/include/asm/nohash/64/pgtable.h | 2
-rw-r--r--  arch/powerpc/include/asm/nohash/kup-booke.h | 68
-rw-r--r--  arch/powerpc/include/asm/nohash/mmu-e500.h | 3
-rw-r--r--  arch/powerpc/include/asm/paca.h | 6
-rw-r--r--  arch/powerpc/include/asm/page.h | 30
-rw-r--r--  arch/powerpc/include/asm/paravirt.h | 1
-rw-r--r--  arch/powerpc/include/asm/pci.h | 3
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 4
-rw-r--r--  arch/powerpc/include/asm/plpar_wrappers.h | 1
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h | 2
-rw-r--r--  arch/powerpc/include/asm/processor.h | 5
-rw-r--r--  arch/powerpc/include/asm/reg.h | 2
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 3
-rw-r--r--  arch/powerpc/include/asm/sections.h | 2
-rw-r--r--  arch/powerpc/include/asm/setup.h | 1
-rw-r--r--  arch/powerpc/include/asm/topology.h | 15
-rw-r--r--  arch/powerpc/include/asm/uaccess.h | 6
-rw-r--r--  arch/powerpc/include/asm/vermagic.h | 4
-rw-r--r--  arch/powerpc/include/asm/vphn.h | 24
46 files changed, 340 insertions, 401 deletions
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
index bdf0563ba423..f9cac46a95cb 100644
--- a/arch/powerpc/include/asm/8xx_immap.h
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -560,5 +560,7 @@ typedef struct immap {
cpm8xx_t im_cpm; /* Communication processor */
} immap_t;
+extern immap_t __iomem *mpc8xx_immr;
+
#endif /* __IMMAP_8XX__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 419319c4963c..61a8d5555cd7 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -3,7 +3,6 @@ generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_spu.h
generic-y += agp.h
-generic-y += export.h
generic-y += kvm_types.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 678f9c9d89b6..4e14a5427a63 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -9,79 +9,53 @@
#ifndef __ASSEMBLY__
-#include <linux/jump_label.h>
-
-extern struct static_key_false disable_kuap_key;
-
-static __always_inline bool kuep_is_disabled(void)
-{
- return !IS_ENABLED(CONFIG_PPC_KUEP);
-}
-
#ifdef CONFIG_PPC_KUAP
#include <linux/sched.h>
#define KUAP_NONE (~0UL)
-#define KUAP_ALL (~1UL)
-static __always_inline bool kuap_is_disabled(void)
-{
- return static_branch_unlikely(&disable_kuap_key);
-}
-
-static inline void kuap_lock_one(unsigned long addr)
+static __always_inline void kuap_lock_one(unsigned long addr)
{
mtsr(mfsr(addr) | SR_KS, addr);
isync(); /* Context sync required after mtsr() */
}
-static inline void kuap_unlock_one(unsigned long addr)
+static __always_inline void kuap_unlock_one(unsigned long addr)
{
mtsr(mfsr(addr) & ~SR_KS, addr);
isync(); /* Context sync required after mtsr() */
}
-static inline void kuap_lock_all(void)
+static __always_inline void uaccess_begin_32s(unsigned long addr)
{
- update_user_segments(mfsr(0) | SR_KS);
- isync(); /* Context sync required after mtsr() */
-}
+ unsigned long tmp;
-static inline void kuap_unlock_all(void)
-{
- update_user_segments(mfsr(0) & ~SR_KS);
- isync(); /* Context sync required after mtsr() */
+ asm volatile(ASM_MMU_FTR_IFSET(
+ "mfsrin %0, %1;"
+ "rlwinm %0, %0, 0, %2;"
+ "mtsrin %0, %1;"
+ "isync", "", %3)
+ : "=&r"(tmp)
+ : "r"(addr), "i"(~SR_KS), "i"(MMU_FTR_KUAP)
+ : "memory");
}
-void kuap_lock_all_ool(void);
-void kuap_unlock_all_ool(void);
-
-static inline void kuap_lock_addr(unsigned long addr, bool ool)
+static __always_inline void uaccess_end_32s(unsigned long addr)
{
- if (likely(addr != KUAP_ALL))
- kuap_lock_one(addr);
- else if (!ool)
- kuap_lock_all();
- else
- kuap_lock_all_ool();
-}
+ unsigned long tmp;
-static inline void kuap_unlock(unsigned long addr, bool ool)
-{
- if (likely(addr != KUAP_ALL))
- kuap_unlock_one(addr);
- else if (!ool)
- kuap_unlock_all();
- else
- kuap_unlock_all_ool();
+ asm volatile(ASM_MMU_FTR_IFSET(
+ "mfsrin %0, %1;"
+ "oris %0, %0, %2;"
+ "mtsrin %0, %1;"
+ "isync", "", %3)
+ : "=&r"(tmp)
+ : "r"(addr), "i"(SR_KS >> 16), "i"(MMU_FTR_KUAP)
+ : "memory");
}
-static inline void __kuap_lock(void)
-{
-}
-
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
{
unsigned long kuap = current->thread.kuap;
@@ -90,18 +64,19 @@ static inline void __kuap_save_and_lock(struct pt_regs *regs)
return;
current->thread.kuap = KUAP_NONE;
- kuap_lock_addr(kuap, false);
+ kuap_lock_one(kuap);
}
+#define __kuap_save_and_lock __kuap_save_and_lock
-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
{
}
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
if (unlikely(kuap != KUAP_NONE)) {
current->thread.kuap = KUAP_NONE;
- kuap_lock_addr(kuap, false);
+ kuap_lock_one(kuap);
}
if (likely(regs->kuap == KUAP_NONE))
@@ -109,10 +84,10 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
current->thread.kuap = regs->kuap;
- kuap_unlock(regs->kuap, false);
+ kuap_unlock_one(regs->kuap);
}
-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
{
unsigned long kuap = current->thread.kuap;
@@ -120,9 +95,10 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
return kuap;
}
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
-static __always_inline void __allow_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
{
BUILD_BUG_ON(!__builtin_constant_p(dir));
@@ -130,10 +106,10 @@ static __always_inline void __allow_user_access(void __user *to, const void __user *from,
return;
current->thread.kuap = (__force u32)to;
- kuap_unlock_one((__force u32)to);
+ uaccess_begin_32s((__force u32)to);
}
-static __always_inline void __prevent_user_access(unsigned long dir)
+static __always_inline void prevent_user_access(unsigned long dir)
{
u32 kuap = current->thread.kuap;
@@ -143,42 +119,51 @@ static __always_inline void __prevent_user_access(unsigned long dir)
return;
current->thread.kuap = KUAP_NONE;
- kuap_lock_addr(kuap, true);
+ uaccess_end_32s(kuap);
}
-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
if (flags != KUAP_NONE) {
current->thread.kuap = KUAP_NONE;
- kuap_lock_addr(flags, true);
+ uaccess_end_32s(flags);
}
return flags;
}
-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void restore_user_access(unsigned long flags)
{
if (flags != KUAP_NONE) {
current->thread.kuap = flags;
- kuap_unlock(flags, true);
+ uaccess_begin_32s(flags);
}
}
-static inline bool
+static __always_inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
unsigned long kuap = regs->kuap;
- if (!is_write || kuap == KUAP_ALL)
+ if (!is_write)
return false;
if (kuap == KUAP_NONE)
return true;
- /* If faulting address doesn't match unlocked segment, unlock all */
- if ((kuap ^ address) & 0xf0000000)
- regs->kuap = KUAP_ALL;
+ /*
+ * If faulting address doesn't match unlocked segment, change segment.
+ * In case of unaligned store crossing two segments, emulate store.
+ */
+ if ((kuap ^ address) & 0xf0000000) {
+ if (!(kuap & 0x0fffffff) && address > kuap - 4 && fix_alignment(regs)) {
+ regs_add_return_ip(regs, 4);
+ emulate_single_step(regs);
+ } else {
+ regs->kuap = address;
+ }
+ }
return false;
}
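
The reworked __bad_kuap_fault() above no longer falls back to unlocking all segments: on 32s, user space is covered by sixteen 256MB segments selected by the top four address bits, and only the segment containing the access target is unlocked. A minimal stand-alone sketch of the segment arithmetic it relies on (helper names are hypothetical, not kernel API):

#include <stdbool.h>
#include <stdint.h>

#define SEGMENT_MASK 0xf0000000u	/* top 4 bits select a 256MB segment */

/* true when the faulting address lies outside the unlocked segment */
static bool crosses_segment(uint32_t unlocked, uint32_t addr)
{
	return ((unlocked ^ addr) & SEGMENT_MASK) != 0;
}

/*
 * The special case handled above: the unlocked address sits exactly on a
 * segment boundary and the store starts in the last three bytes of the
 * segment below it, i.e. an unaligned store straddling the boundary,
 * which the fault handler emulates via fix_alignment().
 */
static bool straddles_boundary(uint32_t unlocked, uint32_t addr)
{
	return (unlocked & ~SEGMENT_MASK) == 0 && addr > unlocked - 4;
}
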
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 86650d13ebe6..9b13eb14e21b 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -536,58 +536,43 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* This low level function performs the actual PTE insertion
- * Setting the PTE depends on the MMU type and other factors. It's
- * an horrible mess that I'm not going to try to clean up now but
- * I'm keeping it in one place rather than spread around
+ * Setting the PTE depends on the MMU type and other factors.
+ *
+ * First case is 32-bit in UP mode with 32-bit PTEs, we need to preserve
+ * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+ * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+ * and so we need to keep track that this PTE needs invalidating.
+ *
+ * Second case is 32-bit with 64-bit PTE. In this case, we
+ * can just store as long as we do the two halves in the right order
+ * with a barrier in between. This is possible because we take care,
+ * in the hash code, to pre-invalidate if the PTE was already hashed,
+ * which synchronizes us with any concurrent invalidation.
+ * In the percpu case, we fall back to the simple update preserving
+ * the hash bits (ie, same as the non-SMP case).
+ *
+ * Third case is 32-bit in SMP mode with 32-bit PTEs. We use the
+ * helper pte_update() which does an atomic update. We need to do that
+ * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+ * per-CPU PTE such as a kmap_atomic, we also do a simple update preserving
+ * the hash bits instead.
*/
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, int percpu)
{
-#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
- /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
- * helper pte_update() which does an atomic update. We need to do that
- * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
- * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
- * the hash bits instead (ie, same as the non-SMP case)
- */
- if (percpu)
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
- else
- pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
+ if ((!IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_PTE_64BIT)) || percpu) {
+ *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) |
+ (pte_val(pte) & ~_PAGE_HASHPTE));
+ } else if (IS_ENABLED(CONFIG_PTE_64BIT)) {
+ if (pte_val(*ptep) & _PAGE_HASHPTE)
+ flush_hash_entry(mm, ptep, addr);
-#elif defined(CONFIG_PTE_64BIT)
- /* Second case is 32-bit with 64-bit PTE. In this case, we
- * can just store as long as we do the two halves in the right order
- * with a barrier in between. This is possible because we take care,
- * in the hash code, to pre-invalidate if the PTE was already hashed,
- * which synchronizes us with any concurrent invalidation.
- * In the percpu case, we also fallback to the simple update preserving
- * the hash bits
- */
- if (percpu) {
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
- return;
+ asm volatile("stw%X0 %2,%0; eieio; stw%X1 %L2,%1" :
+ "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) :
+ "r" (pte) : "memory");
+ } else {
+ pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
}
- if (pte_val(*ptep) & _PAGE_HASHPTE)
- flush_hash_entry(mm, ptep, addr);
- __asm__ __volatile__("\
- stw%X0 %2,%0\n\
- eieio\n\
- stw%X1 %L2,%1"
- : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
- : "r" (pte) : "memory");
-
-#else
- /* Third case is 32-bit hash table in UP mode, we need to preserve
- * the _PAGE_HASHPTE bit since we may not have invalidated the previous
- * translation in the hash yet (done in a subsequent flush_tlb_xxx())
- * and see we need to keep track that this PTE needs invalidating
- */
- *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
- | (pte_val(pte) & ~_PAGE_HASHPTE));
-#endif
}
/*
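
The consolidated __set_pte_at() above keeps the three historical cases but selects them with IS_ENABLED() instead of #ifdef. The subtle one is the 64-bit PTE store: the two 32-bit halves must reach memory in order so a concurrent hash fault never sees the new low word paired with the old high word. A stand-alone sketch of that ordering (illustrative names; the compiler barrier stands in for the hardware eieio store barrier used in the real asm):

#include <stdint.h>

/* store a 64-bit PTE as two big-endian 32-bit halves, high word first */
static void store_pte64(volatile uint32_t *ptep, uint64_t pte)
{
	ptep[0] = (uint32_t)(pte >> 32);	/* stw %X0: high half */
	__asm__ volatile("" ::: "memory");	/* real code: eieio */
	ptep[1] = (uint32_t)pte;		/* stw %X1: low half */
}
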
diff --git a/arch/powerpc/include/asm/book3s/64/hash-pkey.h b/arch/powerpc/include/asm/book3s/64/hash-pkey.h
index f1e60d579f6c..6c5564c4fae4 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-pkey.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-pkey.h
@@ -24,7 +24,7 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL));
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP) ||
+ if (mmu_has_feature(MMU_FTR_KUAP) ||
mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
if ((pte_pkey == 0) && (flags & HPTE_USE_KERNEL_KEY))
return HASH_DEFAULT_KERNEL_KEY;
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 84c09e546115..497a7bd31ecc 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -31,7 +31,7 @@
mfspr \gpr2, SPRN_AMR
cmpd \gpr1, \gpr2
beq 99f
- END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_KUAP, 68)
isync
mtspr SPRN_AMR, \gpr1
@@ -78,7 +78,7 @@
* No need to restore IAMR when returning to kernel space.
*/
100:
- END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67)
#endif
.endm
@@ -91,7 +91,7 @@
LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999: tdne \gpr1, \gpr2
EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
- END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 67)
#endif
.endm
#endif
@@ -130,7 +130,7 @@
*/
BEGIN_MMU_FTR_SECTION_NESTED(68)
b 100f // skip_save_amr
- END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_KUAP, 68)
/*
* if pkey is disabled and we are entering from userspace
@@ -166,7 +166,7 @@
mtspr SPRN_AMR, \gpr2
isync
102:
- END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_KUAP, 69)
/*
* if entering from kernel we don't need save IAMR
@@ -213,14 +213,14 @@ extern u64 __ro_after_init default_iamr;
* access restrictions. Because of this ignore AMR value when accessing
* userspace via kernel thread.
*/
-static inline u64 current_thread_amr(void)
+static __always_inline u64 current_thread_amr(void)
{
if (current->thread.regs)
return current->thread.regs->amr;
return default_amr;
}
-static inline u64 current_thread_iamr(void)
+static __always_inline u64 current_thread_iamr(void)
{
if (current->thread.regs)
return current->thread.regs->iamr;
@@ -230,12 +230,7 @@ static inline u64 current_thread_iamr(void)
#ifdef CONFIG_PPC_KUAP
-static __always_inline bool kuap_is_disabled(void)
-{
- return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
-}
-
-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
{
bool restore_amr = false, restore_iamr = false;
unsigned long amr, iamr;
@@ -243,7 +238,7 @@ static inline void kuap_user_restore(struct pt_regs *regs)
if (!mmu_has_feature(MMU_FTR_PKEY))
return;
- if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
+ if (!mmu_has_feature(MMU_FTR_KUAP)) {
amr = mfspr(SPRN_AMR);
if (amr != regs->amr)
restore_amr = true;
@@ -274,7 +269,7 @@ static inline void kuap_user_restore(struct pt_regs *regs)
*/
}
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
if (likely(regs->amr == amr))
return;
@@ -290,7 +285,7 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
*/
}
-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
{
unsigned long amr = mfspr(SPRN_AMR);
@@ -298,22 +293,16 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
return amr;
}
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
-/* Do nothing, book3s/64 does that in ASM */
-static inline void __kuap_lock(void)
-{
-}
-
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
-{
-}
+/* __kuap_lock() not required, book3s/64 does that in ASM */
/*
* We support individually allowing read or write, but we don't support nesting
* because that would require an expensive read/modify write of the AMR.
*/
-static inline unsigned long get_kuap(void)
+static __always_inline unsigned long get_kuap(void)
{
/*
* We return AMR_KUAP_BLOCKED when we don't support KUAP because
@@ -323,7 +312,7 @@ static inline unsigned long get_kuap(void)
* This has no effect in terms of actually blocking things on hash,
* so it doesn't break anything.
*/
- if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ if (!mmu_has_feature(MMU_FTR_KUAP))
return AMR_KUAP_BLOCKED;
return mfspr(SPRN_AMR);
@@ -331,7 +320,7 @@ static inline unsigned long get_kuap(void)
static __always_inline void set_kuap(unsigned long value)
{
- if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ if (!mmu_has_feature(MMU_FTR_KUAP))
return;
/*
@@ -343,7 +332,8 @@ static __always_inline void set_kuap(unsigned long value)
isync();
}
-static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+static __always_inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
/*
* For radix this will be a storage protection fault (DSISR_PROTFAULT).
@@ -386,12 +376,12 @@ static __always_inline void allow_user_access(void __user *to, const void __user *from,
#else /* CONFIG_PPC_KUAP */
-static inline unsigned long get_kuap(void)
+static __always_inline unsigned long get_kuap(void)
{
return AMR_KUAP_BLOCKED;
}
-static inline void set_kuap(unsigned long value) { }
+static __always_inline void set_kuap(unsigned long value) { }
static __always_inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
@@ -406,7 +396,7 @@ static __always_inline void prevent_user_access(unsigned long dir)
do_uaccess_flush();
}
-static inline unsigned long prevent_user_access_return(void)
+static __always_inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = get_kuap();
@@ -417,7 +407,7 @@ static inline unsigned long prevent_user_access_return(void)
return flags;
}
-static inline void restore_user_access(unsigned long flags)
+static __always_inline void restore_user_access(unsigned long flags)
{
set_kuap(flags);
if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 570a4960cf17..fedbc5d38191 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -71,10 +71,7 @@ extern unsigned int mmu_pid_bits;
/* Base PID to allocate from */
extern unsigned int mmu_base_pid;
-/*
- * memory block size used with radix translation.
- */
-extern unsigned long __ro_after_init radix_mem_block_size;
+extern unsigned long __ro_after_init memory_block_size;
#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
#define PRTB_ENTRIES (1ul << mmu_pid_bits)
@@ -261,7 +258,7 @@ static inline void radix_init_pseries(void) { }
#define arch_clear_mm_cpumask_cpu(cpu, mm) \
do { \
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \
- atomic_dec(&(mm)->context.active_cpus); \
+ dec_mm_active_cpus(mm); \
cpumask_clear_cpu(cpu, mm_cpumask(mm)); \
} \
} while (0)
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 00c6b0b4ede4..1db485aacbd9 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -120,6 +120,7 @@
struct pt_regs;
void hash__do_page_fault(struct pt_regs *);
void bad_page_fault(struct pt_regs *, int);
+void emulate_single_step(struct pt_regs *regs);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _exception_pkey(struct pt_regs *, unsigned long, int);
extern void die(const char *, struct pt_regs *, long);
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index 9ee192a6c5d7..249d43cc6427 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -1080,6 +1080,9 @@ typedef struct im_idma {
#define FCC2_MEM_OFFSET FCC_MEM_OFFSET(1)
#define FCC3_MEM_OFFSET FCC_MEM_OFFSET(2)
+/* Pipeline Maximum Depth */
+#define MPC82XX_BCR_PLDP 0x00800000
+
/* Clocks and GRG's */
enum cpm_clk_dir {
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 443a9d482b15..8765d5158324 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -252,7 +252,7 @@ static inline void cpu_feature_keys_init(void) { }
* This is also required by 52xx family.
*/
#if defined(CONFIG_SMP) || defined(CONFIG_MPC10X_BRIDGE) \
- || defined(CONFIG_PPC_83xx) || defined(CONFIG_8260) \
+ || defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_82xx) \
|| defined(CONFIG_PPC_MPC52xx)
#define CPU_FTR_COMMON CPU_FTR_NEED_COHERENT
#else
diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h
index 4bcb9f9ac764..d6f43d149f8d 100644
--- a/arch/powerpc/include/asm/dtl.h
+++ b/arch/powerpc/include/asm/dtl.h
@@ -39,6 +39,5 @@ extern rwlock_t dtl_access_lock;
extern void register_dtl_buffer(int cpu);
extern void alloc_dtl_buffers(unsigned long *time_limit);
-extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
#endif /* _ASM_POWERPC_DTL_H */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index ac605fc369c4..77824bd289a3 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -292,6 +292,7 @@ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
void apply_feature_fixups(void);
+void update_mmu_feature_fixups(unsigned long mask);
void setup_feature_keys(void);
#endif
diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h
index 8def56ec05c6..d530f68b4eef 100644
--- a/arch/powerpc/include/asm/fs_pd.h
+++ b/arch/powerpc/include/asm/fs_pd.h
@@ -14,28 +14,6 @@
#include <sysdev/fsl_soc.h>
#include <asm/time.h>
-#ifdef CONFIG_CPM2
-#include <asm/cpm2.h>
-
-#if defined(CONFIG_8260)
-#include <asm/mpc8260.h>
-#endif
-
-#define cpm2_map(member) (&cpm2_immr->member)
-#define cpm2_map_size(member, size) (&cpm2_immr->member)
-#define cpm2_unmap(addr) do {} while(0)
-#endif
-
-#ifdef CONFIG_PPC_8xx
-#include <asm/8xx_immap.h>
-
-extern immap_t __iomem *mpc8xx_immr;
-
-#define immr_map(member) (&mpc8xx_immr->member)
-#define immr_map_size(member, size) (&mpc8xx_immr->member)
-#define immr_unmap(addr) do {} while (0)
-#endif
-
static inline int uart_baudrate(void)
{
return get_baudrate();
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 91c049d51d0e..9e5a39b6a311 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -11,8 +11,8 @@
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
/* Ignore unused weak functions which will have larger offsets */
-#ifdef CONFIG_MPROFILE_KERNEL
-#define FTRACE_MCOUNT_MAX_OFFSET 12
+#if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)
+#define FTRACE_MCOUNT_MAX_OFFSET 16
#elif defined(CONFIG_PPC32)
#define FTRACE_MCOUNT_MAX_OFFSET 8
#endif
@@ -22,18 +22,26 @@ extern void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
- /* relocation of mcount call site is the same as the address */
+ if (IS_ENABLED(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY))
+ addr += MCOUNT_INSN_SIZE;
+
return addr;
}
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
unsigned long sp);
+struct module;
+struct dyn_ftrace;
struct dyn_arch_ftrace {
struct module *mod;
};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+#define ftrace_need_init_nop() (true)
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+
struct ftrace_regs {
struct pt_regs regs;
};
@@ -124,15 +132,19 @@ static inline u8 this_cpu_get_ftrace_enabled(void)
{
return get_paca()->ftrace_enabled;
}
-
-void ftrace_free_init_tramp(void);
#else /* CONFIG_PPC64 */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
-static inline void ftrace_free_init_tramp(void) { }
#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
+void ftrace_free_init_tramp(void);
+#else
+static inline void ftrace_free_init_tramp(void) { }
+#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_FTRACE */
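
With CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY the compiler emits two nops at each function entry and records the address of the first, while the kernel patches its call at the second; hence the +MCOUNT_INSN_SIZE adjustment above and the larger FTRACE_MCOUNT_MAX_OFFSET. A sketch of the adjustment in isolation (hypothetical function; MCOUNT_INSN_SIZE is 4 on powerpc):

/* recorded: address of the first prologue nop, as found in mcount_loc */
static unsigned long example_call_adjust(unsigned long recorded)
{
#ifdef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
	return recorded + 4;	/* second nop, the actual patch site */
#else
	return recorded;	/* mprofile/mcount: site == recorded address */
#endif
}
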
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index 84d39fd42f71..66db0147d5b4 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -18,6 +18,7 @@ struct arch_hw_breakpoint {
u16 len; /* length of the target data symbol */
u16 hw_len; /* length programmed in hw */
u8 flags;
+ bool perf_single_step; /* temporarily uninstalled for a perf single step */
};
/* Note: Don't change the first 6 bits below as they are in the same order
diff --git a/arch/powerpc/include/asm/ibmebus.h b/arch/powerpc/include/asm/ibmebus.h
index 088f95b2e14f..6f33253a364a 100644
--- a/arch/powerpc/include/asm/ibmebus.h
+++ b/arch/powerpc/include/asm/ibmebus.h
@@ -46,6 +46,8 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
+struct platform_driver;
+
extern struct bus_type ibmebus_bus_type;
int ibmebus_register_driver(struct platform_driver *drv);
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 34e14dfd8e04..026695943550 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -28,6 +28,9 @@
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
+#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
+#define DMA64_PROPNAME "linux,dma64-ddr-window-info"
+
/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
index 6fd2b4d486c5..424ceef82ae6 100644
--- a/arch/powerpc/include/asm/kfence.h
+++ b/arch/powerpc/include/asm/kfence.h
@@ -23,7 +23,7 @@ static inline bool arch_kfence_init_pool(void)
#ifdef CONFIG_PPC64
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
- struct page *page = virt_to_page(addr);
+ struct page *page = virt_to_page((void *)addr);
__kernel_map_pages(page, 1, !protect);
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index d751ddd08110..ad7e8c5aec3f 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -6,6 +6,12 @@
#define KUAP_WRITE 2
#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+static __always_inline bool kuap_is_disabled(void);
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kup.h>
#endif
@@ -41,26 +47,24 @@ void setup_kuep(bool disabled);
#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled);
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return !mmu_has_feature(MMU_FTR_KUAP);
+}
#else
static inline void setup_kuap(bool disabled) { }
static __always_inline bool kuap_is_disabled(void) { return true; }
-static inline bool
+static __always_inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return false;
}
-static inline void __kuap_lock(void) { }
-static inline void __kuap_save_and_lock(struct pt_regs *regs) { }
-static inline void kuap_user_restore(struct pt_regs *regs) { }
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
-
-static inline unsigned long __kuap_get_and_assert_locked(void)
-{
- return 0;
-}
+static __always_inline void kuap_user_restore(struct pt_regs *regs) { }
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
/*
* book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
@@ -68,11 +72,11 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
* platforms.
*/
#ifndef CONFIG_PPC_BOOK3S_64
-static inline void __allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir) { }
-static inline void __prevent_user_access(unsigned long dir) { }
-static inline unsigned long __prevent_user_access_return(void) { return 0UL; }
-static inline void __restore_user_access(unsigned long flags) { }
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir) { }
+static __always_inline void prevent_user_access(unsigned long dir) { }
+static __always_inline unsigned long prevent_user_access_return(void) { return 0UL; }
+static __always_inline void restore_user_access(unsigned long flags) { }
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* CONFIG_PPC_KUAP */
@@ -85,29 +89,24 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
return __bad_kuap_fault(regs, address, is_write);
}
-static __always_inline void kuap_assert_locked(void)
-{
- if (kuap_is_disabled())
- return;
-
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
- __kuap_get_and_assert_locked();
-}
-
static __always_inline void kuap_lock(void)
{
+#ifdef __kuap_lock
if (kuap_is_disabled())
return;
__kuap_lock();
+#endif
}
static __always_inline void kuap_save_and_lock(struct pt_regs *regs)
{
+#ifdef __kuap_save_and_lock
if (kuap_is_disabled())
return;
__kuap_save_and_lock(regs);
+#endif
}
static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
@@ -120,46 +119,18 @@ static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
static __always_inline unsigned long kuap_get_and_assert_locked(void)
{
- if (kuap_is_disabled())
- return 0;
-
- return __kuap_get_and_assert_locked();
-}
-
-#ifndef CONFIG_PPC_BOOK3S_64
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
-{
- if (kuap_is_disabled())
- return;
-
- __allow_user_access(to, from, size, dir);
-}
-
-static __always_inline void prevent_user_access(unsigned long dir)
-{
- if (kuap_is_disabled())
- return;
-
- __prevent_user_access(dir);
-}
-
-static __always_inline unsigned long prevent_user_access_return(void)
-{
- if (kuap_is_disabled())
- return 0;
-
- return __prevent_user_access_return();
+#ifdef __kuap_get_and_assert_locked
+ if (!kuap_is_disabled())
+ return __kuap_get_and_assert_locked();
+#endif
+ return 0;
}
-static __always_inline void restore_user_access(unsigned long flags)
+static __always_inline void kuap_assert_locked(void)
{
- if (kuap_is_disabled())
- return;
-
- __restore_user_access(flags);
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ kuap_get_and_assert_locked();
}
-#endif /* CONFIG_PPC_BOOK3S_64 */
static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
{
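
The generic wrappers above now use a self-referential macro convention: a platform header that implements an optional hook also defines a macro with the hook's own name, so kuap_lock(), kuap_save_and_lock() and kuap_get_and_assert_locked() can test for the hook with #ifdef and compile to nothing on platforms that don't provide it. The pattern in isolation (hypothetical names):

/* platform header: provide the hook and advertise it to generic code */
static __always_inline void __hook(void) { /* platform work */ }
#define __hook __hook

/* generic header: call the hook only where it exists */
static __always_inline void hook_wrapper(void)
{
#ifdef __hook
	if (!kuap_is_disabled())
		__hook();
#endif
}
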
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 34d44cb17c87..61ec2447dabf 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -6,28 +6,6 @@
#ifndef _ASM_POWERPC_LPPACA_H
#define _ASM_POWERPC_LPPACA_H
-/*
- * The below VPHN macros are outside the __KERNEL__ check since these are
- * used for compiling the vphn selftest in userspace
- */
-
-/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
-#define VPHN_REGISTER_COUNT 6
-
-/*
- * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
- * form the complete property we have to add the length in the first cell.
- */
-#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
-
-/*
- * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags:
- * 1 for retrieving associativity information for a guest cpu
- * 2 for retrieving associativity information for a host/hypervisor cpu
- */
-#define VPHN_FLAG_VCPU 1
-#define VPHN_FLAG_PCPU 2
-
#ifdef __KERNEL__
/*
@@ -45,6 +23,7 @@
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
+#include <asm/paca.h>
/*
* The lppaca is the "virtual processor area" registered with the hypervisor,
@@ -127,13 +106,23 @@ struct lppaca {
*/
#define LPPACA_OLD_SHARED_PROC 2
-static inline bool lppaca_shared_proc(struct lppaca *l)
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * All CPUs should have the same shared proc value, so directly access the PACA
+ * to avoid false positives from DEBUG_PREEMPT.
+ */
+static inline bool lppaca_shared_proc(void)
{
+ struct lppaca *l = local_paca->lppaca_ptr;
+
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return false;
return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
}
+#define get_lppaca() (get_paca()->lppaca_ptr)
+#endif
+
/*
* SLB shadow buffer structure as defined in the PAPR. The save_area
* contains adjacent ESID and VSID pairs for each shadowed SLB. The
@@ -149,8 +138,6 @@ struct slb_shadow {
} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
-extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
-
#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */
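
Note that lppaca_shared_proc() now takes no argument and reads local_paca->lppaca_ptr directly: going through get_paca() would trip DEBUG_PREEMPT's smp_processor_id() check, even though the shared-processor bit is identical on every CPU. A hedged usage sketch (caller name hypothetical):

static bool hyp_running_shared_lpar(void)
{
	/* was: lppaca_shared_proc(get_lppaca()) */
	return lppaca_shared_proc();
}
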
diff --git a/arch/powerpc/include/asm/macio.h b/arch/powerpc/include/asm/macio.h
index ff5fd82d9ff0..3a07c62973aa 100644
--- a/arch/powerpc/include/asm/macio.h
+++ b/arch/powerpc/include/asm/macio.h
@@ -3,7 +3,8 @@
#define __MACIO_ASIC_H__
#ifdef __KERNEL__
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
extern struct bus_type macio_bus_type;
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 94b981152667..52cc25864a1b 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -33,7 +33,7 @@
* key 0 controlling userspace addresses on radix
* Key 3 on hash
*/
-#define MMU_FTR_BOOK3S_KUAP ASM_CONST(0x00000200)
+#define MMU_FTR_KUAP ASM_CONST(0x00000200)
/*
* Supports KUEP feature
@@ -144,11 +144,6 @@
typedef pte_t *pgtable_t;
-#ifdef CONFIG_PPC_E500
-#include <asm/percpu.h>
-DECLARE_PER_CPU(int, next_tlbcam_idx);
-#endif
-
enum {
MMU_FTRS_POSSIBLE =
#if defined(CONFIG_PPC_BOOK3S_604)
@@ -188,7 +183,7 @@ enum {
#endif /* CONFIG_PPC_RADIX_MMU */
#endif
#ifdef CONFIG_PPC_KUAP
- MMU_FTR_BOOK3S_KUAP |
+ MMU_FTR_KUAP |
#endif /* CONFIG_PPC_KUAP */
#ifdef CONFIG_PPC_MEM_KEYS
MMU_FTR_PKEY |
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 57f5017111f4..37bffa0f7918 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -127,6 +127,7 @@ static inline void inc_mm_active_cpus(struct mm_struct *mm)
static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
+ VM_WARN_ON_ONCE(atomic_read(&mm->context.active_cpus) <= 0);
atomic_dec(&mm->context.active_cpus);
}
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index ac53606c2594..a8e2e8339fb7 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -75,10 +75,6 @@ struct mod_arch_specific {
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
-# ifdef MODULE
- asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
-# endif /* MODULE */
-
int module_trampoline_target(struct module *mod, unsigned long trampoline,
unsigned long *target);
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
diff --git a/arch/powerpc/include/asm/mpc8260.h b/arch/powerpc/include/asm/mpc8260.h
deleted file mode 100644
index 155114bbd1a2..000000000000
--- a/arch/powerpc/include/asm/mpc8260.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Since there are many different boards and no standard configuration,
- * we have a unique include file for each. Rather than change every
- * file that has to include MPC8260 configuration, they all include
- * this one and the configuration switching is done here.
- */
-#ifdef __KERNEL__
-#ifndef __ASM_POWERPC_MPC8260_H__
-#define __ASM_POWERPC_MPC8260_H__
-
-#define MPC82XX_BCR_PLDP 0x00800000 /* Pipeline Maximum Depth */
-
-#ifdef CONFIG_8260
-
-#ifdef CONFIG_PCI_8260
-#include <platforms/82xx/m82xx_pci.h>
-#endif
-
-#endif /* CONFIG_8260 */
-#endif /* !__ASM_POWERPC_MPC8260_H__ */
-#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index c44d97751723..46bc5925e5fd 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -9,76 +9,74 @@
#ifndef __ASSEMBLY__
-#include <linux/jump_label.h>
-
#include <asm/reg.h>
-extern struct static_key_false disable_kuap_key;
-
-static __always_inline bool kuap_is_disabled(void)
-{
- return static_branch_unlikely(&disable_kuap_key);
-}
-
-static inline void __kuap_lock(void)
-{
-}
-
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
{
regs->kuap = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
+#define __kuap_save_and_lock __kuap_save_and_lock
-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
{
}
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
mtspr(SPRN_MD_AP, regs->kuap);
}
-static inline unsigned long __kuap_get_and_assert_locked(void)
+#ifdef CONFIG_PPC_KUAP_DEBUG
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
{
- unsigned long kuap;
+ WARN_ON_ONCE(mfspr(SPRN_MD_AP) >> 16 != MD_APG_KUAP >> 16);
- kuap = mfspr(SPRN_MD_AP);
+ return 0;
+}
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
+#endif
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
- WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
+static __always_inline void uaccess_begin_8xx(unsigned long val)
+{
+ asm(ASM_MMU_FTR_IFSET("mtspr %0, %1", "", %2) : :
+ "i"(SPRN_MD_AP), "r"(val), "i"(MMU_FTR_KUAP) : "memory");
+}
- return kuap;
+static __always_inline void uaccess_end_8xx(void)
+{
+ asm(ASM_MMU_FTR_IFSET("mtspr %0, %1", "", %2) : :
+ "i"(SPRN_MD_AP), "r"(MD_APG_KUAP), "i"(MMU_FTR_KUAP) : "memory");
}
-static inline void __allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
{
- mtspr(SPRN_MD_AP, MD_APG_INIT);
+ uaccess_begin_8xx(MD_APG_INIT);
}
-static inline void __prevent_user_access(unsigned long dir)
+static __always_inline void prevent_user_access(unsigned long dir)
{
- mtspr(SPRN_MD_AP, MD_APG_KUAP);
+ uaccess_end_8xx();
}
-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long prevent_user_access_return(void)
{
unsigned long flags;
flags = mfspr(SPRN_MD_AP);
- mtspr(SPRN_MD_AP, MD_APG_KUAP);
+ uaccess_end_8xx();
return flags;
}
-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void restore_user_access(unsigned long flags)
{
- mtspr(SPRN_MD_AP, flags);
+ uaccess_begin_8xx(flags);
}
-static inline bool
+static __always_inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
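
uaccess_begin_8xx()/uaccess_end_8xx() above replace the old disable_kuap_key static branch with boot-time feature patching: the mtspr sits in an MMU feature section and is overwritten with a nop when MMU_FTR_KUAP is clear, so a KUAP-disabled kernel keeps no test-and-branch in the fast path. Schematically (helper name hypothetical, body as in the patch):

/* the guarded form emitted above: the instruction survives only when
 * the feature bit is set at boot, the empty alternative otherwise */
static __always_inline void set_md_ap_if_kuap(unsigned long val)
{
	asm(ASM_MMU_FTR_IFSET("mtspr %0, %1", "", %2) : :
	    "i"(SPRN_MD_AP), "r"(val), "i"(MMU_FTR_KUAP) : "memory");
}
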
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 33213b31fcbb..f99c53a5f184 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -355,7 +355,7 @@ static inline int pte_young(pte_t pte)
#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
- ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
+ ((const void *)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index abe4fd82721e..5cd9acf58a7d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -127,7 +127,7 @@ static inline pte_t pmd_pte(pmd_t pmd)
#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
|| (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd) (!pmd_none(pmd))
-#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
+#define pmd_page_vaddr(pmd) ((const void *)(pmd_val(pmd) & ~PMD_MASKED_BITS))
extern struct page *pmd_page(pmd_t pmd);
#define pmd_pfn(pmd) (page_to_pfn(pmd_page(pmd)))
diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h
index 49bb41ed0816..0c7c3258134c 100644
--- a/arch/powerpc/include/asm/nohash/kup-booke.h
+++ b/arch/powerpc/include/asm/nohash/kup-booke.h
@@ -3,6 +3,7 @@
#define _ASM_POWERPC_KUP_BOOKE_H_
#include <asm/bug.h>
+#include <asm/mmu.h>
#ifdef CONFIG_PPC_KUAP
@@ -13,32 +14,26 @@
#else
-#include <linux/jump_label.h>
#include <linux/sched.h>
#include <asm/reg.h>
-extern struct static_key_false disable_kuap_key;
-
-static __always_inline bool kuap_is_disabled(void)
-{
- return static_branch_unlikely(&disable_kuap_key);
-}
-
-static inline void __kuap_lock(void)
+static __always_inline void __kuap_lock(void)
{
mtspr(SPRN_PID, 0);
isync();
}
+#define __kuap_lock __kuap_lock
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
{
regs->kuap = mfspr(SPRN_PID);
mtspr(SPRN_PID, 0);
isync();
}
+#define __kuap_save_and_lock __kuap_save_and_lock
-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
{
if (kuap_is_disabled())
return;
@@ -48,7 +43,7 @@ static inline void kuap_user_restore(struct pt_regs *regs)
/* Context synchronisation is performed by rfi */
}
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
if (regs->kuap)
mtspr(SPRN_PID, current->thread.pid);
@@ -56,48 +51,55 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
/* Context synchronisation is performed by rfi */
}
-static inline unsigned long __kuap_get_and_assert_locked(void)
+#ifdef CONFIG_PPC_KUAP_DEBUG
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
{
- unsigned long kuap = mfspr(SPRN_PID);
+ WARN_ON_ONCE(mfspr(SPRN_PID));
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
- WARN_ON_ONCE(kuap);
+ return 0;
+}
+#define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
+#endif
- return kuap;
+static __always_inline void uaccess_begin_booke(unsigned long val)
+{
+ asm(ASM_MMU_FTR_IFSET("mtspr %0, %1; isync", "", %2) : :
+ "i"(SPRN_PID), "r"(val), "i"(MMU_FTR_KUAP) : "memory");
}
-static inline void __allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static __always_inline void uaccess_end_booke(void)
{
- mtspr(SPRN_PID, current->thread.pid);
- isync();
+ asm(ASM_MMU_FTR_IFSET("mtspr %0, %1; isync", "", %2) : :
+ "i"(SPRN_PID), "r"(0), "i"(MMU_FTR_KUAP) : "memory");
}
-static inline void __prevent_user_access(unsigned long dir)
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
{
- mtspr(SPRN_PID, 0);
- isync();
+ uaccess_begin_booke(current->thread.pid);
+}
+
+static __always_inline void prevent_user_access(unsigned long dir)
+{
+ uaccess_end_booke();
}
-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = mfspr(SPRN_PID);
- mtspr(SPRN_PID, 0);
- isync();
+ uaccess_end_booke();
return flags;
}
-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void restore_user_access(unsigned long flags)
{
- if (flags) {
- mtspr(SPRN_PID, current->thread.pid);
- isync();
- }
+ if (flags)
+ uaccess_begin_booke(current->thread.pid);
}
-static inline bool
+static __always_inline bool
__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return !regs->kuap;
diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h
index e43a418d3ccd..6ddced0415cb 100644
--- a/arch/powerpc/include/asm/nohash/mmu-e500.h
+++ b/arch/powerpc/include/asm/nohash/mmu-e500.h
@@ -319,6 +319,9 @@ extern int book3e_htw_mode;
#endif
+#include <asm/percpu.h>
+DECLARE_PER_CPU(int, next_tlbcam_idx);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index cb325938766a..e667d455ecb4 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -15,7 +15,6 @@
#include <linux/cache.h>
#include <linux/string.h>
#include <asm/types.h>
-#include <asm/lppaca.h>
#include <asm/mmu.h>
#include <asm/page.h>
#ifdef CONFIG_PPC_BOOK3E_64
@@ -47,14 +46,11 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
#define get_paca() local_paca
#endif
-#ifdef CONFIG_PPC_PSERIES
-#define get_lppaca() (get_paca()->lppaca_ptr)
-#endif
-
#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
struct task_struct;
struct rtas_args;
+struct lppaca;
/*
* Defines the layout of the paca.
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f2b6bf5687d0..e5fcc79b5bfb 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -9,6 +9,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/bug.h>
#else
#include <asm/types.h>
#endif
@@ -119,16 +120,6 @@ extern long long virt_phys_offset;
#define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#endif
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-
-#define virt_addr_valid(vaddr) ({ \
- unsigned long _addr = (unsigned long)vaddr; \
- _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
- pfn_valid(virt_to_pfn(_addr)); \
-})
-
/*
* On Book-E parts we need __va to parse the device tree and we can't
* determine MEMORY_START until then. However we can determine PHYSICAL_START
@@ -233,6 +224,25 @@ extern long long virt_phys_offset;
#endif
#endif
+#ifndef __ASSEMBLY__
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+
+static inline const void *pfn_to_kaddr(unsigned long pfn)
+{
+ return __va(pfn << PAGE_SHIFT);
+}
+#endif
+
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
+ pfn_valid(virt_to_pfn((void *)_addr)); \
+})
+
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
* and needs to be executable. This means the whole heap ends
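
Turning virt_to_pfn() and pfn_to_kaddr() into typed static inlines is what forces the explicit cast seen in the kfence.h hunk earlier: an unsigned long no longer coerces silently through a macro. For example (hypothetical helper):

/* virt_to_pfn() now takes 'const void *', so integer addresses need an
 * explicit cast where the old macro accepted them silently */
static struct page *page_for_addr(unsigned long addr)
{
	return virt_to_page((void *)addr);
}
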
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index f5ba1a3c41f8..e08513d73119 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -6,6 +6,7 @@
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
+#include <asm/lppaca.h>
#include <asm/hvcall.h>
#endif
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 289f1ec85bc5..f5078a7dd85a 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -82,7 +82,8 @@ extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val,
extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state);
-
+extern void pci_adjust_legacy_attr(struct pci_bus *bus,
+ enum pci_mmap_state mmap_type);
#define HAVE_PCI_LEGACY 1
extern void pcibios_claim_one_bus(struct pci_bus *b);
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index b2e9bc4a52c1..d0ee46de248e 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -72,9 +72,9 @@ static inline pgprot_t pte_pgprot(pte_t pte)
}
#ifndef pmd_page_vaddr
-static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+static inline const void *pmd_page_vaddr(pmd_t pmd)
{
- return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
+ return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 8239c0af5eb2..fe3d0ea0058a 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -9,6 +9,7 @@
#include <asm/hvcall.h>
#include <asm/paca.h>
+#include <asm/lppaca.h>
#include <asm/page.h>
static inline long poll_pending(void)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ef6972aa33b9..005601243dda 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -397,6 +397,7 @@
#define PPC_RAW_RFCI (0x4c000066)
#define PPC_RAW_RFDI (0x4c00004e)
#define PPC_RAW_RFMCI (0x4c00004c)
+#define PPC_RAW_TLBILX_LPID (0x7c000024)
#define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_RAW_WAIT_v203 (0x7c00007c)
#define PPC_RAW_WAIT(w, p) (0x7c00003c | __PPC_WC(w) | __PPC_PL(p))
@@ -616,6 +617,7 @@
#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_RAW_TLBILX(t, a, b))
#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
+#define PPC_TLBILX_LPID stringify_in_c(.long PPC_RAW_TLBILX_LPID)
#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
#define PPC_WAIT_v203 stringify_in_c(.long PPC_RAW_WAIT_v203)
#define PPC_WAIT(w, p) stringify_in_c(.long PPC_RAW_WAIT(w, p))
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index a6c7069bec5d..b2c51d337e60 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -172,11 +172,6 @@ struct thread_struct {
unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *ptrace_bps[HBP_NUM_MAX];
- /*
- * Helps identify source of single-step exception and subsequent
- * hw-breakpoint enablement
- */
- struct perf_event *last_hit_ubp[HBP_NUM_MAX];
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */
unsigned long trap_nr; /* last trap # on this thread */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bb0121222ee3..4ae4ab9090a2 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1414,11 +1414,9 @@ static inline void mtmsr_isync(unsigned long val)
#define mfspr(rn) ({unsigned long rval; \
asm volatile("mfspr %0," __stringify(rn) \
: "=r" (rval)); rval;})
-#ifndef mtspr
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \
: "r" ((unsigned long)(v)) \
: "memory")
-#endif
#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")
static inline void wrtee(unsigned long val)
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 3abe15ac79db..c697c3c74694 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -202,7 +202,9 @@ typedef struct {
#define RTAS_USER_REGION_SIZE (64 * 1024)
/* RTAS return status codes */
+#define RTAS_HARDWARE_ERROR -1 /* Hardware Error */
#define RTAS_BUSY -2 /* RTAS Busy */
+#define RTAS_INVALID_PARAMETER -3 /* Invalid indicator/domain/sensor etc. */
#define RTAS_EXTENDED_DELAY_MIN 9900
#define RTAS_EXTENDED_DELAY_MAX 9905
@@ -425,6 +427,7 @@ extern int rtas_set_indicator(int indicator, int index, int new_value);
extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
extern void rtas_progress(char *s, unsigned short hex);
int rtas_ibm_suspend_me(int *fw_status);
+int rtas_error_rc(int rtas_rc);
struct rtc_time;
extern time64_t rtas_get_boot_time(void);
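
RTAS_HARDWARE_ERROR and RTAS_INVALID_PARAMETER give names to the raw -1/-3 statuses, and rtas_error_rc(), which maps such statuses to errno values, is now visible to other RTAS users. A hedged caller sketch (helper name hypothetical, token handling elided):

static int hyp_set_indicator(int token, int indicator, int index, int value)
{
	int rc = rtas_call(token, 3, 1, NULL, indicator, index, value);

	if (rc < 0)	/* e.g. RTAS_HARDWARE_ERROR, RTAS_INVALID_PARAMETER */
		return rtas_error_rc(rc);
	return 0;
}
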
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4e1f548c8d37..ea26665f82cf 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -74,6 +74,8 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
(unsigned long)_stext < end;
}
+#else
+static inline unsigned long kernel_toc_addr(void) { BUILD_BUG(); return -1UL; }
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index e29e83f8a89c..eed74c1fb832 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -8,7 +8,6 @@
extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned long long memory_limit;
-extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 8a4d4f4d9749..f4e6f2dd04b7 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -143,5 +143,20 @@ static inline int cpu_to_coregroup_id(int cpu)
#endif
#endif
+#ifdef CONFIG_HOTPLUG_SMT
+#include <linux/cpu_smt.h>
+#include <asm/cputhreads.h>
+
+static inline bool topology_is_primary_thread(unsigned int cpu)
+{
+ return cpu == cpu_first_thread_sibling(cpu);
+}
+
+static inline bool topology_smt_thread_allowed(unsigned int cpu)
+{
+ return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TOPOLOGY_H */
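
The two predicates wire powerpc into the generic CONFIG_HOTPLUG_SMT core: the primary thread is the first thread of its core and must stay online, while the booted SMT level caps which siblings may come up. A worked example (assuming 8 threads per core and booting with smt=4, i.e. cpu_smt_num_threads == 4):

/* on the core holding CPUs 8..15 */
static void smt_example(void)
{
	WARN_ON(!topology_is_primary_thread(8));	/* first thread of core */
	WARN_ON(!topology_smt_thread_allowed(10));	/* thread 2 < 4: allowed */
	WARN_ON(topology_smt_thread_allowed(12));	/* thread 4: not allowed */
}
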
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index a2d255aa9627..fb725ec77926 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -386,7 +386,7 @@ copy_mc_to_user(void __user *to, const void *from, unsigned long n)
extern long __copy_from_user_flushcache(void *dst, const void __user *src,
unsigned size);
-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
if (unlikely(!access_ok(ptr, len)))
return false;
@@ -401,7 +401,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
#define user_access_save prevent_user_access_return
#define user_access_restore restore_user_access
-static __must_check inline bool
+static __must_check __always_inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
if (unlikely(!access_ok(ptr, len)))
@@ -415,7 +415,7 @@ user_read_access_begin(const void __user *ptr, size_t len)
#define user_read_access_begin user_read_access_begin
#define user_read_access_end prevent_current_read_from_user
-static __must_check inline bool
+static __must_check __always_inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
if (unlikely(!access_ok(ptr, len)))
diff --git a/arch/powerpc/include/asm/vermagic.h b/arch/powerpc/include/asm/vermagic.h
index b054a8576e5d..6f250fe506bd 100644
--- a/arch/powerpc/include/asm/vermagic.h
+++ b/arch/powerpc/include/asm/vermagic.h
@@ -2,7 +2,9 @@
#ifndef _ASM_VERMAGIC_H
#define _ASM_VERMAGIC_H
-#ifdef CONFIG_MPROFILE_KERNEL
+#ifdef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
+#define MODULE_ARCH_VERMAGIC_FTRACE "patchable-function-entry "
+#elif defined(CONFIG_MPROFILE_KERNEL)
#define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel "
#else
#define MODULE_ARCH_VERMAGIC_FTRACE ""
diff --git a/arch/powerpc/include/asm/vphn.h b/arch/powerpc/include/asm/vphn.h
new file mode 100644
index 000000000000..8c2f795eea68
--- /dev/null
+++ b/arch/powerpc/include/asm/vphn.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_VPHN_H
+#define _ASM_POWERPC_VPHN_H
+
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
+#define VPHN_REGISTER_COUNT 6
+
+/*
+ * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
+ * form the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
+
+/*
+ * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags:
+ * 1 for retrieving associativity information for a guest cpu
+ * 2 for retrieving associativity information for a host/hypervisor cpu
+ */
+#define VPHN_FLAG_VCPU 1
+#define VPHN_FLAG_PCPU 2
+
+long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
+
+#endif // _ASM_POWERPC_VPHN_H
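
For context on the sizing: each of the 6 returned registers carries four 16-bit packed fields, each of which may unpack to one be32 cell, so the property buffer needs 6 * 8 / 2 = 24 cells plus one leading length cell. A sanity sketch (assuming <linux/build_bug.h>):

#include <linux/build_bug.h>

static inline void vphn_bufsize_sanity(void)
{
	BUILD_BUG_ON(VPHN_ASSOC_BUFSIZE != 25);	/* 24 values + length cell */
}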