From 1b03e71ff6f2bd10b45a0128ce76e0e42014a44c Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 16 Nov 2020 16:09:31 +0000
Subject: powerpc/32s: Handle PROTFAULT in hash_page() also for CONFIG_PPC_KUAP

On hash 32 bits, handling minor protection faults like unsetting the
dirty flag is heavy if done from the normal page_fault processing,
because it implies a hash table software lookup for flushing the entry,
and then a DSI is taken anyway to add the entry back.

When KUAP was implemented, as explained in commit a68c31fc01ef
("powerpc/32s: Implement Kernel Userspace Access Protection"),
protection faults have been diverted from hash_page() because
hash_page() was not able to identify a KUAP fault.

Implement KUAP verification in hash_page(), by clearing write
permission when the access is a kernel access and Ks is 1.
This works regardless of the address because kernel segments always
have Ks set to 0, while user segments have Ks set to 0 only when
kernel write to userspace is granted.

Then protection faults can be handled by hash_page() even for KUAP.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/8a4ffe4798e9ea32aaaccdf85e411bb1beed3500.1605542955.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/mm/book3s32/hash_low.S | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/mm')

diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index ceb90a6e3256..98b8d8a5ad64 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -89,8 +89,6 @@ _GLOBAL(hash_page)
 #else
 	rlwimi	r8,r4,23,20,28		/* compute pte address */
 #endif
-	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
-	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
 	/*
 	 * Update the linux PTE atomically.  We do the lwarx up-front
@@ -106,7 +104,18 @@ _GLOBAL(hash_page)
 #endif
 .Lretry:
 	lwarx	r6,0,r8			/* get linux-style pte, flag word */
+#ifdef CONFIG_PPC_KUAP
+	mfsrin	r5,r4
+	rlwinm	r0,r9,28,_PAGE_RW	/* MSR[PR] => _PAGE_RW */
+	rlwinm	r5,r5,12,_PAGE_RW	/* Ks => _PAGE_RW */
+	andc	r5,r5,r0		/* Ks & ~MSR[PR] */
+	andc	r5,r6,r5		/* Clear _PAGE_RW when Ks = 1 && MSR[PR] = 0 */
+	andc.	r5,r3,r5		/* check access & ~permission */
+#else
 	andc.	r5,r3,r6		/* check access & ~permission */
+#endif
+	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
+	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 #ifdef CONFIG_SMP
 	bne-	.Lhash_page_out		/* return if access not permitted */
 #else
--
cgit
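
Editor's note: the following is a minimal C sketch of the permission test the
patch adds to hash_page(), not kernel code; the names effective_pte_flags,
access_permitted and the PAGE_RW bit value are illustrative assumptions. The
idea, per the commit message, is that when the access comes from the kernel
(MSR[PR] = 0) and the segment has Ks = 1, write permission is masked out of
the PTE flags before the usual "access & ~permission" check.

/* Illustrative sketch only -- identifiers and bit values are hypothetical. */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_RW 0x001	/* assumed bit position, for illustration only */

static uint32_t effective_pte_flags(uint32_t pte_flags, bool msr_pr, bool seg_ks)
{
	/* Ks & ~MSR[PR]: kernel access into a Ks = 1 segment */
	if (seg_ks && !msr_pr)
		pte_flags &= ~PAGE_RW;	/* drop write permission */
	return pte_flags;
}

static bool access_permitted(uint32_t access, uint32_t pte_flags,
			     bool msr_pr, bool seg_ks)
{
	/* access & ~permission: any requested bit not granted means a fault */
	return (access & ~effective_pte_flags(pte_flags, msr_pr, seg_ks)) == 0;
}

Because kernel segments always have Ks = 0, the masking only affects kernel
accesses to user segments where KUAP has not granted write access, which is
why the check works regardless of the faulting address.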