From c78e710c1c9fbeff43dddc0aa3d0ff458e70b0cc Mon Sep 17 00:00:00 2001
From: John David Anglin
Date: Tue, 6 Dec 2016 21:47:04 -0500
Subject: parisc: Purge TLB before setting PTE

The attached change interchanges the order of purging the TLB and
setting the corresponding page table entry.  TLB purges are strongly
ordered.  It occurred to me one night that setting the PTE first might
have subtle ordering issues on SMP machines and cause random memory
corruption.

A TLB lock guards the insertion of user TLB entries.  So after the TLB
is purged, a new entry can't be inserted until the lock is released.
This ensures that the new PTE value is used when the lock is released.

Since making this change, no random segmentation faults have been
observed on the Debian hppa buildd servers.

Signed-off-by: John David Anglin
Cc: # v3.16+
Signed-off-by: Helge Deller
---
 arch/parisc/include/asm/pgtable.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/parisc')

diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index c2c43f714684..3a4ed9f91d57 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 		unsigned long flags;				\
 		spin_lock_irqsave(&pa_tlb_lock, flags);		\
 		old_pte = *ptep;				\
-		set_pte(ptep, pteval);				\
 		if (pte_inserted(old_pte))			\
 			purge_tlb_entries(mm, addr);		\
+		set_pte(ptep, pteval);				\
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
 	} while (0)
 
@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);
 		return 0;
 	}
-	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
+	set_pte(ptep, pte_mkold(pte));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return 1;
 }
@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	spin_lock_irqsave(&pa_tlb_lock, flags);
 	old_pte = *ptep;
-	set_pte(ptep, __pte(0));
 	if (pte_inserted(old_pte))
 		purge_tlb_entries(mm, addr);
+	set_pte(ptep, __pte(0));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
 	return old_pte;
@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&pa_tlb_lock, flags);
-	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
-- cgit
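Read as plain C, the ordering this patch establishes inside pa_tlb_lock
looks like the sketch below.  It is distilled from the set_pte_at hunk
above, not literal kernel source; the helper name set_pte_at_sketch is
hypothetical, while pa_tlb_lock, pte_inserted(), purge_tlb_entries()
and set_pte() come from the patch context.

/*
 * Minimal sketch of the purge-then-set ordering.  pa_tlb_lock also
 * guards the insertion of user TLB entries, so once the old
 * translation is purged here, no CPU can refetch it before the new
 * PTE value is published.
 */
static inline void set_pte_at_sketch(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pteval)
{
	unsigned long flags;
	pte_t old_pte;

	spin_lock_irqsave(&pa_tlb_lock, flags);
	old_pte = *ptep;
	if (pte_inserted(old_pte))		/* a TLB entry may exist for addr */
		purge_tlb_entries(mm, addr);	/* strongly ordered purge first */
	set_pte(ptep, pteval);			/* ...then publish the new PTE */
	spin_unlock_irqrestore(&pa_tlb_lock, flags);
}

With the old order (set_pte before the purge), another CPU could take a
TLB miss in the window between the two and insert a translation for the
new PTE while the stale entry was still live elsewhere; purging first
closes that window because insertion requires the same lock.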
From febe42964fe182281859b3d43d844bb25ca49367 Mon Sep 17 00:00:00 2001
From: John David Anglin
Date: Tue, 6 Dec 2016 22:02:01 -0500
Subject: parisc: Remove unnecessary TLB purges from flush_dcache_page_asm and
 flush_icache_page_asm

We have four routines in pacache.S that use temporary alias pages:
copy_user_page_asm(), clear_user_page_asm(), flush_dcache_page_asm()
and flush_icache_page_asm().  copy_user_page_asm() and
clear_user_page_asm() don't purge the TLB entry used for the operation.
flush_dcache_page_asm() and flush_icache_page_asm() do purge the entry.

Presumably, this was thought to optimize TLB use.  However, the
operation is quite heavyweight on PA 1.X processors, as we need to take
the TLB lock and a TLB broadcast is sent to all processors.

This patch removes the purges from flush_dcache_page_asm() and
flush_icache_page_asm().

Signed-off-by: John David Anglin
Cc: # v3.16+
Signed-off-by: Helge Deller
---
 arch/parisc/kernel/pacache.S | 22 +---------------------
 1 file changed, 1 insertion(+), 21 deletions(-)

(limited to 'arch/parisc')

diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 1b39a2acaadf..adf7187f8951 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -892,19 +892,10 @@ ENTRY_CFI(flush_dcache_page_asm)
 	fdc,m	r31(%r28)
 	fdc,m	r31(%r28)
 	fdc,m	r31(%r28)
-	cmpb,COND(<<)		%r28, %r25,1b
+	cmpb,COND(<<)	%r28, %r25,1b
 	fdc,m	r31(%r28)
 
 	sync
-
-#ifdef CONFIG_PA20
-	pdtlb,l	%r0(%r25)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pdtlb	%r0(%r25)
-	tlb_unlock	%r20,%r21,%r22
-#endif
-
 	bv	%r0(%r2)
 	nop
 	.exit
@@ -979,17 +970,6 @@ ENTRY_CFI(flush_icache_page_asm)
 	fic,m	%r31(%sr4,%r28)
 
 	sync
-
-#ifdef CONFIG_PA20
-	pdtlb,l	%r0(%r28)
-	pitlb,l	%r0(%sr4,%r25)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pdtlb	%r0(%r28)
-	pitlb	%r0(%sr4,%r25)
-	tlb_unlock	%r20,%r21,%r22
-#endif
-
 	bv	%r0(%r2)
 	nop
 	.exit
-- cgit
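To make the cost argument concrete, the purge being deleted can be
rendered in C roughly as below.  This is a hedged illustration, not
kernel source: purge_tmpalias_sketch is a hypothetical name, and the
inline asm only mirrors the pdtlb/pdtlb,l instructions visible in the
hunks above.

/*
 * Sketch of the removed purge.  On PA 2.0 (CONFIG_PA20) a local purge
 * suffices; on PA 1.X the purge is broadcast to every CPU, so the
 * global TLB lock (tlb_lock in pacache.S) must be held around it --
 * the heavyweight path this patch avoids on every flush.
 */
static void purge_tmpalias_sketch(unsigned long vaddr)
{
#ifdef CONFIG_PA20
	/* local purge: no lock, no broadcast */
	asm volatile("pdtlb,l %%r0(%0)" : : "r" (vaddr) : "memory");
#else
	unsigned long flags;

	spin_lock_irqsave(&pa_tlb_lock, flags);
	/* purge is broadcast to all processors while the lock is held */
	asm volatile("pdtlb %%r0(%0)" : : "r" (vaddr) : "memory");
	spin_unlock_irqrestore(&pa_tlb_lock, flags);
#endif
}

Leaving the stale alias entry in the TLB is safe here for the same
reason it already was in copy_user_page_asm() and clear_user_page_asm():
the next user of the temporary alias page installs a fresh translation
before touching it.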
From 24d0492b7d5d321a9c5846c8c974eba9823ffaa0 Mon Sep 17 00:00:00 2001
From: Helge Deller
Date: Thu, 8 Dec 2016 21:00:46 +0100
Subject: parisc: Fix TLB related boot crash on SMP machines

At bootup we run measurements to calculate the best threshold for when
we should be using full TLB flushes instead of just flushing a specific
number of TLB entries.  This performance test is run over the kernel
text segment.

But running this TLB performance test on the kernel text segment turned
out to crash some SMP machines when the kernel text pages were mapped
as huge pages.

To avoid those crashes, this patch simply skips this test on such SMP
machines and calculates an optimal threshold based on the maximum
number of available TLB entries and the number of online CPUs.

On the technical side, this is what seems to happen: the TLB
measurement code uses flush_tlb_kernel_range() to flush specific TLB
entries with a page size of 4k (pdtlb 0(sr1,addr)).  On UP systems this
purge instruction seems to work without problems even if the pages were
mapped as huge pages.  But on SMP systems the TLB purge instruction is
broadcast to other CPUs.  Those CPUs then crash the machine because the
page size is not as expected.

C8000 machines with PA8800/PA8900 CPUs were not affected by this
problem, because the required cache coherency prohibits the use of huge
pages at all.  Sadly I didn't find any documentation about this
behaviour, so this finding is purely based on testing with physical SMP
machines (A500-44 and J5000, both 2-way boxes).

Cc: # v3.18+
Signed-off-by: Helge Deller
---
 arch/parisc/kernel/cache.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'arch/parisc')

diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c263301648f3..977f0a4f5ecf 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -393,6 +393,15 @@ void __init parisc_setup_cache_timing(void)
 
 	/* calculate TLB flush threshold */
 
+	/* On SMP machines, skip the TLB measure of kernel text which
+	 * has been mapped as huge pages.  */
+	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
+		threshold = max(cache_info.it_size, cache_info.dt_size);
+		threshold *= PAGE_SIZE;
+		threshold /= num_online_cpus();
+		goto set_tlb_threshold;
+	}
+
 	alltime = mfctl(16);
 	flush_tlb_all();
 	alltime = mfctl(16) - alltime;
@@ -411,6 +420,8 @@ void __init parisc_setup_cache_timing(void)
 		alltime, size, rangetime);
 
 	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+
+set_tlb_threshold:
 	if (threshold)
 		parisc_tlb_flush_threshold = threshold;
 	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
-- cgit
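The fallback threshold added above can be restated as a standalone
computation.  A minimal sketch, assuming the cache_info fields and
helpers from the patch context; the wrapper function itself is
hypothetical.

/*
 * Restatement of the fallback from the hunk above: cover the larger
 * of the I- and D-TLB (in entries), scale to bytes with the base page
 * size, and split across the online CPUs, since every range purge is
 * broadcast to each of them.
 */
static unsigned long tlb_threshold_fallback(void)
{
	unsigned long threshold;

	threshold = max(cache_info.it_size, cache_info.dt_size);	/* TLB entries */
	threshold *= PAGE_SIZE;			/* entries -> bytes covered */
	threshold /= num_online_cpus();		/* each CPU's share of purges */
	return threshold;
}

Purely illustrative numbers: with 256 TLB entries, a 4 KiB base page
size and two online CPUs, the fallback works out to 256 * 4 KiB / 2 =
512 KiB.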