Diffstat (limited to 'arch/sparc/mm/tlb.c')
-rw-r--r--	arch/sparc/mm/tlb.c	157
1 file changed, 119 insertions, 38 deletions
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 7a91f288c708..a35ddcca5e76 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -1,17 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
 /* arch/sparc64/mm/tlb.c
  *
  * Copyright (C) 2004 David S. Miller <davem@redhat.com>
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/preempt.h>
+#include <linux/pagemap.h>
 
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
@@ -53,22 +52,25 @@ out:
 
 void arch_enter_lazy_mmu_mode(void)
 {
-	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+	struct tlb_batch *tb;
 
+	preempt_disable();
+	tb = this_cpu_ptr(&tlb_batch);
 	tb->active = 1;
 }
 
 void arch_leave_lazy_mmu_mode(void)
 {
-	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
 
 	if (tb->tlb_nr)
 		flush_tlb_pending();
 	tb->active = 0;
+	preempt_enable();
 }
 
 static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
-			      bool exec)
+			      bool exec, unsigned int hugepage_shift)
 {
 	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
@@ -85,13 +87,21 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
 	}
 
 	if (!tb->active) {
-		flush_tsb_user_page(mm, vaddr);
+		flush_tsb_user_page(mm, vaddr, hugepage_shift);
 		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
-	if (nr == 0)
+	if (nr == 0) {
 		tb->mm = mm;
+		tb->hugepage_shift = hugepage_shift;
+	}
+
+	if (tb->hugepage_shift != hugepage_shift) {
+		flush_tlb_pending();
+		tb->hugepage_shift = hugepage_shift;
+		nr = 0;
+	}
 
 	tb->vaddrs[nr] = vaddr;
 	tb->tlb_nr = ++nr;
@@ -103,13 +113,15 @@ out:
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-		   pte_t *ptep, pte_t orig, int fullmm)
+		   pte_t *ptep, pte_t orig, int fullmm,
+		   unsigned int hugepage_shift)
 {
 	if (tlb_type != hypervisor &&
 	    pte_dirty(orig)) {
 		unsigned long paddr, pfn = pte_pfn(orig);
 		struct address_space *mapping;
 		struct page *page;
+		struct folio *folio;
 
 		if (!pfn_valid(pfn))
 			goto no_cache_flush;
@@ -119,53 +131,71 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
 			goto no_cache_flush;
 
 		/* A real file page? */
-		mapping = page_mapping(page);
+		folio = page_folio(page);
+		mapping = folio_flush_mapping(folio);
 		if (!mapping)
 			goto no_cache_flush;
 
 		paddr = (unsigned long) page_address(page);
 		if ((paddr ^ vaddr) & (1 << 13))
-			flush_dcache_page_all(mm, page);
+			flush_dcache_folio_all(mm, folio);
 	}
 
 no_cache_flush:
 	if (!fullmm)
-		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
-			       pmd_t pmd, bool exec)
+			       pmd_t pmd)
 {
 	unsigned long end;
 	pte_t *pte;
 
 	pte = pte_offset_map(&pmd, vaddr);
+	if (!pte)
+		return;
 	end = vaddr + HPAGE_SIZE;
 	while (vaddr < end) {
-		if (pte_val(*pte) & _PAGE_VALID)
-			tlb_batch_add_one(mm, vaddr, exec);
+		if (pte_val(*pte) & _PAGE_VALID) {
+			bool exec = pte_exec(*pte);
+
+			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
+		}
 		pte++;
 		vaddr += PAGE_SIZE;
 	}
 	pte_unmap(pte);
 }
 
-void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-		pmd_t *pmdp, pmd_t pmd)
-{
-	pmd_t orig = *pmdp;
-
-	*pmdp = pmd;
 
+static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
+			   pmd_t orig, pmd_t pmd)
+{
 	if (mm == &init_mm)
 		return;
 
-	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
-		if (pmd_val(pmd) & PMD_ISHUGE)
-			mm->context.huge_pte_count++;
-		else
-			mm->context.huge_pte_count--;
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
+		/*
+		 * Note that this routine only sets pmds for THP pages.
+		 * Hugetlb pages are handled elsewhere.  We need to check
+		 * for huge zero page.  Huge zero pages are like hugetlb
+		 * pages in that there is no RSS, but there is the need
+		 * for TSB entries.  So, huge zero page counts go into
+		 * hugetlb_pte_count.
+		 */
+		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+			if (is_huge_zero_pmd(pmd))
+				mm->context.hugetlb_pte_count++;
+			else
+				mm->context.thp_pte_count++;
+		} else {
+			if (is_huge_zero_pmd(orig))
+				mm->context.hugetlb_pte_count--;
+			else
+				mm->context.thp_pte_count--;
+		}
 
 		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
@@ -178,16 +208,67 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	}
 
 	if (!pmd_none(orig)) {
-		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);
-
 		addr &= HPAGE_MASK;
-		if (pmd_val(orig) & PMD_ISHUGE)
-			tlb_batch_add_one(mm, addr, exec);
-		else
-			tlb_batch_pmd_scan(mm, addr, orig, exec);
+		if (pmd_trans_huge(orig)) {
+			pte_t orig_pte = __pte(pmd_val(orig));
+			bool exec = pte_exec(orig_pte);
+
+			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  REAL_HPAGE_SHIFT);
+		} else {
+			tlb_batch_pmd_scan(mm, addr, orig);
+		}
 	}
 }
 
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+	__set_pmd_acct(mm, addr, orig, pmd);
+}
+
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old;
+
+	do {
+		old = *pmdp;
+	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+	__set_pmd_acct(vma->vm_mm, address, old, pmd);
+
+	return old;
+}
+
+/*
+ * This routine is only called when splitting a THP
+ */
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+		     pmd_t *pmdp)
+{
+	pmd_t old, entry;
+
+	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
+	old = pmdp_establish(vma, address, pmdp, entry);
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+	/*
+	 * set_pmd_at() will not be called in a way to decrement
+	 * thp_pte_count when splitting a THP, so do it now.
+	 * Sanity check pmd before doing the actual decrement.
+	 */
+	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+	    !is_huge_zero_pmd(entry))
+		(vma->vm_mm)->context.thp_pte_count--;
+
+	return old;
+}
+
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 				pgtable_t pgtable)
 {
@@ -196,11 +277,11 @@ void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 	assert_spin_locked(&mm->page_table_lock);
 
 	/* FIFO */
-	if (!mm->pmd_huge_pte)
+	if (!pmd_huge_pte(mm, pmdp))
 		INIT_LIST_HEAD(lh);
 	else
-		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
-	mm->pmd_huge_pte = pgtable;
+		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+	pmd_huge_pte(mm, pmdp) = pgtable;
 }
 
 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
@@ -211,12 +292,12 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 	assert_spin_locked(&mm->page_table_lock);
 
 	/* FIFO */
-	pgtable = mm->pmd_huge_pte;
+	pgtable = pmd_huge_pte(mm, pmdp);
 	lh = (struct list_head *) pgtable;
 	if (list_empty(lh))
-		mm->pmd_huge_pte = NULL;
+		pmd_huge_pte(mm, pmdp) = NULL;
 	else {
-		mm->pmd_huge_pte = (pgtable_t) lh->next;
+		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
 		list_del(lh);
 	}
 	pte_val(pgtable[0]) = 0;
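The hugepage_shift tracking added to tlb_batch_add_one() above enforces one invariant: a pending batch only ever holds virtual addresses of a single page size, and a request with a different shift drains the batch first. Below is a minimal user-space sketch of that rule, not kernel code; batch_t, batch_add() and the printf standing in for the real demap are invented for illustration.

#include <stdio.h>

#define BATCH_NR 8

typedef struct {
	unsigned int shift;		/* page-size shift of queued entries */
	unsigned long nr;
	unsigned long vaddrs[BATCH_NR];
} batch_t;

static void batch_flush(batch_t *b)
{
	for (unsigned long i = 0; i < b->nr; i++)
		printf("flush %#lx (shift %u)\n", b->vaddrs[i], b->shift);
	b->nr = 0;
}

static void batch_add(batch_t *b, unsigned long vaddr, unsigned int shift)
{
	/* Mixed page sizes may not share a batch: drain the old one. */
	if (b->nr && b->shift != shift)
		batch_flush(b);
	if (b->nr == 0)
		b->shift = shift;

	b->vaddrs[b->nr++] = vaddr;
	if (b->nr >= BATCH_NR)	/* full batch, as with TLB_BATCH_NR */
		batch_flush(b);
}

int main(void)
{
	batch_t b = { .nr = 0 };

	batch_add(&b, 0x10000, 13);	/* two 8K base pages */
	batch_add(&b, 0x12000, 13);
	batch_add(&b, 0x800000, 23);	/* huge page drains them first */
	batch_flush(&b);
	return 0;
}

The drain-on-mismatch keeps each flush request describing entries of one size only, which is what the TSB and demap operations need.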
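pmdp_establish() above swaps in a new pmd without letting the entry pass through a transiently bogus state: it retries cmpxchg64() until no concurrent update (for example a hardware access/dirty-bit write) slipped in between the load and the exchange. A rough user-space analogue using C11 atomics rather than the kernel's cmpxchg64(); fake_pmd_t and establish() are invented names, not kernel API.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { _Atomic uint64_t pmd; } fake_pmd_t;

/* Atomically replace *pmdp with new_val, returning the value it held. */
static uint64_t establish(fake_pmd_t *pmdp, uint64_t new_val)
{
	uint64_t old = atomic_load(&pmdp->pmd);

	/* On failure 'old' is refreshed with the current value; retry. */
	while (!atomic_compare_exchange_weak(&pmdp->pmd, &old, new_val))
		;
	return old;
}

int main(void)
{
	fake_pmd_t pmd = { 0xdead0001 };
	uint64_t old = establish(&pmd, 0xbeef0002);

	printf("old=%#" PRIx64 " now=%#" PRIx64 "\n",
	       old, atomic_load(&pmd.pmd));
	return 0;
}

Returning the old value is the point: pmdp_invalidate() uses it so the caller sees exactly the pmd that was live when the entry was replaced.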
