Diffstat (limited to 'arch/riscv/include/asm/pgalloc.h')
| -rw-r--r-- | arch/riscv/include/asm/pgalloc.h | 67 |
1 file changed, 49 insertions, 18 deletions
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 11823004b87a..770ce18a7328 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -8,10 +8,10 @@
 #define _ASM_RISCV_PGALLOC_H
 
 #include <linux/mm.h>
+#include <asm/sbi.h>
 #include <asm/tlb.h>
 
 #ifdef CONFIG_MMU
-#define __HAVE_ARCH_PUD_ALLOC_ONE
 #define __HAVE_ARCH_PUD_FREE
 #include <asm-generic/pgalloc.h>
 
@@ -59,13 +59,24 @@ static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
 	}
 }
 
-#define pud_alloc_one pud_alloc_one
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
 {
-	if (pgtable_l4_enabled)
-		return __pud_alloc_one(mm, addr);
+	if (pgtable_l5_enabled) {
+		unsigned long pfn = virt_to_pfn(p4d);
+
+		set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+	}
+}
+
+static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
+				     p4d_t *p4d)
+{
+	if (pgtable_l5_enabled) {
+		unsigned long pfn = virt_to_pfn(p4d);
 
-	return NULL;
+		set_pgd_safe(pgd,
+			     __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+	}
 }
 
 #define pud_free pud_free
@@ -75,35 +86,55 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 		__pud_free(mm, pud);
 }
 
-#define __pud_free_tlb(tlb, pud, addr)  pud_free((tlb)->mm, pud)
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				  unsigned long addr)
+{
+	if (pgtable_l4_enabled)
+		tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud));
+}
+
+static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
+				  unsigned long addr)
+{
+	if (pgtable_l5_enabled)
+		tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+}
 #endif /* __PAGETABLE_PMD_FOLDED */
 
+static inline void sync_kernel_mappings(pgd_t *pgd)
+{
+	memcpy(pgd + USER_PTRS_PER_PGD,
+	       init_mm.pgd + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 
-	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+	pgd = __pgd_alloc(mm, 0);
 	if (likely(pgd != NULL)) {
-		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
 		/* Copy kernel mappings */
-		memcpy(pgd + USER_PTRS_PER_PGD,
-			init_mm.pgd + USER_PTRS_PER_PGD,
-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+		sync_kernel_mappings(pgd);
 	}
 	return pgd;
 }
 
 #ifndef __PAGETABLE_PMD_FOLDED
 
-#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				  unsigned long addr)
+{
+	tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd));
+}
 
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-#define __pte_free_tlb(tlb, pte, buf)	\
-do {					\
-	pgtable_pte_page_dtor(pte);	\
-	tlb_remove_page((tlb), pte);	\
-} while (0)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+				  unsigned long addr)
+{
+	tlb_remove_ptdesc(tlb, page_ptdesc(pte));
+}
 #endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_PGALLOC_H */
