Diffstat (limited to 'arch/riscv/include/asm/pgtable.h')
-rw-r--r--	arch/riscv/include/asm/pgtable.h	160
1 file changed, 140 insertions(+), 20 deletions(-)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 6afd6bb4882e..f19240fd018e 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -12,7 +12,11 @@
#include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU
-#define KERNEL_LINK_ADDR PAGE_OFFSET
+#ifdef CONFIG_RELOCATABLE
+#define KERNEL_LINK_ADDR UL(0)
+#else
+#define KERNEL_LINK_ADDR _AC(CONFIG_PHYS_RAM_BASE, UL)
+#endif
#define KERN_VIRT_SIZE (UL(-1))
#else
@@ -55,6 +59,9 @@
#define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
+#else
+#define MODULES_VADDR VMALLOC_START
+#define MODULES_END VMALLOC_END
#endif
/*
@@ -84,7 +91,7 @@
* Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
* is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
*/
-#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
+#define vmemmap ((struct page *)VMEMMAP_START - vmemmap_start_pfn)
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
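The bias term in the new vmemmap definition is what makes pfn_to_page() plain pointer arithmetic. A minimal sketch of the identity, using the generic SPARSEMEM_VMEMMAP definition from include/asm-generic/memory_model.h (vmemmap_start_pfn stands for the first pfn the map covers):

/*
 * #define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *
 * With vmemmap pre-biased by vmemmap_start_pfn, the first mapped pfn
 * lands exactly at VMEMMAP_START:
 *
 *   __pfn_to_page(vmemmap_start_pfn)
 *     == (struct page *)VMEMMAP_START - vmemmap_start_pfn
 *        + vmemmap_start_pfn
 *     == (struct page *)VMEMMAP_START
 */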
@@ -104,19 +111,13 @@
#endif
-#ifdef CONFIG_XIP_KERNEL
-#define XIP_OFFSET SZ_32M
-#define XIP_OFFSET_MASK (SZ_32M - 1)
-#else
-#define XIP_OFFSET 0
-#endif
-
#ifndef __ASSEMBLY__
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>
+#include <asm/cpufeature.h>
#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
@@ -139,11 +140,14 @@
#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
+ extern char _sdata[], _start[], _end[]; \
+ uintptr_t __rom_start_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_sdata - (uintptr_t)&_start; \
+ uintptr_t __rom_end_data = CONFIG_XIP_PHYS_ADDR \
+ + (uintptr_t)&_end - (uintptr_t)&_start; \
uintptr_t __a = (uintptr_t)(addr); \
- (__a >= CONFIG_XIP_PHYS_ADDR && \
- __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
- __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
- __a; \
+ (__a >= __rom_start_data && __a < __rom_end_data) ? \
+ __a - __rom_start_data + CONFIG_PHYS_RAM_BASE : __a; \
})
#else
#define XIP_FIXUP(addr) (addr)
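A worked sketch of the fixup arithmetic with hypothetical numbers (none of these values come from the patch):

/*
 * Assume CONFIG_XIP_PHYS_ADDR = 0x20000000 (flash/ROM base)
 *        CONFIG_PHYS_RAM_BASE = 0x80000000 (RAM base)
 *        _sdata - _start      = 0x00400000
 *        _end   - _start      = 0x00600000
 *
 * Then __rom_start_data = 0x20400000, __rom_end_data = 0x20600000,
 * and for a writable-data address inside that window:
 *
 *   XIP_FIXUP(0x20500000) = 0x20500000 - 0x20400000 + 0x80000000
 *                         = 0x80100000
 *
 * i.e. the access is redirected from the read-only flash image to the
 * copy of .data relocated into RAM. Addresses outside the window
 * (e.g. text, which executes in place) pass through unchanged.
 */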
@@ -162,7 +166,7 @@ struct pt_alloc_ops {
#endif
};
-extern struct pt_alloc_ops pt_ops __initdata;
+extern struct pt_alloc_ops pt_ops __meminitdata;
#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
@@ -285,7 +289,6 @@ static inline pte_t pud_pte(pud_t pud)
}
#ifdef CONFIG_RISCV_ISA_SVNAPOT
-#include <asm/cpufeature.h>
static __always_inline bool has_svnapot(void)
{
@@ -340,13 +343,32 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}
-#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+#define pte_pgprot pte_pgprot
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
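The XOR is a compact way to strip the pfn field without hard-coding the bit layout: pfn_pte(pfn, __pgprot(0)) rebuilds exactly the pfn bits of the original pte, so XOR-ing cancels them and leaves only the attribute bits. A sketch with a made-up 8-bit layout:

/*
 * pte                   = pppppaaa   (p = pfn bits, a = attr bits)
 * pfn_pte(pfn, 0)       = ppppp000
 * pte ^ pfn_pte(pfn, 0) = 00000aaa   -> the pgprot
 */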
static inline int pte_present(pte_t pte)
{
return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
+#define pte_accessible pte_accessible
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
+{
+ if (pte_val(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_val(a) & _PAGE_PROT_NONE) &&
+ atomic_read(&mm->tlb_flush_pending))
+ return true;
+
+ return false;
+}
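The second test covers entries that are no longer present but may still be cached by a remote TLB because a batched flush has been announced and not yet executed. A hedged sketch of that window (inc_tlb_flush_pending()/dec_tlb_flush_pending() are the real helpers from linux/mm_types.h; the flow itself is illustrative):

/*
 * inc_tlb_flush_pending(mm);     flush batched, not yet issued
 * ... pte downgraded to _PAGE_PROT_NONE, _PAGE_PRESENT cleared ...
 * pte_accessible(mm, pte) == true here: another CPU may still hold
 * a stale translation for this entry
 * <TLB flush actually runs>
 * dec_tlb_flush_pending(mm);     now the PROT_NONE pte is dead
 */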
+
static inline int pte_none(pte_t pte)
{
return (pte_val(pte) == 0);
@@ -387,6 +409,13 @@ static inline int pte_special(pte_t pte)
return pte_val(pte) & _PAGE_SPECIAL;
}
+#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
+static inline int pte_devmap(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DEVMAP;
+}
+#endif
+
/* static inline pte_t pte_rdprotect(pte_t pte) */
static inline pte_t pte_wrprotect(pte_t pte)
@@ -428,6 +457,11 @@ static inline pte_t pte_mkspecial(pte_t pte)
return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
+static inline pte_t pte_mkdevmap(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DEVMAP);
+}
+
static inline pte_t pte_mkhuge(pte_t pte)
{
return pte;
@@ -473,6 +507,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
+ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1)
+ : : : : svvptc);
+
/*
* The kernel assumes that TLBs don't cache invalid entries, but
* in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
@@ -482,12 +519,19 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf,
*/
while (nr--)
local_flush_tlb_page(address + nr * PAGE_SIZE);
+
+svvptc:;
+ /*
+ * Svvptc guarantees that the new valid pte will be visible within
+ * a bounded timeframe, so when the uarch does not cache invalid
+ * entries, we don't have to do anything.
+ */
}
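Behaviourally, the patched alternative is equivalent to a runtime feature test, just without the per-call branch; a sketch (riscv_has_extension_likely() is the real helper from asm/cpufeature.h, the function name here is made up):

static inline void update_mmu_cache_range_sketch(unsigned long address,
						 unsigned int nr)
{
	if (riscv_has_extension_likely(RISCV_ISA_EXT_SVVPTC))
		return;	/* new valid ptes become visible in bounded time */

	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}

The asm goto version patches the nop into a jump at boot, so hardware with Svvptc pays nothing on the page-fault path.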
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
-#define __HAVE_ARCH_UPDATE_MMU_TLB
-#define update_mmu_tlb update_mmu_cache
+#define update_mmu_tlb_range(vma, addr, ptep, nr) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
@@ -622,6 +666,17 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
}
/*
+ * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
+ * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
+ * DT.
+ */
+#define arch_has_hw_pte_young arch_has_hw_pte_young
+static inline bool arch_has_hw_pte_young(void)
+{
+ return riscv_has_extension_unlikely(RISCV_ISA_EXT_SVADU);
+}
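A hedged sketch of how a consumer might use this hook (ptep_get() and pte_young() are the standard helpers; the surrounding flow is illustrative, not taken from the patch):

/* Illustrative only: when Svadu hardware maintains the A bit, page
 * aging can read it directly instead of provoking minor faults. */
if (arch_has_hw_pte_young() && pte_young(ptep_get(ptep)))
	/* page was referenced since the bit was last cleared */;

Note the firmware dependency called out in the comment above: Svadu merely being implemented is not enough, the M-mode firmware must also have enabled the hardware-updating scheme.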
+
+/*
* THP functions
*/
static inline pmd_t pte_pmd(pte_t pte)
@@ -629,6 +684,11 @@ static inline pmd_t pte_pmd(pte_t pte)
return __pmd(pte_val(pte));
}
+static inline pud_t pte_pud(pte_t pte)
+{
+ return __pud(pte_val(pte));
+}
+
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd;
@@ -648,11 +708,24 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}
+#define pmd_pgprot pmd_pgprot
+static inline pgprot_t pmd_pgprot(pmd_t pmd)
+{
+ return pte_pgprot(pmd_pte(pmd));
+}
+
+#define pud_pgprot pud_pgprot
+static inline pgprot_t pud_pgprot(pud_t pud)
+{
+ return pte_pgprot(pud_pte(pud));
+}
+
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
@@ -717,6 +790,35 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}
+static inline pmd_t pmd_mkdevmap(pmd_t pmd)
+{
+ return pte_pmd(pte_mkdevmap(pmd_pte(pmd)));
+}
+
+#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
+static inline bool pmd_special(pmd_t pmd)
+{
+ return pte_special(pmd_pte(pmd));
+}
+
+static inline pmd_t pmd_mkspecial(pmd_t pmd)
+{
+ return pte_pmd(pte_mkspecial(pmd_pte(pmd)));
+}
+#endif
+
+#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
+static inline bool pud_special(pud_t pud)
+{
+ return pte_special(pud_pte(pud));
+}
+
+static inline pud_t pud_mkspecial(pud_t pud)
+{
+ return pte_pud(pte_mkspecial(pud_pte(pud)));
+}
+#endif
+
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
@@ -876,7 +978,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
-#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+#define TASK_SIZE_MAX LONG_MAX
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
@@ -888,7 +990,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#else
#define TASK_SIZE FIXADDR_START
-#define TASK_SIZE_MIN TASK_SIZE
#endif
#else /* CONFIG_MMU */
@@ -924,6 +1025,25 @@ void misc_mem_init(void);
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
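A usage sketch under the rule stated in the comment (hypothetical early-init flow; new_p4d stands for a freshly allocated table page):

/* The entry is known to be non-present, so the "set" cannot
 * invalidate any cached translation and no TLB flush is needed. */
if (pgd_none(*pgdp))
	set_pgd_safe(pgdp, pfn_pgd(PFN_DOWN(__pa(new_p4d)),
				   __pgprot(_PAGE_TABLE)));

If the entry might already be present with a different value, plain set_pgd() plus an explicit flush is the safe choice; the WARN_ON_ONCE() in the macro exists to catch exactly that misuse.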
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_RISCV_PGTABLE_H */