Diffstat (limited to 'arch/mips/mm/init.c')
-rw-r--r--  arch/mips/mm/init.c  94
1 file changed, 46 insertions(+), 48 deletions(-)
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 5a8002839550..a673d3d68254 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -31,6 +31,7 @@
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>
+#include <linux/execmem.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
@@ -38,6 +39,7 @@
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -57,24 +59,16 @@ EXPORT_SYMBOL(zero_page_mask);
/*
* Not static inline because used by IP27 special magic initialization code
*/
-void setup_zero_pages(void)
+static void __init setup_zero_pages(void)
{
- unsigned int order, i;
- struct page *page;
+ unsigned int order;
if (cpu_has_vce)
order = 3;
else
order = 0;
- empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!empty_zero_page)
- panic("Oh boy, that early out of memory?");
-
- page = virt_to_page((void *)empty_zero_page);
- split_page(page, order);
- for (i = 0; i < (1 << order); i++, page++)
- mark_page_reserved(page);
+ empty_zero_page = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order, PAGE_SIZE);
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
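
Side note, not part of the patch: the rewritten setup_zero_pages() relies on memblock returning zeroed, page-aligned memory that is never handed to the page allocator, which is why the old __get_free_pages()/split_page()/mark_page_reserved() sequence can go away. A minimal sketch of that pattern, using a hypothetical helper name, is shown below.

#include <linux/memblock.h>

/*
 * Illustrative sketch only (hypothetical helper): memblock_alloc_or_panic()
 * hands back zeroed, page-aligned memory from the early allocator and panics
 * on failure, so no error handling, split_page() or per-page reservation is
 * needed for the empty zero page block.
 */
static unsigned long __init alloc_early_zero_block(unsigned int order)
{
	return (unsigned long)memblock_alloc_or_panic(PAGE_SIZE << order,
						      PAGE_SIZE);
}
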
@@ -88,7 +82,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
pte_t pte;
int tlbidx;
- BUG_ON(Page_dcache_dirty(page));
+ BUG_ON(folio_test_dcache_dirty(page_folio(page)));
preempt_disable();
pagefault_disable();
@@ -169,11 +163,12 @@ void kunmap_coherent(void)
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
+ struct folio *src = page_folio(from);
void *vfrom, *vto;
vto = kmap_atomic(to);
if (cpu_has_dc_aliases &&
- page_mapcount(from) && !Page_dcache_dirty(from)) {
+ folio_mapped(src) && !folio_test_dcache_dirty(src)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent();
@@ -194,15 +189,17 @@ void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
+ struct folio *folio = page_folio(page);
+
if (cpu_has_dc_aliases &&
- page_mapcount(page) && !Page_dcache_dirty(page)) {
+ folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
+ folio_set_dcache_dirty(folio);
}
if (vma->vm_flags & VM_EXEC)
flush_cache_page(vma, vaddr, page_to_pfn(page));
@@ -212,15 +209,17 @@ void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
+ struct folio *folio = page_folio(page);
+
if (cpu_has_dc_aliases &&
- page_mapcount(page) && !Page_dcache_dirty(page)) {
+ folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
+ folio_set_dcache_dirty(folio);
}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);
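
The copy helpers above all apply the same aliasing rule, now phrased in folio terms: when the CPU has dcache aliases, the folio is mapped in user space, and its kernel alias has not been dirtied, the copy must go through kmap_coherent() so it uses the same cache colour as the user mapping; otherwise the plain kernel alias is used and the folio is marked dcache-dirty for a later flush. A condensed restatement of that predicate (hypothetical helper, for illustration only):

/*
 * Illustrative sketch, not part of the patch: the condition the hunks above
 * use to decide between kmap_coherent() and the plain kernel mapping.
 */
static bool mips_use_coherent_copy(struct folio *folio)
{
	return cpu_has_dc_aliases &&		/* CPU can observe dcache aliases */
	       folio_mapped(folio) &&		/* folio is mapped in user space */
	       !folio_test_dcache_dirty(folio);	/* kernel alias still clean */
}
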
@@ -426,44 +425,16 @@ void __init paging_init(void)
static struct kcore_list kcore_kseg0;
#endif
-static inline void __init mem_init_free_highmem(void)
-{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
- if (cpu_has_dc_aliases)
- return;
-
- for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
- struct page *page = pfn_to_page(tmp);
-
- if (!memblock_is_memory(PFN_PHYS(tmp)))
- SetPageReserved(page);
- else
- free_highmem_page(page);
- }
-#endif
-}
-
-void __init mem_init(void)
+void __init arch_mm_preinit(void)
{
/*
- * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
+ * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
* bits to hold a full 32b physical address on MIPS32 systems.
*/
- BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
-
-#ifdef CONFIG_HIGHMEM
- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
-#else
- max_mapnr = max_low_pfn;
-#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
maar_init();
- memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
- mem_init_free_highmem();
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
@@ -473,6 +444,11 @@ void __init mem_init(void)
0x80000000 - 4, KCORE_TEXT);
#endif
}
+#else /* CONFIG_NUMA */
+void __init arch_mm_preinit(void)
+{
+ setup_zero_pages(); /* This comes from node 0 */
+}
#endif /* !CONFIG_NUMA */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
@@ -568,3 +544,25 @@ EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
+
+#ifdef CONFIG_EXECMEM
+#ifdef MODULES_VADDR
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif
+#endif /* CONFIG_EXECMEM */
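
The new execmem_arch_setup() only describes where executable allocations may live; the allocation itself is done by the generic execmem layer. A rough usage sketch, assuming the execmem_alloc()/execmem_free() interface from <linux/execmem.h> (illustrative only, not part of the patch):

#include <linux/execmem.h>

/*
 * Illustrative sketch, not part of the patch: a caller asking the generic
 * execmem layer for executable memory.  With the range registered above
 * (when MODULES_VADDR is defined), EXECMEM_DEFAULT allocations on MIPS land
 * in MODULES_VADDR..MODULES_END and are mapped with PAGE_KERNEL.
 */
static void *grab_exec_buffer(size_t size)
{
	void *p = execmem_alloc(EXECMEM_DEFAULT, size);

	if (!p)
		return NULL;
	/* ... copy code in and flush the icache before executing ... */
	return p;
}

static void drop_exec_buffer(void *p)
{
	execmem_free(p);
}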