Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c             8
-rw-r--r--  arch/mips/mm/cache.c            15
-rw-r--r--  arch/mips/mm/cex-gen.S           2
-rw-r--r--  arch/mips/mm/dma-noncoherent.c   2
-rw-r--r--  arch/mips/mm/fault.c             1
-rw-r--r--  arch/mips/mm/init.c             17
-rw-r--r--  arch/mips/mm/ioremap.c           4
-rw-r--r--  arch/mips/mm/pgtable-64.c        2
-rw-r--r--  arch/mips/mm/tlb-r3k.c           6
-rw-r--r--  arch/mips/mm/tlb-r4k.c           8
-rw-r--r--  arch/mips/mm/tlbex.c             4

11 files changed, 34 insertions(+), 35 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 187d1c16361c..10413b6f6662 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1485,10 +1485,6 @@ static void loongson3_sc_init(void)
return;
}
-extern int r5k_sc_init(void);
-extern int rm7k_sc_init(void);
-extern int mips_sc_init(void);
-
static void setup_scache(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -1654,7 +1650,7 @@ static void coherency_setup(void)
/*
* c0_status.cu=0 specifies that updates by the sc instruction use
- * the coherency mode specified by the TLB; 1 means cachable
+ * the coherency mode specified by the TLB; 1 means cacheable
* coherent update on write will be used. Not all processors have
* this bit and; some wire it to zero, others like Toshiba had the
* silly idea of putting something else there ...
@@ -1828,7 +1824,7 @@ static struct notifier_block r4k_cache_pm_notifier_block = {
.notifier_call = r4k_cache_pm_notifier,
};
-int __init r4k_cache_init_pm(void)
+static int __init r4k_cache_init_pm(void)
{
return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
}
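
The last hunk above also shows the shape of the CPU PM hook this file registers. A minimal sketch of that notifier pattern, with hypothetical my_pm_* names; cpu_pm_register_notifier(), CPU_PM_ENTER/CPU_PM_EXIT, NOTIFY_OK and arch_initcall() are the real kernel API:

    #include <linux/cpu_pm.h>
    #include <linux/init.h>
    #include <linux/notifier.h>

    static int my_pm_notifier(struct notifier_block *self, unsigned long cmd,
                              void *v)
    {
            switch (cmd) {
            case CPU_PM_ENTER:
                    /* CPU is about to lose state; save anything volatile */
                    break;
            case CPU_PM_EXIT:
                    /* CPU is back up; reapply cache/coherency setup here */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_pm_notifier_block = {
            .notifier_call = my_pm_notifier,
    };

    static int __init my_pm_init(void)
    {
            /* static init function: nothing outside this file needs it */
            return cpu_pm_register_notifier(&my_pm_notifier_block);
    }
    arch_initcall(my_pm_init);
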
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7f830634dbe7..df1ced4fc3b5 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -205,22 +205,13 @@ static inline void setup_protection_map(void)
void cpu_cache_init(void)
{
- if (cpu_has_3k_cache) {
- extern void __weak r3k_cache_init(void);
-
+ if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
r3k_cache_init();
- }
- if (cpu_has_4k_cache) {
- extern void __weak r4k_cache_init(void);
-
+ if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
r4k_cache_init();
- }
-
- if (cpu_has_octeon_cache) {
- extern void __weak octeon_cache_init(void);
+ if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
octeon_cache_init();
- }
setup_protection_map();
}
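
The rewrite above replaces function-local `extern void __weak` declarations with IS_ENABLED() guards. A minimal sketch of why that works, assuming a hypothetical CONFIG_FOO and foo_init(): IS_ENABLED() expands to a compile-time constant 0 or 1, so the disabled branch is discarded before link time while the call is still type-checked against the header prototype.

    #include <linux/kconfig.h>

    void foo_init(void);    /* prototype stays visible even when CONFIG_FOO=n */

    void cache_setup_example(void)
    {
            /*
             * With CONFIG_FOO=n the condition is constant 0, the branch is
             * eliminated at compile time, and no reference to foo_init()
             * ever reaches the linker, so no __weak fallback stub is needed.
             */
            if (IS_ENABLED(CONFIG_FOO))
                    foo_init();
    }
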
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
index 45dff5cd4b8e..e528583d1331 100644
--- a/arch/mips/mm/cex-gen.S
+++ b/arch/mips/mm/cex-gen.S
@@ -25,7 +25,7 @@
* This is a very bad place to be. Our cache error
* detection has triggered. If we have write-back data
* in the cache, we may not be able to recover. As a
- * first-order desperate measure, turn off KSEG0 cacheing.
+ * first-order desperate measure, turn off KSEG0 caching.
*/
mfc0 k0,CP0_CONFIG
li k1,~CONF_CM_CMASK
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 3c4fc97b9f39..0f3cec663a12 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -138,7 +138,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+ bool coherent)
{
dev->dma_coherent = coherent;
}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index d7878208bd3f..aaa9a242ebba 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -26,6 +26,7 @@
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
+#include <asm/traps.h>
#include <linux/kdebug.h>
int show_unhandled_signals = 1;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 5dcb525a8995..39f129205b0c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -38,6 +38,7 @@
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
+#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -421,8 +422,17 @@ void __init paging_init(void)
" %ldk highmem ignored\n",
(highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+
+ max_mapnr = max_low_pfn;
+ } else if (highend_pfn) {
+ max_mapnr = highend_pfn;
+ } else {
+ max_mapnr = max_low_pfn;
}
+#else
+ max_mapnr = max_low_pfn;
#endif
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
free_area_init(max_zone_pfns);
}
@@ -458,13 +468,6 @@ void __init mem_init(void)
*/
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
-#ifdef CONFIG_HIGHMEM
- max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
-#else
- max_mapnr = max_low_pfn;
-#endif
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
maar_init();
memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
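
Net effect of the init.c hunks: max_mapnr and high_memory are now set in paging_init(), before free_area_init() runs, rather than later in mem_init(). high_memory marks the first address past the directly mapped lowmem range; a sketch of the kind of check that relies on it being set early (modelled loosely on virt_addr_valid(), helper name invented):

    #include <linux/mm.h>       /* high_memory */
    #include <asm/page.h>       /* PAGE_OFFSET */

    static bool addr_is_lowmem(const void *x)
    {
            unsigned long addr = (unsigned long)x;

            /* lowmem is the directly mapped range [PAGE_OFFSET, high_memory) */
            return addr >= PAGE_OFFSET && addr < (unsigned long)high_memory;
    }
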
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index b6dad2fd5575..d8243d61ef32 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -72,6 +72,10 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
flags == _CACHE_UNCACHED)
return (void __iomem *) CKSEG1ADDR(phys_addr);
+ /* Early remaps should use the unmapped regions til' VM is available */
+ if (WARN_ON_ONCE(!slab_is_available()))
+ return NULL;
+
/*
* Don't allow anybody to remap RAM that may be allocated by the page
* allocator, since that could lead to races & data clobbering.
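
With the WARN_ON_ONCE() added above, ioremap_prot() refuses to build page-table mappings before the VM is up; early callers are expected to hit the CKSEG1ADDR() fast path earlier in the function. Normal driver-time usage is unaffected, sketched here with an invented device address and size:

    #include <linux/errno.h>
    #include <linux/io.h>

    #define MYDEV_PHYS      0x1f000000UL    /* hypothetical register block */
    #define MYDEV_SIZE      0x1000UL

    static void __iomem *mydev_base;

    static int mydev_map(void)
    {
            /* safe from probe(): slab is long since available by then */
            mydev_base = ioremap(MYDEV_PHYS, MYDEV_SIZE);
            if (!mydev_base)
                    return -ENOMEM;
            return 0;
    }
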
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index c76d21f7dffb..1e544827dea9 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -89,6 +89,7 @@ void pud_init(void *addr)
}
#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
pmd_t pmd;
@@ -103,6 +104,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
{
*pmdp = pmd;
}
+#endif
void __init pagetable_init(void)
{
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 53dfa2b9316b..173f7b36033b 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -23,11 +23,11 @@
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/setup.h>
+#include <asm/tlbex.h>
#undef DEBUG_TLB
-extern void build_tlb_refill_handler(void);
-
/* CP0 hazard avoidance. */
#define BARRIER \
__asm__ __volatile__( \
@@ -183,7 +183,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
int idx, pid;
/*
- * Handle debugger faulting in for debugee.
+ * Handle debugger faulting in for debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
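
This file and tlb-r4k.c below get the same cleanup: the ad-hoc `extern void build_tlb_refill_handler(void);` is dropped in favour of including <asm/tlbex.h>, so the prototype lives in one header and is checked against the definition in tlbex.c. In sketch form:

    /* asm/tlbex.h: the single authoritative prototype */
    void build_tlb_refill_handler(void);

    /* tlb-r3k.c, tlb-r4k.c: include the header instead of re-declaring */
    #include <asm/tlbex.h>
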
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 93c2d695588a..4106084e57d7 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -22,9 +22,9 @@
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
+#include <asm/tlbex.h>
#include <asm/tlbmisc.h>
-
-extern void build_tlb_refill_handler(void);
+#include <asm/setup.h>
/*
* LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
@@ -301,7 +301,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
int idx, pid;
/*
- * Handle debugger faulting in for debugee.
+ * Handle debugger faulting in for debuggee.
*/
if (current->active_mm != vma->vm_mm)
return;
@@ -458,6 +458,7 @@ EXPORT_SYMBOL(has_transparent_hugepage);
int temp_tlb_entry;
+#ifndef CONFIG_64BIT
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
@@ -496,6 +497,7 @@ out:
local_irq_restore(flags);
return ret;
}
+#endif
static int ntlb;
static int __init set_ntlb(char *str)
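
The set_ntlb() context above is an instance of the __setup() boot-parameter pattern. A self-contained sketch with a made-up "myparam=" key; __setup() and get_option() are the real helpers:

    #include <linux/init.h>
    #include <linux/kernel.h>

    static int myparam;

    static int __init set_myparam(char *str)
    {
            get_option(&str, &myparam);     /* parses "myparam=<number>" */
            return 1;                       /* non-zero: option handled */
    }
    __setup("myparam=", set_myparam);
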
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b4e1c783e617..4017fa0e2f68 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -789,7 +789,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
if (check_for_high_segbits) {
/*
- * The kernel currently implicitely assumes that the
+ * The kernel currently implicitly assumes that the
* MIPS SEGBITS parameter for the processor is
* (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
* allocate virtual addresses outside the maximum
@@ -1715,7 +1715,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
/*
* Check if PTE is present, if not then jump to LABEL. PTR points to
* the page table where this PTE is located, PTE will be re-loaded
- * with it's original value.
+ * with its original value.
*/
static void
build_pte_present(u32 **p, struct uasm_reloc **r,