Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/cache.c   | 322
-rw-r--r--  arch/arc/mm/dma.c     |   3
-rw-r--r--  arch/arc/mm/extable.c |  11
-rw-r--r--  arch/arc/mm/fault.c   |  18
-rw-r--r--  arch/arc/mm/init.c    |  33
-rw-r--r--  arch/arc/mm/ioremap.c |  55
-rw-r--r--  arch/arc/mm/mmap.c    |  26
-rw-r--r--  arch/arc/mm/tlb.c     | 137
-rw-r--r--  arch/arc/mm/tlbex.S   |   8
9 files changed, 166 insertions(+), 447 deletions(-)
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 55c6de138eae..7d2f93dc1e91 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -28,6 +28,10 @@ int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
+static struct cpuinfo_arc_cache {
+ unsigned int sz_k, line_len, colors;
+} ic_info, dc_info, slc_info;
+
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page);
@@ -35,78 +39,24 @@ void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
-char *arc_cache_mumbojumbo(int c, char *buf, int len)
-{
- int n = 0;
- struct cpuinfo_arc_cache *p;
-
-#define PR_CACHE(p, cfg, str) \
- if (!(p)->line_len) \
- n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
- else \
- n += scnprintf(buf + n, len - n, \
- str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
- (p)->sz_k, (p)->assoc, (p)->line_len, \
- (p)->vipt ? "VIPT" : "PIPT", \
- (p)->alias ? " aliasing" : "", \
- IS_USED_CFG(cfg));
-
- PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
- PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
-
- p = &cpuinfo_arc700[c].slc;
- if (p->line_len)
- n += scnprintf(buf + n, len - n,
- "SLC\t\t: %uK, %uB Line%s\n",
- p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
-
- n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
- perip_base,
- IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
-
- return buf;
-}
-
-/*
- * Read the Cache Build Confuration Registers, Decode them and save into
- * the cpuinfo structure for later use.
- * No Validation done here, simply read/convert the BCRs
- */
-static void read_decode_cache_bcr_arcv2(int cpu)
+static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
{
- struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
+ struct cpuinfo_arc_cache *p_slc = &slc_info;
+ struct bcr_identity ident;
struct bcr_generic sbcr;
-
- struct bcr_slc_cfg {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:24, way:2, lsz:2, sz:4;
-#else
- unsigned int sz:4, lsz:2, way:2, pad:24;
-#endif
- } slc_cfg;
-
- struct bcr_clust_cfg {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
-#else
- unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
-#endif
- } cbcr;
-
- struct bcr_volatile {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int start:4, limit:4, pad:22, order:1, disable:1;
-#else
- unsigned int disable:1, order:1, pad:22, limit:4, start:4;
-#endif
- } vol;
-
+ struct bcr_clust_cfg cbcr;
+ struct bcr_volatile vol;
+ int n = 0;
READ_BCR(ARC_REG_SLC_BCR, sbcr);
if (sbcr.ver) {
+ struct bcr_slc_cfg slc_cfg;
READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
p_slc->sz_k = 128 << slc_cfg.sz;
l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+ n += scnprintf(buf + n, len - n,
+ "SLC\t\t: %uK, %uB Line%s\n",
+ p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
@@ -129,70 +79,82 @@ static void read_decode_cache_bcr_arcv2(int cpu)
ioc_enable = 0;
}
+ READ_BCR(AUX_IDENTITY, ident);
+
/* HS 2.0 didn't have AUX_VOL */
- if (cpuinfo_arc700[cpu].core.family > 0x51) {
+ if (ident.family > 0x51) {
READ_BCR(AUX_VOL, vol);
perip_base = vol.start << 28;
/* HS 3.0 has limit and strict-ordering fields */
- if (cpuinfo_arc700[cpu].core.family > 0x52)
+ if (ident.family > 0x52)
perip_end = (vol.limit << 28) - 1;
}
+
+ n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+ perip_base,
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
+
+ return n;
}
-void read_decode_cache_bcr(void)
+int arc_cache_mumbojumbo(int c, char *buf, int len)
{
- struct cpuinfo_arc_cache *p_ic, *p_dc;
- unsigned int cpu = smp_processor_id();
- struct bcr_cache {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
-#else
- unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
-#endif
- } ibcr, dbcr;
+ struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
+ struct bcr_cache ibcr, dbcr;
+ int vipt, assoc;
+ int n = 0;
- p_ic = &cpuinfo_arc700[cpu].icache;
READ_BCR(ARC_REG_IC_BCR, ibcr);
-
if (!ibcr.ver)
goto dc_chk;
- if (ibcr.ver <= 3) {
+ if (is_isa_arcompact() && (ibcr.ver <= 3)) {
BUG_ON(ibcr.config != 3);
- p_ic->assoc = 2; /* Fixed to 2w set assoc */
- } else if (ibcr.ver >= 4) {
- p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
+ assoc = 2; /* Fixed to 2w set assoc */
+ } else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
+ assoc = 1 << ibcr.config; /* 1,2,4,8 */
}
p_ic->line_len = 8 << ibcr.line_len;
p_ic->sz_k = 1 << (ibcr.sz - 1);
- p_ic->vipt = 1;
- p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
+ p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
+
+ n += scnprintf(buf + n, len - n,
+ "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
+ p_ic->sz_k, assoc, p_ic->line_len,
+ p_ic->colors > 1 ? " aliasing" : "",
+ IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
dc_chk:
- p_dc = &cpuinfo_arc700[cpu].dcache;
READ_BCR(ARC_REG_DC_BCR, dbcr);
-
if (!dbcr.ver)
goto slc_chk;
- if (dbcr.ver <= 3) {
+ if (is_isa_arcompact() && (dbcr.ver <= 3)) {
BUG_ON(dbcr.config != 2);
- p_dc->assoc = 4; /* Fixed to 4w set assoc */
- p_dc->vipt = 1;
- p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
- } else if (dbcr.ver >= 4) {
- p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
- p_dc->vipt = 0;
- p_dc->alias = 0; /* PIPT so can't VIPT alias */
+ vipt = 1;
+ assoc = 4; /* Fixed to 4w set assoc */
+ p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
+ } else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
+ vipt = 0;
+ assoc = 1 << dbcr.config; /* 1,2,4,8 */
+ p_dc->colors = 1; /* PIPT so can't VIPT alias */
}
p_dc->line_len = 16 << dbcr.line_len;
p_dc->sz_k = 1 << (dbcr.sz - 1);
+ n += scnprintf(buf + n, len - n,
+ "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
+ p_dc->sz_k, assoc, p_dc->line_len,
+ vipt ? "VIPT" : "PIPT",
+ IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
+
slc_chk:
if (is_isa_arcv2())
- read_decode_cache_bcr_arcv2(cpu);
+ n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
+
+ return n;
}
/*
@@ -581,7 +543,7 @@ static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
#endif /* CONFIG_ARC_HAS_ICACHE */
-noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
+static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -644,7 +606,7 @@ noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
-noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
+static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -740,47 +702,16 @@ static inline void arc_slc_enable(void)
* Exported APIs
*/
-/*
- * Handle cache congruency of kernel and userspace mappings of page when kernel
- * writes-to/reads-from
- *
- * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
- * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
- * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
- * -In SMP, if hardware caches are coherent
- *
- * There's a corollary case, where kernel READs from a userspace mapped page.
- * If the U-mapping is not congruent to K-mapping, former needs flushing.
- */
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
{
- struct address_space *mapping;
-
- if (!cache_is_vipt_aliasing()) {
- clear_bit(PG_dc_clean, &page->flags);
- return;
- }
-
- /* don't handle anon pages here */
- mapping = page_mapping_file(page);
- if (!mapping)
- return;
-
- /*
- * pagecache page, file not yet mapped to userspace
- * Make a note that K-mapping is dirty
- */
- if (!mapping_mapped(mapping)) {
- clear_bit(PG_dc_clean, &page->flags);
- } else if (page_mapcount(page)) {
-
- /* kernel reading from page with U-mapping */
- phys_addr_t paddr = (unsigned long)page_address(page);
- unsigned long vaddr = page->index << PAGE_SHIFT;
+ clear_bit(PG_dc_clean, &folio->flags.f);
+ return;
+}
+EXPORT_SYMBOL(flush_dcache_folio);
- if (addr_not_cache_congruent(paddr, vaddr))
- __flush_dcache_page(paddr, vaddr);
- }
+void flush_dcache_page(struct page *page)
+{
+ return flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
@@ -921,18 +852,18 @@ void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
}
/* wrapper to compile time eliminate alignment checks in flush loop */
-void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
{
- __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
+ __ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
}
/*
* wrapper to clearout kernel or userspace mappings of a page
* For kernel mappings @vaddr == @paddr
*/
-void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
{
- __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
+ __dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
}
noinline void flush_cache_all(void)
@@ -948,89 +879,18 @@ noinline void flush_cache_all(void)
}
-#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
-
-void flush_cache_mm(struct mm_struct *mm)
-{
- flush_cache_all();
-}
-
-void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
- unsigned long pfn)
-{
- phys_addr_t paddr = pfn << PAGE_SHIFT;
-
- u_vaddr &= PAGE_MASK;
-
- __flush_dcache_page(paddr, u_vaddr);
-
- if (vma->vm_flags & VM_EXEC)
- __inv_icache_page(paddr, u_vaddr);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- flush_cache_all();
-}
-
-void flush_anon_page(struct vm_area_struct *vma, struct page *page,
- unsigned long u_vaddr)
-{
- /* TBD: do we really need to clear the kernel mapping */
- __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
- __flush_dcache_page((phys_addr_t)page_address(page),
- (phys_addr_t)page_address(page));
-
-}
-
-#endif
-
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
+ struct folio *src = page_folio(from);
+ struct folio *dst = page_folio(to);
void *kfrom = kmap_atomic(from);
void *kto = kmap_atomic(to);
- int clean_src_k_mappings = 0;
-
- /*
- * If SRC page was already mapped in userspace AND it's U-mapping is
- * not congruent with K-mapping, sync former to physical page so that
- * K-mapping in memcpy below, sees the right data
- *
- * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
- * equally valid for SRC page as well
- *
- * For !VIPT cache, all of this gets compiled out as
- * addr_not_cache_congruent() is 0
- */
- if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
- __flush_dcache_page((unsigned long)kfrom, u_vaddr);
- clean_src_k_mappings = 1;
- }
copy_page(kto, kfrom);
- /*
- * Mark DST page K-mapping as dirty for a later finalization by
- * update_mmu_cache(). Although the finalization could have been done
- * here as well (given that both vaddr/paddr are available).
- * But update_mmu_cache() already has code to do that for other
- * non copied user pages (e.g. read faults which wire in pagecache page
- * directly).
- */
- clear_bit(PG_dc_clean, &to->flags);
-
- /*
- * if SRC was already usermapped and non-congruent to kernel mapping
- * sync the kernel mapping back to physical page
- */
- if (clean_src_k_mappings) {
- __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
- set_bit(PG_dc_clean, &from->flags);
- } else {
- clear_bit(PG_dc_clean, &from->flags);
- }
+ clear_bit(PG_dc_clean, &dst->flags.f);
+ clear_bit(PG_dc_clean, &src->flags.f);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
@@ -1038,8 +898,9 @@ void copy_user_highpage(struct page *to, struct page *from,
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
+ struct folio *folio = page_folio(page);
clear_page(to);
- clear_bit(PG_dc_clean, &page->flags);
+ clear_bit(PG_dc_clean, &folio->flags.f);
}
EXPORT_SYMBOL(clear_user_page);
@@ -1069,7 +930,7 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
* 3. All Caches need to be disabled when setting up IOC to elide any in-flight
* Coherency transactions
*/
-noinline void __init arc_ioc_setup(void)
+static noinline void __init arc_ioc_setup(void)
{
unsigned int ioc_base, mem_sz;
@@ -1131,12 +992,10 @@ noinline void __init arc_ioc_setup(void)
* one core suffices for all
* - IOC setup / dma callbacks only need to be done once
*/
-void __init arc_cache_init_master(void)
+static noinline void __init arc_cache_init_master(void)
{
- unsigned int __maybe_unused cpu = smp_processor_id();
-
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
- struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+ struct cpuinfo_arc_cache *ic = &ic_info;
if (!ic->line_len)
panic("cache support enabled but non-existent cache\n");
@@ -1149,14 +1008,14 @@ void __init arc_cache_init_master(void)
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
- if (is_isa_arcv2() && ic->alias)
+ if (is_isa_arcv2() && ic->colors > 1)
_cache_line_loop_ic_fn = __cache_line_loop_v3;
else
_cache_line_loop_ic_fn = __cache_line_loop;
}
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
- struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+ struct cpuinfo_arc_cache *dc = &dc_info;
if (!dc->line_len)
panic("cache support enabled but non-existent cache\n");
@@ -1166,18 +1025,8 @@ void __init arc_cache_init_master(void)
dc->line_len, L1_CACHE_BYTES);
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
- if (is_isa_arcompact()) {
- int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
- int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
-
- if (dc->alias) {
- if (!handled)
- panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
- if (CACHE_COLORS_NUM != num_colors)
- panic("CACHE_COLORS_NUM not optimized for config\n");
- } else if (!dc->alias && handled) {
- panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
- }
+ if (is_isa_arcompact() && dc->colors > 1) {
+ panic("Aliasing VIPT cache not supported\n");
}
}
@@ -1218,9 +1067,6 @@ void __init arc_cache_init_master(void)
void __ref arc_cache_init(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
- char str[256];
-
- pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
if (!cpu)
arc_cache_init_master();
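
With the aliasing-VIPT paths gone, flush_dcache_folio() above is purely a
deferral: it clears PG_dc_clean and leaves the actual wback/inv to
update_mmu_cache_range() in tlb.c. A minimal sketch of a caller relying on
that contract (fill_and_publish() is hypothetical; flush_dcache_folio() and
folio_address() are the kernel APIs used above):

#include <linux/mm.h>
#include <linux/cacheflush.h>
#include <linux/string.h>

/* Hypothetical helper: the kernel writes into a lowmem page-cache
 * folio that userspace may later map and execute. */
static void fill_and_publish(struct folio *folio, const void *src, size_t len)
{
	memcpy(folio_address(folio), src, len);	/* write via K-mapping */

	/* After this patch the call below only clears PG_dc_clean; the
	 * dcache wback + icache inv happens later, when
	 * update_mmu_cache_range() wires the folio into a VM_EXEC vma. */
	flush_dcache_folio(folio);
}
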
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 2a7fbbb83b70..6b85e94f3275 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
/*
* Plug in direct dma map ops.
*/
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
{
/*
* IOC hardware snoops all DMA traffic keeping the caches consistent
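
For reference, the slimmed-down hook now amounts to roughly the following
(a sketch consistent with the comment above, eliding any diagnostics the
real function may print):

void arch_setup_dma_ops(struct device *dev, bool coherent)
{
	/* With IOC snooping all DMA traffic, a coherent device needs
	 * no explicit cache maintenance ops. */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;
}
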
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
index 4e14c4244ea2..88fa3a4d4906 100644
--- a/arch/arc/mm/extable.c
+++ b/arch/arc/mm/extable.c
@@ -22,14 +22,3 @@ int fixup_exception(struct pt_regs *regs)
return 0;
}
-
-#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-
-unsigned long arc_clear_user_noinline(void __user *to,
- unsigned long n)
-{
- return __arc_clear_user(to, n);
-}
-EXPORT_SYMBOL(arc_clear_user_noinline);
-
-#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 5ca59a482632..95119a5e7761 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -13,6 +13,7 @@
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
+#include <asm/entry.h>
#include <asm/mmu.h>
/*
@@ -99,10 +100,10 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
if (faulthandler_disabled() || !mm)
goto no_context;
- if (regs->ecr_cause & ECR_C_PROTV_STORE) /* ST/EX */
+ if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
write = 1;
- else if ((regs->ecr_vec == ECR_V_PROTV) &&
- (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+ else if ((regs->ecr.vec == ECR_V_PROTV) &&
+ (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
exec = 1;
flags = FAULT_FLAG_DEFAULT;
@@ -113,15 +114,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (unlikely(address < vma->vm_start)) {
- if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
- goto bad_area;
- }
+ goto bad_area_nosemaphore;
/*
* vm_area is good, now check permissions for this memory access
@@ -161,6 +156,7 @@ retry:
bad_area:
mmap_read_unlock(mm);
+bad_area_nosemaphore:
/*
* Major/minor page fault accounting
* (in case of retry we only land here once)
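
lock_mm_and_find_vma() replaces the open-coded lock/lookup/stack-expansion
sequence deleted above. A simplified sketch of its semantics (eliding the
write-lock upgrade and killable-lock details of the real helper in
mm/memory.c); note that on failure the mmap lock is already dropped, which
is why the caller jumps to bad_area_nosemaphore:

static struct vm_area_struct *
sketch_lock_mm_and_find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		return vma;		/* success: mmap lock still held */

	/* The real helper additionally tries to grow a VM_GROWSDOWN
	 * stack vma down to addr before giving up. */
	mmap_read_unlock(mm);		/* failure: lock already dropped */
	return NULL;
}
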
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index ce4e939a7f07..a73cc94f806e 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -15,6 +15,7 @@
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/arcregs.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
@@ -74,11 +75,6 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
base, TO_MB(size), !in_use ? "Not used":"");
}
-bool arch_has_descending_max_zone_pfns(void)
-{
- return !IS_ENABLED(CONFIG_ARC_HAS_PAE40);
-}
-
/*
* First memory setup routine called from setup_arch()
* 1. setup swapper's mm @init_mm
@@ -92,7 +88,7 @@ void __init setup_arch_memory(void)
setup_initial_init_mm(_text, _etext, _edata, _end);
/* first page of system - kernel .vector starts here */
- min_low_pfn = virt_to_pfn(CONFIG_LINUX_RAM_BASE);
+ min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);
/* Last usable page of low mem */
max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
@@ -154,41 +150,18 @@ void __init setup_arch_memory(void)
*/
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
- high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
-
arch_pfn_offset = min(min_low_pfn, min_high_pfn);
kmap_init();
-
-#else /* CONFIG_HIGHMEM */
- /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */
- max_mapnr = max_low_pfn - min_low_pfn;
-
#endif /* CONFIG_HIGHMEM */
free_area_init(max_zone_pfn);
}
-static void __init highmem_init(void)
+void __init arch_mm_preinit(void)
{
#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
-
memblock_phys_free(high_mem_start, high_mem_sz);
- for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
#endif
-}
-
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
-{
- memblock_free_all();
- highmem_init();
BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index 712c2311daef..fd8897a0e52c 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
-#include <linux/slab.h>
#include <linux/cache.h>
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
@@ -25,13 +24,6 @@ static inline bool arc_uncached_addr_space(phys_addr_t paddr)
void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
- phys_addr_t end;
-
- /* Don't allow wraparound or zero size */
- end = paddr + size - 1;
- if (!size || (end < paddr))
- return NULL;
-
/*
* If the region is h/w uncached, MMU mapping can be elided as optim
* The cast to u32 is fine as this region can only be inside 4GB
@@ -40,7 +32,7 @@ void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
return (void __iomem *)(u32)paddr;
return ioremap_prot(paddr, size,
- pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+ pgprot_noncached(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap);
@@ -51,55 +43,20 @@ EXPORT_SYMBOL(ioremap);
* ARC hardware uncached region, this one still goes thru the MMU as caller
* might need finer access control (R/W/X)
*/
-void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
- unsigned long flags)
+void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
+ pgprot_t prot)
{
- unsigned int off;
- unsigned long vaddr;
- struct vm_struct *area;
- phys_addr_t end;
- pgprot_t prot = __pgprot(flags);
-
- /* Don't allow wraparound, zero size */
- end = paddr + size - 1;
- if ((!size) || (end < paddr))
- return NULL;
-
- /* An early platform driver might end up here */
- if (!slab_is_available())
- return NULL;
-
/* force uncached */
- prot = pgprot_noncached(prot);
-
- /* Mappings have to be page-aligned */
- off = paddr & ~PAGE_MASK;
- paddr &= PAGE_MASK_PHYS;
- size = PAGE_ALIGN(end + 1) - paddr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- area->phys_addr = paddr;
- vaddr = (unsigned long)area->addr;
- if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
- vunmap((void __force *)vaddr);
- return NULL;
- }
- return (void __iomem *)(off + (char __iomem *)vaddr);
+ return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
}
EXPORT_SYMBOL(ioremap_prot);
-
-void iounmap(const volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
{
/* weird double cast to handle phys_addr_t > 32 bits */
if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
return;
- vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+ generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
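
generic_ioremap_prot()/generic_iounmap() perform essentially the steps the
deleted open-coded bodies did. A condensed sketch of the mapping side,
mirroring the removed code rather than quoting mm/ioremap.c verbatim:

#include <linux/io.h>
#include <linux/vmalloc.h>

static void __iomem *sketch_generic_ioremap_prot(phys_addr_t paddr,
						 size_t size, pgprot_t prot)
{
	unsigned long offset = paddr & ~PAGE_MASK;
	struct vm_struct *area;
	unsigned long vaddr;

	/* Reject zero size or wrap-around, then page-align the request */
	if (!size || paddr + size - 1 < paddr)
		return NULL;
	paddr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area(size, VM_IOREMAP);	/* carve out vmalloc space */
	if (!area)
		return NULL;
	area->phys_addr = paddr;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *)(vaddr + offset);
}
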
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index fce5fa2b4f52..2185afe8d59f 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -14,10 +14,6 @@
#include <asm/cacheflush.h>
-#define COLOUR_ALIGN(addr, pgoff) \
- ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
- (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
-
/*
* Ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches.
@@ -27,25 +23,18 @@
*/
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- int do_align = 0;
- int aliasing = cache_is_vipt_aliasing();
- struct vm_unmapped_area_info info;
-
- /*
- * We only need to do colour alignment if D cache aliases.
- */
- if (aliasing)
- do_align = filp || (flags & MAP_SHARED);
+ struct vm_unmapped_area_info info = {};
/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
- if (aliasing && flags & MAP_SHARED &&
+ if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
@@ -55,10 +44,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;
if (addr) {
- if (do_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
+ addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
@@ -66,11 +52,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
- info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
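
With colour alignment gone and the info struct zeroed by the designated
initializer, the search reduces to the following (a condensed restatement
of the function above, not new behaviour):

static unsigned long sketch_get_unmapped_area(struct mm_struct *mm,
					      unsigned long len,
					      unsigned long pgoff)
{
	struct vm_unmapped_area_info info = {
		.length       = len,
		.low_limit    = mm->mmap_base,
		.high_limit   = TASK_SIZE,
		.align_offset = pgoff << PAGE_SHIFT,
		/* .flags/.align_mask stay 0: no SHMLBA colouring needed
		 * once aliasing VIPT D-caches are unsupported */
	};

	return vm_unmapped_area(&info);
}
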
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 5f71445f26bd..ed6915ba76ec 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -18,7 +18,9 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
-static int __read_mostly pae_exists;
+static struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
+} mmuinfo;
/*
* Utility Routine to erase a J-TLB entry
@@ -131,7 +133,7 @@ static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
noinline void local_flush_tlb_all(void)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
unsigned int entry;
int num_tlb = mmu->sets * mmu->ways;
@@ -210,7 +212,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long flags;
/* If range @start to @end is more than 32 TLB entries deep,
- * its better to move to a new ASID rather than searching for
+ * it's better to move to a new ASID rather than searching for
* individual entries and then shooting them down
*
* The calc above is rough, doesn't account for unaligned parts,
@@ -389,7 +391,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
/*
* Routine to create a TLB entry
*/
-void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
+static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
unsigned long flags;
unsigned int asid_or_sasid, rwx;
@@ -406,7 +408,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
* -More importantly it makes this handler inconsistent with fast-path
* TLB Refill handler which always deals with "current"
*
- * Lets see the use cases when current->mm != vma->mm and we land here
+ * Let's see the use cases when current->mm != vma->mm and we land here
* 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
* Here VM wants to pre-install a TLB entry for user stack while
* current->mm still points to pre-execve mm (hence the condition).
@@ -467,8 +469,8 @@ void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
* Note that flush (when done) involves both WBACK - so physical page is
* in sync as well as INV - so any non-congruent aliases don't remain
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
- pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
@@ -476,30 +478,28 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
create_tlb(vma, vaddr, ptep);
- if (page == ZERO_PAGE(0)) {
+ if (page == ZERO_PAGE(0))
return;
- }
/*
- * Exec page : Independent of aliasing/page-color considerations,
- * since icache doesn't snoop dcache on ARC, any dirty
- * K-mapping of a code page needs to be wback+inv so that
- * icache fetch by userspace sees code correctly.
- * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
- * so userspace sees the right data.
- * (Avoids the flush for Non-exec + congruent mapping case)
+ * For executable pages, since icache doesn't snoop dcache, any
+ * dirty K-mapping of a code page needs to be wback+inv so that
+ * icache fetch by userspace sees code correctly.
*/
- if ((vma->vm_flags & VM_EXEC) ||
- addr_not_cache_congruent(paddr, vaddr)) {
-
- int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
+ if (vma->vm_flags & VM_EXEC) {
+ struct folio *folio = page_folio(page);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags.f);
if (dirty) {
+ unsigned long offset = offset_in_folio(folio, paddr);
+ nr = folio_nr_pages(folio);
+ paddr -= offset;
+ vaddr -= offset;
/* wback + inv dcache lines (K-mapping) */
- __flush_dcache_page(paddr, paddr);
+ __flush_dcache_pages(paddr, paddr, nr);
/* invalidate any existing icache lines (U-mapping) */
if (vma->vm_flags & VM_EXEC)
- __inv_icache_page(paddr, vaddr);
+ __inv_icache_pages(paddr, vaddr, nr);
}
}
}
@@ -531,7 +531,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd)
{
pte_t pte = __pte(pmd_val(*pmd));
- update_mmu_cache(vma, addr, &pte);
+ update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
}
void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -560,89 +560,64 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
-void read_decode_mmu_bcr(void)
+int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- unsigned int tmp;
- struct bcr_mmu_3 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
- u_itlb:4, u_dtlb:4;
-#else
- unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
- ways:4, ver:8;
-#endif
- } *mmu3;
-
- struct bcr_mmu_4 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
- n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
-#else
- /* DTLB ITLB JES JE JA */
- unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
- pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
-#endif
- } *mmu4;
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned int bcr, u_dtlb, u_itlb, sasid;
+ struct bcr_mmu_3 *mmu3;
+ struct bcr_mmu_4 *mmu4;
+ char super_pg[64] = "";
+ int n = 0;
- tmp = read_aux_reg(ARC_REG_MMU_BCR);
- mmu->ver = (tmp >> 24);
+ bcr = read_aux_reg(ARC_REG_MMU_BCR);
+ mmu->ver = (bcr >> 24);
if (is_isa_arcompact() && mmu->ver == 3) {
- mmu3 = (struct bcr_mmu_3 *)&tmp;
+ mmu3 = (struct bcr_mmu_3 *)&bcr;
mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
mmu->sets = 1 << mmu3->sets;
mmu->ways = 1 << mmu3->ways;
- mmu->u_dtlb = mmu3->u_dtlb;
- mmu->u_itlb = mmu3->u_itlb;
- mmu->sasid = mmu3->sasid;
+ u_dtlb = mmu3->u_dtlb;
+ u_itlb = mmu3->u_itlb;
+ sasid = mmu3->sasid;
} else {
- mmu4 = (struct bcr_mmu_4 *)&tmp;
+ mmu4 = (struct bcr_mmu_4 *)&bcr;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
mmu->sets = 64 << mmu4->n_entry;
mmu->ways = mmu4->n_ways * 2;
- mmu->u_dtlb = mmu4->u_dtlb * 4;
- mmu->u_itlb = mmu4->u_itlb * 4;
- mmu->sasid = mmu4->sasid;
- pae_exists = mmu->pae = mmu4->pae;
+ u_dtlb = mmu4->u_dtlb * 4;
+ u_itlb = mmu4->u_itlb * 4;
+ sasid = mmu4->sasid;
+ mmu->pae = mmu4->pae;
}
-}
-char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
-{
- int n = 0;
- struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
- char super_pg[64] = "";
-
- if (p_mmu->s_pg_sz_m)
- scnprintf(super_pg, 64, "%dM Super Page %s",
- p_mmu->s_pg_sz_m,
- IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
+ if (mmu->s_pg_sz_m)
+ scnprintf(super_pg, 64, "/%dM%s",
+ mmu->s_pg_sz_m,
+ IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
n += scnprintf(buf + n, len - n,
- "MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
- p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
- p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
- p_mmu->u_dtlb, p_mmu->u_itlb,
- IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
-
- return buf;
+ "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
+ mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
+ mmu->sets, mmu->ways,
+ u_dtlb, u_itlb,
+ IS_AVAIL1(sasid, ", SASID"),
+ IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
+
+ return n;
}
int pae40_exist_but_not_enab(void)
{
- return pae_exists && !is_pae40_enabled();
+ return mmuinfo.pae && !is_pae40_enabled();
}
void arc_mmu_init(void)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- char str[256];
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
int compat = 0;
- pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
-
/*
* Can't be done in processor.h due to header include dependencies
*/
@@ -719,7 +694,7 @@ volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
int set, n_ways = mmu->ways;
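
arc_mmu_mumbojumbo() and arc_cache_mumbojumbo() now return a byte count
instead of a buffer pointer, so callers can chain them with the
bounded-append idiom used throughout: scnprintf() writes at most len - n
bytes, NUL included, and returns what it actually wrote. A sketch with a
hypothetical caller:

static int print_cpuinfo(int cpu, char *buf, int len)
{
	int n = 0;

	/* each call appends at buf + n and can never overrun len */
	n += arc_mmu_mumbojumbo(cpu, buf + n, len - n);
	n += arc_cache_mumbojumbo(cpu, buf + n, len - n);
	return n;	/* total bytes written */
}
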
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index e054780a8fe0..dc65e87a531f 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -5,19 +5,19 @@
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
* Vineetg: April 2011 :
- * -MMU v1: moved out legacy code into a seperate file
+ * -MMU v1: moved out legacy code into a separate file
* -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
* helps avoid a shift when preparing PD0 from PTE
*
* Vineetg: July 2009
- * -For MMU V2, we need not do heuristics at the time of commiting a D-TLB
- * entry, so that it doesn't knock out it's I-TLB entry
+ * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ * entry, so that it doesn't knock out its I-TLB entry
* -Some more fine tuning:
* bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
*
* Vineetg: July 2009
* -Practically rewrote the I/D TLB Miss handlers
- * Now 40 and 135 instructions a peice as compared to 131 and 449 resp.
+ * Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
* Hence Leaner by 1.5 K
* Used Conditional arithmetic to replace excessive branching
* Also used short instructions wherever possible