path: root/arch/arc/mm
Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/cache.c    438
-rw-r--r--  arch/arc/mm/dma.c        6
-rw-r--r--  arch/arc/mm/extable.c   23
-rw-r--r--  arch/arc/mm/fault.c     95
-rw-r--r--  arch/arc/mm/highmem.c   84
-rw-r--r--  arch/arc/mm/init.c     125
-rw-r--r--  arch/arc/mm/ioremap.c   51
-rw-r--r--  arch/arc/mm/mmap.c      41
-rw-r--r--  arch/arc/mm/tlb.c      407
-rw-r--r--  arch/arc/mm/tlbex.S     93
10 files changed, 356 insertions, 1007 deletions
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index a2fbea3ee07c..9106ceac323c 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -28,6 +28,10 @@ int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
+static struct cpuinfo_arc_cache {
+ unsigned int sz_k, line_len, colors;
+} ic_info, dc_info, slc_info;
+
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page);
@@ -35,78 +39,24 @@ void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
-char *arc_cache_mumbojumbo(int c, char *buf, int len)
-{
- int n = 0;
- struct cpuinfo_arc_cache *p;
-
-#define PR_CACHE(p, cfg, str) \
- if (!(p)->line_len) \
- n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
- else \
- n += scnprintf(buf + n, len - n, \
- str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
- (p)->sz_k, (p)->assoc, (p)->line_len, \
- (p)->vipt ? "VIPT" : "PIPT", \
- (p)->alias ? " aliasing" : "", \
- IS_USED_CFG(cfg));
-
- PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
- PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
-
- p = &cpuinfo_arc700[c].slc;
- if (p->line_len)
- n += scnprintf(buf + n, len - n,
- "SLC\t\t: %uK, %uB Line%s\n",
- p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
-
- n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
- perip_base,
- IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
-
- return buf;
-}
-
-/*
- * Read the Cache Build Confuration Registers, Decode them and save into
- * the cpuinfo structure for later use.
- * No Validation done here, simply read/convert the BCRs
- */
-static void read_decode_cache_bcr_arcv2(int cpu)
+static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
{
- struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
+ struct cpuinfo_arc_cache *p_slc = &slc_info;
+ struct bcr_identity ident;
struct bcr_generic sbcr;
-
- struct bcr_slc_cfg {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:24, way:2, lsz:2, sz:4;
-#else
- unsigned int sz:4, lsz:2, way:2, pad:24;
-#endif
- } slc_cfg;
-
- struct bcr_clust_cfg {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
-#else
- unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
-#endif
- } cbcr;
-
- struct bcr_volatile {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int start:4, limit:4, pad:22, order:1, disable:1;
-#else
- unsigned int disable:1, order:1, pad:22, limit:4, start:4;
-#endif
- } vol;
-
+ struct bcr_clust_cfg cbcr;
+ struct bcr_volatile vol;
+ int n = 0;
READ_BCR(ARC_REG_SLC_BCR, sbcr);
if (sbcr.ver) {
+ struct bcr_slc_cfg slc_cfg;
READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
p_slc->sz_k = 128 << slc_cfg.sz;
l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+ n += scnprintf(buf + n, len - n,
+ "SLC\t\t: %uK, %uB Line%s\n",
+ p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
@@ -129,70 +79,82 @@ static void read_decode_cache_bcr_arcv2(int cpu)
ioc_enable = 0;
}
+ READ_BCR(AUX_IDENTITY, ident);
+
/* HS 2.0 didn't have AUX_VOL */
- if (cpuinfo_arc700[cpu].core.family > 0x51) {
+ if (ident.family > 0x51) {
READ_BCR(AUX_VOL, vol);
perip_base = vol.start << 28;
/* HS 3.0 has limit and strict-ordering fields */
- if (cpuinfo_arc700[cpu].core.family > 0x52)
+ if (ident.family > 0x52)
perip_end = (vol.limit << 28) - 1;
}
+
+ n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+ perip_base,
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
+
+ return n;
}
-void read_decode_cache_bcr(void)
+int arc_cache_mumbojumbo(int c, char *buf, int len)
{
- struct cpuinfo_arc_cache *p_ic, *p_dc;
- unsigned int cpu = smp_processor_id();
- struct bcr_cache {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
-#else
- unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
-#endif
- } ibcr, dbcr;
+ struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
+ struct bcr_cache ibcr, dbcr;
+ int vipt, assoc;
+ int n = 0;
- p_ic = &cpuinfo_arc700[cpu].icache;
READ_BCR(ARC_REG_IC_BCR, ibcr);
-
if (!ibcr.ver)
goto dc_chk;
- if (ibcr.ver <= 3) {
+ if (is_isa_arcompact() && (ibcr.ver <= 3)) {
BUG_ON(ibcr.config != 3);
- p_ic->assoc = 2; /* Fixed to 2w set assoc */
- } else if (ibcr.ver >= 4) {
- p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
+ assoc = 2; /* Fixed to 2w set assoc */
+ } else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
+ assoc = 1 << ibcr.config; /* 1,2,4,8 */
}
p_ic->line_len = 8 << ibcr.line_len;
p_ic->sz_k = 1 << (ibcr.sz - 1);
- p_ic->vipt = 1;
- p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
+ p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
+
+ n += scnprintf(buf + n, len - n,
+ "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
+ p_ic->sz_k, assoc, p_ic->line_len,
+ p_ic->colors > 1 ? " aliasing" : "",
+ IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
dc_chk:
- p_dc = &cpuinfo_arc700[cpu].dcache;
READ_BCR(ARC_REG_DC_BCR, dbcr);
-
if (!dbcr.ver)
goto slc_chk;
- if (dbcr.ver <= 3) {
+ if (is_isa_arcompact() && (dbcr.ver <= 3)) {
BUG_ON(dbcr.config != 2);
- p_dc->assoc = 4; /* Fixed to 4w set assoc */
- p_dc->vipt = 1;
- p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
- } else if (dbcr.ver >= 4) {
- p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
- p_dc->vipt = 0;
- p_dc->alias = 0; /* PIPT so can't VIPT alias */
+ vipt = 1;
+ assoc = 4; /* Fixed to 4w set assoc */
+ p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
+ } else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
+ vipt = 0;
+ assoc = 1 << dbcr.config; /* 1,2,4,8 */
+ p_dc->colors = 1; /* PIPT so can't VIPT alias */
}
p_dc->line_len = 16 << dbcr.line_len;
p_dc->sz_k = 1 << (dbcr.sz - 1);
+ n += scnprintf(buf + n, len - n,
+ "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
+ p_dc->sz_k, assoc, p_dc->line_len,
+ vipt ? "VIPT" : "PIPT",
+ IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
+
slc_chk:
if (is_isa_arcv2())
- read_decode_cache_bcr_arcv2(cpu);
+ n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
+
+ return n;
}
/*
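Not part of the patch: the rework above folds the old vipt/alias booleans into a single colors count, since a VIPT cache can alias only when its way-size (total size / associativity) exceeds the page size, i.e. when colors > 1. A minimal, runnable userspace sketch of that arithmetic; the cache geometries below are illustrative, not taken from any particular ARC configuration.

/* Userspace sketch of the "colors" computation used in the hunk above. */
#include <stdio.h>

static unsigned int colors(unsigned int sz_k, unsigned int assoc,
			   unsigned int page_kb)
{
	return sz_k / assoc / page_kb;	/* same formula as p_ic->colors */
}

int main(void)
{
	printf("32K 2-way I$, 8K page: %u colors (aliasing possible)\n",
	       colors(32, 2, 8));
	printf("32K 4-way D$, 8K page: %u color (no aliasing)\n",
	       colors(32, 4, 8));
	return 0;
}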
@@ -205,93 +167,24 @@ slc_chk:
#define OP_INV_IC 0x4
/*
- * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
- *
- * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
- * The orig Cache Management Module "CDU" only required paddr to invalidate a
- * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
- * Infact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
- * the exact same line.
+ * Cache Flush programming model
*
- * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
- * paddr alone could not be used to correctly index the cache.
+ * ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias.
+ * Programming model requires both paddr and vaddr irrespective of aliasing
+ * considerations:
+ * - vaddr in {I,D}C_IV?L
+ * - paddr in {I,D}C_PTAG
*
- * ------------------
- * MMU v1/v2 (Fixed Page Size 8k)
- * ------------------
- * The solution was to provide CDU with these additonal vaddr bits. These
- * would be bits [x:13], x would depend on cache-geometry, 13 comes from
- * standard page size of 8k.
- * H/w folks chose [17:13] to be a future safe range, and moreso these 5 bits
- * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
- * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
- * represent the offset within cache-line. The adv of using this "clumsy"
- * interface for additional info was no new reg was needed in CDU programming
- * model.
+ * In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias.
+ * Programming model is different for aliasing vs. non-aliasing I$
+ * - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
+ * - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
*
- * 17:13 represented the max num of bits passable, actual bits needed were
- * fewer, based on the num-of-aliases possible.
- * -for 2 alias possibility, only bit 13 needed (32K cache)
- * -for 4 alias possibility, bits 14:13 needed (64K cache)
- *
- * ------------------
- * MMU v3
- * ------------------
- * This ver of MMU supports variable page sizes (1k-16k): although Linux will
- * only support 8k (default), 16k and 4k.
- * However from hardware perspective, smaller page sizes aggravate aliasing
- * meaning more vaddr bits needed to disambiguate the cache-line-op ;
- * the existing scheme of piggybacking won't work for certain configurations.
- * Two new registers IC_PTAG and DC_PTAG inttoduced.
- * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
+ * - If PAE40 is enabled, independent of aliasing considerations, the higher
+ * bits need to be written into PTAG_HI
*/
static inline
-void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
- unsigned long sz, const int op, const int full_page)
-{
- unsigned int aux_cmd;
- int num_lines;
-
- if (op == OP_INV_IC) {
- aux_cmd = ARC_REG_IC_IVIL;
- } else {
- /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
- aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
- }
-
- /* Ensure we properly floor/ceil the non-line aligned/sized requests
- * and have @paddr - aligned to cache line and integral @num_lines.
- * This however can be avoided for page sized since:
- * -@paddr will be cache-line aligned already (being page aligned)
- * -@sz will be integral multiple of line size (being page sized).
- */
- if (!full_page) {
- sz += paddr & ~CACHE_LINE_MASK;
- paddr &= CACHE_LINE_MASK;
- vaddr &= CACHE_LINE_MASK;
- }
-
- num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
-
- /* MMUv2 and before: paddr contains stuffed vaddrs bits */
- paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
-
- while (num_lines-- > 0) {
- write_aux_reg(aux_cmd, paddr);
- paddr += L1_CACHE_BYTES;
- }
-}
-
-/*
- * For ARC700 MMUv3 I-cache and D-cache flushes
- * - ARC700 programming model requires paddr and vaddr be passed in seperate
- * AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
- * caches actually alias or not.
- * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
- * (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
- */
-static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page)
{
@@ -350,17 +243,6 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
#ifndef USE_RGN_FLSH
/*
- * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
- * Here's how cache ops are implemented
- *
- * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
- * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
- * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
- * respectively, similar to MMU v3 programming model, hence
- * __cache_line_loop_v3() is used)
- *
- * If PAE40 is enabled, independent of aliasing considerations, the higher bits
- * needs to be written into PTAG_HI
*/
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
@@ -460,11 +342,9 @@ void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
#endif
-#if (CONFIG_ARC_MMU_VER < 3)
-#define __cache_line_loop __cache_line_loop_v2
-#elif (CONFIG_ARC_MMU_VER == 3)
+#ifdef CONFIG_ARC_MMU_V3
#define __cache_line_loop __cache_line_loop_v3
-#elif (CONFIG_ARC_MMU_VER > 3)
+#else
#define __cache_line_loop __cache_line_loop_v4
#endif
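Both remaining line-loop flavours keep the floor/ceil step that the deleted __cache_line_loop_v2() also had: align paddr down to a cache line, grow sz by the misaligned head, then issue one aux-register write per line. A standalone userspace sketch of just that loop structure, with printf standing in for the write_aux_reg() calls; the L1_CACHE_BYTES value and addresses are illustrative.

/* Userspace model of the shared floor/ceil + per-line loop. */
#include <stdio.h>

#define L1_CACHE_BYTES	64UL
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))

int main(void)
{
	unsigned long paddr = 0x80001234, sz = 100;

	sz += paddr & ~CACHE_LINE_MASK;		/* grow by the misaligned head */
	paddr &= CACHE_LINE_MASK;		/* floor to a line boundary */

	unsigned long num_lines = (sz + L1_CACHE_BYTES - 1) / L1_CACHE_BYTES;

	while (num_lines--) {
		/* kernel code writes an aux reg (e.g. DC_FLDL/IC_IVIL) here */
		printf("line op @ %#lx\n", paddr);
		paddr += L1_CACHE_BYTES;
	}
	return 0;
}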
@@ -483,7 +363,7 @@ static inline void __before_dc_op(const int op)
{
if (op == OP_FLUSH_N_INV) {
/* Dcache provides 2 cmd: FLUSH or INV
- * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+ * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
* flush-n-inv is achieved by INV cmd but with IM=1
* So toggle INV sub-mode depending on op request and default
*/
@@ -663,7 +543,7 @@ static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
#endif /* CONFIG_ARC_HAS_ICACHE */
-noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
+static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -726,7 +606,7 @@ noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
-noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
+static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -822,47 +702,16 @@ static inline void arc_slc_enable(void)
* Exported APIs
*/
-/*
- * Handle cache congruency of kernel and userspace mappings of page when kernel
- * writes-to/reads-from
- *
- * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
- * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
- * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
- * -In SMP, if hardware caches are coherent
- *
- * There's a corollary case, where kernel READs from a userspace mapped page.
- * If the U-mapping is not congruent to to K-mapping, former needs flushing.
- */
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
{
- struct address_space *mapping;
-
- if (!cache_is_vipt_aliasing()) {
- clear_bit(PG_dc_clean, &page->flags);
- return;
- }
-
- /* don't handle anon pages here */
- mapping = page_mapping_file(page);
- if (!mapping)
- return;
-
- /*
- * pagecache page, file not yet mapped to userspace
- * Make a note that K-mapping is dirty
- */
- if (!mapping_mapped(mapping)) {
- clear_bit(PG_dc_clean, &page->flags);
- } else if (page_mapcount(page)) {
-
- /* kernel reading from page with U-mapping */
- phys_addr_t paddr = (unsigned long)page_address(page);
- unsigned long vaddr = page->index << PAGE_SHIFT;
+ clear_bit(PG_dc_clean, &folio->flags);
+ return;
+}
+EXPORT_SYMBOL(flush_dcache_folio);
- if (addr_not_cache_congruent(paddr, vaddr))
- __flush_dcache_page(paddr, vaddr);
- }
+void flush_dcache_page(struct page *page)
+{
+ return flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
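With the aliasing-D$ handling gone, flush_dcache_folio() is pure bookkeeping: it clears PG_dc_clean to note that the kernel mapping may be out of sync, and the actual write-back is deferred until the page is next wired into a user mapping. A condensed, non-standalone sketch of that handshake; the consumer side is update_mmu_cache_range() in the tlb.c hunk further down, and paddr/vaddr/nr stand for the values available there.

	/* producer (here): kernel touched the page, mark K-mapping dirty */
	clear_bit(PG_dc_clean, &folio->flags);

	/* consumer (update_mmu_cache_range(), VM_EXEC vmas): flush once */
	if (!test_and_set_bit(PG_dc_clean, &folio->flags)) {
		__flush_dcache_pages(paddr, paddr, nr);	/* wback+inv K-mapping */
		__inv_icache_pages(paddr, vaddr, nr);	/* drop stale I$ lines */
	}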
@@ -992,7 +841,7 @@ EXPORT_SYMBOL(flush_icache_range);
* @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
* However in one instance, when called by kprobe (for a breakpt in
* builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
- * use a paddr to index the cache (despite VIPT). This is fine since since a
+ * use a paddr to index the cache (despite VIPT). This is fine since a
* builtin kernel page will not have any virtual mappings.
* kprobe on loadable module will be kernel vaddr.
*/
@@ -1003,18 +852,18 @@ void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
}
/* wrapper to compile time eliminate alignment checks in flush loop */
-void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
+void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
{
- __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
+ __ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
}
/*
* wrapper to clearout kernel or userspace mappings of a page
* For kernel mappings @vaddr == @paddr
*/
-void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
+void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
{
- __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
+ __dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
}
noinline void flush_cache_all(void)
@@ -1030,89 +879,18 @@ noinline void flush_cache_all(void)
}
-#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
-
-void flush_cache_mm(struct mm_struct *mm)
-{
- flush_cache_all();
-}
-
-void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
- unsigned long pfn)
-{
- phys_addr_t paddr = pfn << PAGE_SHIFT;
-
- u_vaddr &= PAGE_MASK;
-
- __flush_dcache_page(paddr, u_vaddr);
-
- if (vma->vm_flags & VM_EXEC)
- __inv_icache_page(paddr, u_vaddr);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- flush_cache_all();
-}
-
-void flush_anon_page(struct vm_area_struct *vma, struct page *page,
- unsigned long u_vaddr)
-{
- /* TBD: do we really need to clear the kernel mapping */
- __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
- __flush_dcache_page((phys_addr_t)page_address(page),
- (phys_addr_t)page_address(page));
-
-}
-
-#endif
-
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
+ struct folio *src = page_folio(from);
+ struct folio *dst = page_folio(to);
void *kfrom = kmap_atomic(from);
void *kto = kmap_atomic(to);
- int clean_src_k_mappings = 0;
-
- /*
- * If SRC page was already mapped in userspace AND it's U-mapping is
- * not congruent with K-mapping, sync former to physical page so that
- * K-mapping in memcpy below, sees the right data
- *
- * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
- * equally valid for SRC page as well
- *
- * For !VIPT cache, all of this gets compiled out as
- * addr_not_cache_congruent() is 0
- */
- if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
- __flush_dcache_page((unsigned long)kfrom, u_vaddr);
- clean_src_k_mappings = 1;
- }
copy_page(kto, kfrom);
- /*
- * Mark DST page K-mapping as dirty for a later finalization by
- * update_mmu_cache(). Although the finalization could have been done
- * here as well (given that both vaddr/paddr are available).
- * But update_mmu_cache() already has code to do that for other
- * non copied user pages (e.g. read faults which wire in pagecache page
- * directly).
- */
- clear_bit(PG_dc_clean, &to->flags);
-
- /*
- * if SRC was already usermapped and non-congruent to kernel mapping
- * sync the kernel mapping back to physical page
- */
- if (clean_src_k_mappings) {
- __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
- set_bit(PG_dc_clean, &from->flags);
- } else {
- clear_bit(PG_dc_clean, &from->flags);
- }
+ clear_bit(PG_dc_clean, &dst->flags);
+ clear_bit(PG_dc_clean, &src->flags);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
@@ -1120,10 +898,11 @@ void copy_user_highpage(struct page *to, struct page *from,
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
+ struct folio *folio = page_folio(page);
clear_page(to);
- clear_bit(PG_dc_clean, &page->flags);
+ clear_bit(PG_dc_clean, &folio->flags);
}
-
+EXPORT_SYMBOL(clear_user_page);
/**********************************************************************
* Explicit Cache flush request from user space via syscall
@@ -1151,7 +930,7 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
* 3. All Caches need to be disabled when setting up IOC to elide any in-flight
* Coherency transactions
*/
-noinline void __init arc_ioc_setup(void)
+static noinline void __init arc_ioc_setup(void)
{
unsigned int ioc_base, mem_sz;
@@ -1213,12 +992,10 @@ noinline void __init arc_ioc_setup(void)
* one core suffices for all
* - IOC setup / dma callbacks only need to be done once
*/
-void __init arc_cache_init_master(void)
+static noinline void __init arc_cache_init_master(void)
{
- unsigned int __maybe_unused cpu = smp_processor_id();
-
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
- struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+ struct cpuinfo_arc_cache *ic = &ic_info;
if (!ic->line_len)
panic("cache support enabled but non-existent cache\n");
@@ -1231,14 +1008,14 @@ void __init arc_cache_init_master(void)
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
- if (is_isa_arcv2() && ic->alias)
+ if (is_isa_arcv2() && ic->colors > 1)
_cache_line_loop_ic_fn = __cache_line_loop_v3;
else
_cache_line_loop_ic_fn = __cache_line_loop;
}
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
- struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+ struct cpuinfo_arc_cache *dc = &dc_info;
if (!dc->line_len)
panic("cache support enabled but non-existent cache\n");
@@ -1248,18 +1025,8 @@ void __init arc_cache_init_master(void)
dc->line_len, L1_CACHE_BYTES);
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
- if (is_isa_arcompact()) {
- int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
- int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
-
- if (dc->alias) {
- if (!handled)
- panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
- if (CACHE_COLORS_NUM != num_colors)
- panic("CACHE_COLORS_NUM not optimized for config\n");
- } else if (!dc->alias && handled) {
- panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
- }
+ if (is_isa_arcompact() && dc->colors > 1) {
+ panic("Aliasing VIPT cache not supported\n");
}
}
@@ -1300,9 +1067,6 @@ void __init arc_cache_init_master(void)
void __ref arc_cache_init(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
- char str[256];
-
- pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
if (!cpu)
arc_cache_init_master();
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index e947572a521e..197707bc7658 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -3,7 +3,7 @@
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -32,7 +32,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
/*
* Cache operations depending on function and direction argument, inspired by
- * https://lkml.org/lkml/2018/5/18/979
+ * https://lore.kernel.org/lkml/20180518175004.GF17671@n2100.armlinux.org.uk
* "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
* dma-mapping: provide a generic dma-noncoherent implementation)"
*
@@ -91,7 +91,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
* Plug in direct dma map ops.
*/
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *iommu, bool coherent)
+ bool coherent)
{
/*
* IOC hardware snoops all DMA traffic keeping the caches consistent
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
index b06b09ddf924..88fa3a4d4906 100644
--- a/arch/arc/mm/extable.c
+++ b/arch/arc/mm/extable.c
@@ -22,26 +22,3 @@ int fixup_exception(struct pt_regs *regs)
return 0;
}
-
-#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-
-unsigned long arc_clear_user_noinline(void __user *to,
- unsigned long n)
-{
- return __arc_clear_user(to, n);
-}
-EXPORT_SYMBOL(arc_clear_user_noinline);
-
-long arc_strncpy_from_user_noinline(char *dst, const char __user *src,
- long count)
-{
- return __arc_strncpy_from_user(dst, src, count);
-}
-EXPORT_SYMBOL(arc_strncpy_from_user_noinline);
-
-long arc_strnlen_user_noinline(const char __user *src, long n)
-{
- return __arc_strnlen_user(src, n);
-}
-EXPORT_SYMBOL(arc_strnlen_user_noinline);
-#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index fb86bc3e9b35..95119a5e7761 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -13,7 +13,7 @@
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
-#include <asm/pgalloc.h>
+#include <asm/entry.h>
#include <asm/mmu.h>
/*
@@ -34,28 +34,34 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
- pgd = pgd_offset_fast(current->active_mm, address);
+ pgd = pgd_offset(current->active_mm, address);
pgd_k = pgd_offset_k(address);
- if (!pgd_present(*pgd_k))
+ if (pgd_none(*pgd_k))
goto bad_area;
+ if (!pgd_present(*pgd))
+ set_pgd(pgd, *pgd_k);
p4d = p4d_offset(pgd, address);
p4d_k = p4d_offset(pgd_k, address);
- if (!p4d_present(*p4d_k))
+ if (p4d_none(*p4d_k))
goto bad_area;
+ if (!p4d_present(*p4d))
+ set_p4d(p4d, *p4d_k);
pud = pud_offset(p4d, address);
pud_k = pud_offset(p4d_k, address);
- if (!pud_present(*pud_k))
+ if (pud_none(*pud_k))
goto bad_area;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
+ if (pmd_none(*pmd_k))
goto bad_area;
-
- set_pmd(pmd, *pmd_k);
+ if (!pmd_present(*pmd))
+ set_pmd(pmd, *pmd_k);
/* XXX: create the TLB entry here */
return 0;
@@ -94,28 +100,23 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
if (faulthandler_disabled() || !mm)
goto no_context;
- if (regs->ecr_cause & ECR_C_PROTV_STORE) /* ST/EX */
+ if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
write = 1;
- else if ((regs->ecr_vec == ECR_V_PROTV) &&
- (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+ else if ((regs->ecr.vec == ECR_V_PROTV) &&
+ (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
exec = 1;
- flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if (write)
flags |= FAULT_FLAG_WRITE;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- down_read(&mm->mmap_sem);
-
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (unlikely(address < vma->vm_start)) {
- if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
- goto bad_area;
- }
+ goto bad_area_nosemaphore;
/*
* vm_area is good, now check permissions for this memory access
@@ -131,56 +132,38 @@ retry:
goto bad_area;
}
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
/*
- * Fault retry nuances
+ * Fault retry nuances, mmap_lock already relinquished by core mm
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
-
- /*
- * If fault needs to be retried, handle any pending signals
- * first (by returning to user mode).
- * mmap_sem already relinquished by core mm for RETRY case
- */
- if (fatal_signal_pending(current)) {
- if (!user_mode(regs))
- goto no_context;
- return;
- }
- /*
- * retry state machine
- */
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
+bad_area_nosemaphore:
/*
* Major/minor page fault accounting
* (in case of retry we only land here once)
*/
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
- if (likely(!(fault & VM_FAULT_ERROR))) {
- if (fault & VM_FAULT_MAJOR) {
- tsk->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
- regs, address);
- } else {
- tsk->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
- regs, address);
- }
-
+ if (likely(!(fault & VM_FAULT_ERROR)))
/* Normal return path: fault Handled Gracefully */
return;
- }
if (!user_mode(regs))
goto no_context;
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index fc8849e4f72e..c79912a6b196 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -6,8 +6,8 @@
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -36,9 +36,8 @@
* This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
* 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
*
- * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
- * slots across NR_CPUS would be more than sufficient (generic code defines
- * KM_TYPE_NR as 20).
+ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
+ * CPU. So the number of CPUs sharing a single PTE page is limited.
*
* - pkmap being preemptible, in theory could do with more than 256 concurrent
* mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
@@ -47,80 +46,12 @@
*/
extern pte_t * pkmap_page_table;
-static pte_t * fixmap_page_table;
-
-void *kmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return page_address(page);
-
- return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void *kmap_atomic(struct page *page)
-{
- int idx, cpu_idx;
- unsigned long vaddr;
-
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- cpu_idx = kmap_atomic_idx_push();
- idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
- vaddr = FIXMAP_ADDR(idx);
-
- set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
- mk_pte(page, kmap_prot));
-
- return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kv)
-{
- unsigned long kvaddr = (unsigned long)kv;
-
- if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
-
- /*
- * Because preemption is disabled, this vaddr can be associated
- * with the current allocated index.
- * But in case of multiple live kmap_atomic(), it still relies on
- * callers to unmap in right order.
- */
- int cpu_idx = kmap_atomic_idx();
- int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
-
- WARN_ON(kvaddr != FIXMAP_ADDR(idx));
-
- pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
- local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
-
- kmap_atomic_idx_pop();
- }
-
- pagefault_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
- pgd_t *pgd_k;
- p4d_t *p4d_k;
- pud_t *pud_k;
- pmd_t *pmd_k;
+ pmd_t *pmd_k = pmd_off_k(kvaddr);
pte_t *pte_k;
- pgd_k = pgd_offset_k(kvaddr);
- p4d_k = p4d_offset(pgd_k, kvaddr);
- pud_k = pud_offset(p4d_k, kvaddr);
- pmd_k = pmd_offset(pud_k, kvaddr);
-
pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
if (!pte_k)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
@@ -134,10 +65,9 @@ void __init kmap_init(void)
{
/* Due to recursive include hell, we can't do this in processor.h */
BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
+ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+ BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
- BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-
- BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
- fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
+ alloc_kmap_pgtable(FIXMAP_BASE);
}
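The removed kmap()/kmap_atomic()/__kunmap_atomic() bodies are superseded by the generic kmap_local infrastructure (hence the KM_MAX_IDX/FIX_KMAP_SLOTS build checks above). For reference, a minimal kernel-style sketch of the generic API callers use instead; the helper name and copy length are made up for illustration.

#include <linux/highmem.h>
#include <linux/string.h>

/* hypothetical helper: copy the start of a (possibly highmem) page */
static void demo_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_local_page(page);	/* short-lived, CPU-local map */

	memcpy(dst, src, len);
	kunmap_local(src);			/* unmap in reverse order of map */
}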
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 0920c969c466..6a71b23f1383 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -14,8 +14,8 @@
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/arcregs.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
@@ -27,13 +27,10 @@ static unsigned long low_mem_sz;
#ifdef CONFIG_HIGHMEM
static unsigned long min_high_pfn, max_high_pfn;
-static u64 high_mem_start;
-static u64 high_mem_sz;
-#endif
-
-#ifdef CONFIG_DISCONTIGMEM
-struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
+static phys_addr_t high_mem_start;
+static phys_addr_t high_mem_sz;
+unsigned long arch_pfn_offset;
+EXPORT_SYMBOL(arch_pfn_offset);
#endif
long __init arc_get_mem_sz(void)
@@ -63,11 +60,14 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
low_mem_sz = size;
in_use = 1;
+ memblock_add_node(base, size, 0, MEMBLOCK_NONE);
} else {
#ifdef CONFIG_HIGHMEM
high_mem_start = base;
high_mem_sz = size;
in_use = 1;
+ memblock_add_node(base, size, 1, MEMBLOCK_NONE);
+ memblock_reserve(base, size);
#endif
}
@@ -83,25 +83,16 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
*/
void __init setup_arch_memory(void)
{
- unsigned long zones_size[MAX_NR_ZONES];
- unsigned long zones_holes[MAX_NR_ZONES];
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
- init_mm.start_code = (unsigned long)_text;
- init_mm.end_code = (unsigned long)_etext;
- init_mm.end_data = (unsigned long)_edata;
- init_mm.brk = (unsigned long)_end;
+ setup_initial_init_mm(_text, _etext, _edata, _end);
/* first page of system - kernel .vector starts here */
- min_low_pfn = ARCH_PFN_OFFSET;
+ min_low_pfn = virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE);
/* Last usable page of low mem */
max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
-#ifdef CONFIG_FLATMEM
- /* pfn_valid() uses this */
- max_mapnr = max_low_pfn - min_low_pfn;
-#endif
-
/*------------- bootmem allocator setup -----------------------*/
/*
@@ -115,7 +106,6 @@ void __init setup_arch_memory(void)
* the crash
*/
- memblock_add_node(low_mem_start, low_mem_sz, 0);
memblock_reserve(CONFIG_LINUX_LINK_BASE,
__pa(_end) - CONFIG_LINUX_LINK_BASE);
@@ -133,54 +123,55 @@ void __init setup_arch_memory(void)
memblock_dump_all();
/*----------------- node/zones setup --------------------------*/
- memset(zones_size, 0, sizeof(zones_size));
- memset(zones_holes, 0, sizeof(zones_holes));
-
- zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
- zones_holes[ZONE_NORMAL] = 0;
-
- /*
- * We can't use the helper free_area_init(zones[]) because it uses
- * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
- * when our kernel doesn't start at PAGE_OFFSET, i.e.
- * PAGE_OFFSET != CONFIG_LINUX_RAM_BASE
- */
- free_area_init_node(0, /* node-id */
- zones_size, /* num pages per zone */
- min_low_pfn, /* first pfn of node */
- zones_holes); /* holes */
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
/*
- * Populate a new node with highmem
- *
* On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
- * than addresses in normal ala low memory (0x8000_0000 based).
+ * than addresses in normal aka low memory (0x8000_0000 based).
* Even with PAE, the huge peripheral space hole would waste a lot of
- * mem with single mem_map[]. This warrants a mem_map per region design.
- * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM.
- *
- * DISCONTIGMEM in turns requires multiple nodes. node 0 above is
- * populated with normal memory zone while node 1 only has highmem
+ * mem with single contiguous mem_map[].
+ * Thus when HIGHMEM on ARC is enabled the memory map corresponding
+ * to the hole is freed and ARC specific version of pfn_valid()
+ * handles the hole in the memory map.
*/
- node_set_online(1);
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- zones_size[ZONE_NORMAL] = 0;
- zones_holes[ZONE_NORMAL] = 0;
-
- zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
- zones_holes[ZONE_HIGHMEM] = 0;
-
- free_area_init_node(1, /* node-id */
- zones_size, /* num pages per zone */
- min_high_pfn, /* first pfn of node */
- zones_holes); /* holes */
+ /*
+ * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+ * For HIGHMEM without PAE max_high_pfn should be less than
+ * min_low_pfn to guarantee that these two regions don't overlap.
+ * For PAE case highmem is greater than lowmem, so it is natural
+ * to use max_high_pfn.
+ *
+ * In both cases, holes should be handled by pfn_valid().
+ */
+ max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
+
+ arch_pfn_offset = min(min_low_pfn, min_high_pfn);
kmap_init();
+
+#else /* CONFIG_HIGHMEM */
+ /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */
+ max_mapnr = max_low_pfn - min_low_pfn;
+
+#endif /* CONFIG_HIGHMEM */
+
+ free_area_init(max_zone_pfn);
+}
+
+static void __init highmem_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+ unsigned long tmp;
+
+ memblock_phys_free(high_mem_start, high_mem_sz);
+ for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
+ free_highmem_page(pfn_to_page(tmp));
#endif
}
@@ -192,14 +183,20 @@ void __init setup_arch_memory(void)
*/
void __init mem_init(void)
{
-#ifdef CONFIG_HIGHMEM
- unsigned long tmp;
+ memblock_free_all();
+ highmem_init();
- reset_all_zones_managed_pages();
- for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
- free_highmem_page(pfn_to_page(tmp));
-#endif
+ BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PMD * sizeof(pmd_t)) > PAGE_SIZE);
+ BUILD_BUG_ON((PTRS_PER_PTE * sizeof(pte_t)) > PAGE_SIZE);
+}
- memblock_free_all();
- mem_init_print_info(NULL);
+#ifdef CONFIG_HIGHMEM
+int pfn_valid(unsigned long pfn)
+{
+ return (pfn >= min_high_pfn && pfn <= max_high_pfn) ||
+ (pfn >= min_low_pfn && pfn <= max_low_pfn);
}
+EXPORT_SYMBOL(pfn_valid);
+#endif
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index fac4adc90204..b07004d53267 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
-#include <linux/slab.h>
#include <linux/cache.h>
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
@@ -25,13 +24,6 @@ static inline bool arc_uncached_addr_space(phys_addr_t paddr)
void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
- phys_addr_t end;
-
- /* Don't allow wraparound or zero size */
- end = paddr + size - 1;
- if (!size || (end < paddr))
- return NULL;
-
/*
* If the region is h/w uncached, MMU mapping can be elided as optim
* The cast to u32 is fine as this region can only be inside 4GB
@@ -39,7 +31,8 @@ void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
if (arc_uncached_addr_space(paddr))
return (void __iomem *)(u32)paddr;
- return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
+ return ioremap_prot(paddr, size,
+ pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
EXPORT_SYMBOL(ioremap);
@@ -50,54 +43,22 @@ EXPORT_SYMBOL(ioremap);
* ARC hardware uncached region, this one still goes thru the MMU as caller
* might need finer access control (R/W/X)
*/
-void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
unsigned long flags)
{
- unsigned long vaddr;
- struct vm_struct *area;
- phys_addr_t off, end;
pgprot_t prot = __pgprot(flags);
- /* Don't allow wraparound, zero size */
- end = paddr + size - 1;
- if ((!size) || (end < paddr))
- return NULL;
-
- /* An early platform driver might end up here */
- if (!slab_is_available())
- return NULL;
-
/* force uncached */
- prot = pgprot_noncached(prot);
-
- /* Mappings have to be page-aligned */
- off = paddr & ~PAGE_MASK;
- paddr &= PAGE_MASK;
- size = PAGE_ALIGN(end + 1) - paddr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- area->phys_addr = paddr;
- vaddr = (unsigned long)area->addr;
- if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
- vunmap((void __force *)vaddr);
- return NULL;
- }
- return (void __iomem *)(off + (char __iomem *)vaddr);
+ return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
}
EXPORT_SYMBOL(ioremap_prot);
-
-void iounmap(const void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
{
/* weird double cast to handle phys_addr_t > 32 bits */
if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
return;
- vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+ generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
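The conversion to generic_ioremap_prot()/generic_iounmap() keeps the ARC fast path: physical addresses inside the hardware-uncached window are returned as-is, everything else gets a regular uncached MMU mapping. Driver usage is unchanged; a small illustrative sketch follows, where the register base, size and probe function are hypothetical.

#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_REG_BASE	0xf0001000UL	/* hypothetical peripheral base */
#define DEMO_REG_SIZE	0x100

static int demo_probe(void)
{
	void __iomem *regs = ioremap(DEMO_REG_BASE, DEMO_REG_SIZE);

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs);	/* MMIO access through the uncached mapping */
	iounmap(regs);
	return 0;
}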
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 722d26b94307..3c1c7ae73292 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -14,10 +14,6 @@
#include <asm/cacheflush.h>
-#define COLOUR_ALIGN(addr, pgoff) \
- ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
- (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
-
/*
* Ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches.
@@ -31,21 +27,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- int do_align = 0;
- int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
- * We only need to do colour alignment if D cache aliases.
- */
- if (aliasing)
- do_align = filp || (flags & MAP_SHARED);
-
- /*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
- if (aliasing && flags & MAP_SHARED &&
+ if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
@@ -55,10 +43,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;
if (addr) {
- if (do_align)
- addr = COLOUR_ALIGN(addr, pgoff);
- else
- addr = PAGE_ALIGN(addr);
+ addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
@@ -70,7 +55,27 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
- info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
+
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_U_NONE,
+ [VM_READ] = PAGE_U_R,
+ [VM_WRITE] = PAGE_U_R,
+ [VM_WRITE | VM_READ] = PAGE_U_R,
+ [VM_EXEC] = PAGE_U_X_R,
+ [VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE] = PAGE_U_X_R,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED] = PAGE_U_NONE,
+ [VM_SHARED | VM_READ] = PAGE_U_R,
+ [VM_SHARED | VM_WRITE] = PAGE_U_W_R,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_U_W_R,
+ [VM_SHARED | VM_EXEC] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_U_X_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_U_X_W_R,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_U_X_W_R
+};
+DECLARE_VM_GET_PAGE_PROT
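DECLARE_VM_GET_PAGE_PROT comes from the generic mm headers and expands to roughly the lookup below, so the protection_map[] table above becomes the single source of truth for mmap permissions on ARC. Sketch of the generic expansion only, not ARC-specific code.

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);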
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index c340acd989a0..ad702b49aeb3 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -1,51 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * TLB Management (flush/create/diagnostics) for ARC700
+ * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*
- * vineetg: Aug 2011
- * -Reintroduce duplicate PD fixup - some customer chips still have the issue
- *
- * vineetg: May 2011
- * -No need to flush_cache_page( ) for each call to update_mmu_cache()
- * some of the LMBench tests improved amazingly
- * = page-fault thrice as fast (75 usec to 28 usec)
- * = mmap twice as fast (9.6 msec to 4.6 msec),
- * = fork (5.3 msec to 3.7 msec)
- *
- * vineetg: April 2011 :
- * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
- * helps avoid a shift when preparing PD0 from PTE
- *
- * vineetg: April 2011 : Preparing for MMU V3
- * -MMU v2/v3 BCRs decoded differently
- * -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
- * -tlb_entry_erase( ) can be void
- * -local_flush_tlb_range( ):
- * = need not "ceil" @end
- * = walks MMU only if range spans < 32 entries, as opposed to 256
- *
- * Vineetg: Sept 10th 2008
- * -Changes related to MMU v2 (Rel 4.8)
- *
- * Vineetg: Aug 29th 2008
- * -In TLB Flush operations (Metal Fix MMU) there is a explict command to
- * flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
- * it fails. Thus need to load it with ANY valid value before invoking
- * TLBIVUTLB cmd
- *
- * Vineetg: Aug 21th 2008:
- * -Reduced the duration of IRQ lockouts in TLB Flush routines
- * -Multiple copies of TLB erase code seperated into a "single" function
- * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
- * in interrupt-safe region.
- *
- * Vineetg: April 23rd Bug #93131
- * Problem: tlb_flush_kernel_range() doesn't do anything if the range to
- * flush is more than the size of TLB itself.
- *
- * Rahul Trivedi : Codito Technologies 2004
*/
#include <linux/module.h>
@@ -57,51 +15,12 @@
#include <asm/mmu_context.h>
#include <asm/mmu.h>
-/* Need for ARC MMU v2
- *
- * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
- * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
- * map into same set, there would be contention for the 2 ways causing severe
- * Thrashing.
- *
- * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
- * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
- * Given this, the thrasing problem should never happen because once the 3
- * J-TLB entries are created (even though 3rd will knock out one of the prev
- * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
- *
- * Yet we still see the Thrashing because a J-TLB Write cause flush of u-TLBs.
- * This is a simple design for keeping them in sync. So what do we do?
- * The solution which James came up was pretty neat. It utilised the assoc
- * of uTLBs by not invalidating always but only when absolutely necessary.
- *
- * - Existing TLB commands work as before
- * - New command (TLBWriteNI) for TLB write without clearing uTLBs
- * - New command (TLBIVUTLB) to invalidate uTLBs.
- *
- * The uTLBs need only be invalidated when pages are being removed from the
- * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
- * as a result of a miss, the removed entry is still allowed to exist in the
- * uTLBs as it is still valid and present in the OS page table. This allows the
- * full associativity of the uTLBs to hide the limited associativity of the main
- * TLB.
- *
- * During a miss handler, the new "TLBWriteNI" command is used to load
- * entries without clearing the uTLBs.
- *
- * When the OS page table is updated, TLB entries that may be associated with a
- * removed page are removed (flushed) from the TLB using TLBWrite. In this
- * circumstance, the uTLBs must also be cleared. This is done by using the
- * existing TLBWrite command. An explicit IVUTLB is also required for those
- * corner cases when TLBWrite was not executed at all because the corresp
- * J-TLB entry got evicted/replaced.
- */
-
-
/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
-static int __read_mostly pae_exists;
+static struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
+} mmuinfo;
/*
* Utility Routine to erase a J-TLB entry
@@ -120,32 +39,10 @@ static inline void __tlb_entry_erase(void)
static void utlb_invalidate(void)
{
-#if (CONFIG_ARC_MMU_VER >= 2)
-
-#if (CONFIG_ARC_MMU_VER == 2)
- /* MMU v2 introduced the uTLB Flush command.
- * There was however an obscure hardware bug, where uTLB flush would
- * fail when a prior probe for J-TLB (both totally unrelated) would
- * return lkup err - because the entry didn't exist in MMU.
- * The Workround was to set Index reg with some valid value, prior to
- * flush. This was fixed in MMU v3
- */
- unsigned int idx;
-
- /* make sure INDEX Reg is valid */
- idx = read_aux_reg(ARC_REG_TLBINDEX);
-
- /* If not write some dummy val */
- if (unlikely(idx & TLB_LKUP_ERR))
- write_aux_reg(ARC_REG_TLBINDEX, 0xa);
-#endif
-
write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
-#endif
-
}
-#if (CONFIG_ARC_MMU_VER < 4)
+#ifdef CONFIG_ARC_MMU_V3
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
@@ -176,7 +73,7 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
}
}
-static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
+static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
unsigned int idx;
@@ -206,7 +103,7 @@ static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
-#else /* CONFIG_ARC_MMU_VER >= 4) */
+#else /* MMUv4 */
static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
@@ -214,13 +111,16 @@ static void tlb_entry_erase(unsigned int vaddr_n_asid)
write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}
-static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
+static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
write_aux_reg(ARC_REG_TLBPD0, pd0);
- write_aux_reg(ARC_REG_TLBPD1, pd1);
- if (is_pae40_enabled())
+ if (!is_pae40_enabled()) {
+ write_aux_reg(ARC_REG_TLBPD1, pd1);
+ } else {
+ write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
+ }
write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}
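With PAE40 the physical address no longer fits in the 32-bit TLBPD1 register, so tlb_entry_insert() above splits pd1 between TLBPD1 (low word) and TLBPD1HI (bits above 32). A runnable userspace illustration of that split, using a made-up 40-bit value.

/* Userspace illustration of the PAE40 pd1 split done above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pd1 = 0x1234567890ULL;		/* example 40-bit paddr+flags */
	uint32_t lo = pd1 & 0xFFFFFFFF;		/* -> ARC_REG_TLBPD1 */
	uint32_t hi = pd1 >> 32;		/* -> ARC_REG_TLBPD1HI */

	printf("PD1=%#x PD1HI=%#x\n", lo, hi);
	return 0;
}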
@@ -233,7 +133,7 @@ static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
noinline void local_flush_tlb_all(void)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
unsigned int entry;
int num_tlb = mmu->sets * mmu->ways;
@@ -272,7 +172,7 @@ noinline void local_flush_tlb_all(void)
}
/*
- * Flush the entrie MM for userland. The fastest way is to move to Next ASID
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
*/
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
@@ -303,7 +203,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
* Difference between this and Kernel Range Flush is
* -Here the fastest way (if range is too large) is to move to next ASID
* without doing any explicit Shootdown
- * -In case of kernel Flush, entry has to be shot down explictly
+ * -In case of kernel Flush, entry has to be shot down explicitly
*/
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
@@ -491,12 +391,12 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
/*
* Routine to create a TLB entry
*/
-void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
+static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
unsigned long flags;
unsigned int asid_or_sasid, rwx;
unsigned long pd0;
- pte_t pd1;
+ phys_addr_t pd1;
/*
* create_tlb() assumes that current->mm == vma->mm, since
@@ -505,7 +405,6 @@ void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
*
* Removing the assumption involves
* -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
- * -Fix the TLB paranoid debug code to not trigger false negatives.
* -More importantly it makes this handler inconsistent with fast-path
* TLB Refill handler which always deals with "current"
*
@@ -528,8 +427,6 @@ void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
local_irq_save(flags);
- tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);
-
vaddr &= PAGE_MASK;
/* update this PTE credentials */
@@ -572,39 +469,37 @@ void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
* Note that flush (when done) involves both WBACK - so physical page is
* in sync as well as INV - so any non-congruent aliases don't remain
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
- pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+ unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
- phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);
- if (page == ZERO_PAGE(0)) {
+ if (page == ZERO_PAGE(0))
return;
- }
/*
- * Exec page : Independent of aliasing/page-color considerations,
- * since icache doesn't snoop dcache on ARC, any dirty
- * K-mapping of a code page needs to be wback+inv so that
- * icache fetch by userspace sees code correctly.
- * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
- * so userspace sees the right data.
- * (Avoids the flush for Non-exec + congruent mapping case)
+ * For executable pages, since icache doesn't snoop dcache, any
+ * dirty K-mapping of a code page needs to be wback+inv so that
+ * icache fetch by userspace sees code correctly.
*/
- if ((vma->vm_flags & VM_EXEC) ||
- addr_not_cache_congruent(paddr, vaddr)) {
-
- int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
+ if (vma->vm_flags & VM_EXEC) {
+ struct folio *folio = page_folio(page);
+ int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
if (dirty) {
+ unsigned long offset = offset_in_folio(folio, paddr);
+ nr = folio_nr_pages(folio);
+ paddr -= offset;
+ vaddr -= offset;
/* wback + inv dcache lines (K-mapping) */
- __flush_dcache_page(paddr, paddr);
+ __flush_dcache_pages(paddr, paddr, nr);
/* invalidate any existing icache lines (U-mapping) */
if (vma->vm_flags & VM_EXEC)
- __inv_icache_page(paddr, vaddr);
+ __inv_icache_pages(paddr, vaddr, nr);
}
}
}
@@ -620,7 +515,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
* Super Page size is configurable in hardware (4K to 16M), but fixed once
* RTL builds.
*
- * The exact THP size a Linx configuration will support is a function of:
+ * The exact THP size a Linux configuration will support is a function of:
* - MMU page size (typical 8K, RTL fixed)
* - software page walker address split between PGD:PTE:PFN (typical
* 11:8:13, but can be changed with 1 line)
@@ -636,44 +531,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd)
{
pte_t pte = __pte(pmd_val(*pmd));
- update_mmu_cache(vma, addr, &pte);
-}
-
-void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pgtable)
-{
- struct list_head *lh = (struct list_head *) pgtable;
-
- assert_spin_locked(&mm->page_table_lock);
-
- /* FIFO */
- if (!pmd_huge_pte(mm, pmdp))
- INIT_LIST_HEAD(lh);
- else
- list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
- pmd_huge_pte(mm, pmdp) = pgtable;
-}
-
-pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
-{
- struct list_head *lh;
- pgtable_t pgtable;
-
- assert_spin_locked(&mm->page_table_lock);
-
- pgtable = pmd_huge_pte(mm, pmdp);
- lh = (struct list_head *) pgtable;
- if (list_empty(lh))
- pmd_huge_pte(mm, pmdp) = NULL;
- else {
- pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
- list_del(lh);
- }
-
- pte_val(pgtable[0]) = 0;
- pte_val(pgtable[1]) = 0;
-
- return pgtable;
+ update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
}
void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -698,139 +556,92 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
#endif
-/* Read the Cache Build Confuration Registers, Decode them and save into
+/* Read the Cache Build Configuration Registers, Decode them and save into
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
-void read_decode_mmu_bcr(void)
+int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- unsigned int tmp;
- struct bcr_mmu_1_2 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
-#else
- unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
-#endif
- } *mmu2;
-
- struct bcr_mmu_3 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
- u_itlb:4, u_dtlb:4;
-#else
- unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
- ways:4, ver:8;
-#endif
- } *mmu3;
-
- struct bcr_mmu_4 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
- n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
-#else
- /* DTLB ITLB JES JE JA */
- unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
- pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
-#endif
- } *mmu4;
-
- tmp = read_aux_reg(ARC_REG_MMU_BCR);
- mmu->ver = (tmp >> 24);
-
- if (is_isa_arcompact()) {
- if (mmu->ver <= 2) {
- mmu2 = (struct bcr_mmu_1_2 *)&tmp;
- mmu->pg_sz_k = TO_KB(0x2000);
- mmu->sets = 1 << mmu2->sets;
- mmu->ways = 1 << mmu2->ways;
- mmu->u_dtlb = mmu2->u_dtlb;
- mmu->u_itlb = mmu2->u_itlb;
- } else {
- mmu3 = (struct bcr_mmu_3 *)&tmp;
- mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
- mmu->sets = 1 << mmu3->sets;
- mmu->ways = 1 << mmu3->ways;
- mmu->u_dtlb = mmu3->u_dtlb;
- mmu->u_itlb = mmu3->u_itlb;
- mmu->sasid = mmu3->sasid;
- }
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned int bcr, u_dtlb, u_itlb, sasid;
+ struct bcr_mmu_3 *mmu3;
+ struct bcr_mmu_4 *mmu4;
+ char super_pg[64] = "";
+ int n = 0;
+
+ bcr = read_aux_reg(ARC_REG_MMU_BCR);
+ mmu->ver = (bcr >> 24);
+
+ if (is_isa_arcompact() && mmu->ver == 3) {
+ mmu3 = (struct bcr_mmu_3 *)&bcr;
+ mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
+ mmu->sets = 1 << mmu3->sets;
+ mmu->ways = 1 << mmu3->ways;
+ u_dtlb = mmu3->u_dtlb;
+ u_itlb = mmu3->u_itlb;
+ sasid = mmu3->sasid;
} else {
- mmu4 = (struct bcr_mmu_4 *)&tmp;
+ mmu4 = (struct bcr_mmu_4 *)&bcr;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
mmu->sets = 64 << mmu4->n_entry;
mmu->ways = mmu4->n_ways * 2;
- mmu->u_dtlb = mmu4->u_dtlb * 4;
- mmu->u_itlb = mmu4->u_itlb * 4;
- mmu->sasid = mmu4->sasid;
- pae_exists = mmu->pae = mmu4->pae;
+ u_dtlb = mmu4->u_dtlb * 4;
+ u_itlb = mmu4->u_itlb * 4;
+ sasid = mmu4->sasid;
+ mmu->pae = mmu4->pae;
}
-}
-
-char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
-{
- int n = 0;
- struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
- char super_pg[64] = "";
- if (p_mmu->s_pg_sz_m)
- scnprintf(super_pg, 64, "%dM Super Page %s",
- p_mmu->s_pg_sz_m,
- IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
+ if (mmu->s_pg_sz_m)
+ scnprintf(super_pg, 64, "/%dM%s",
+ mmu->s_pg_sz_m,
+ IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
n += scnprintf(buf + n, len - n,
- "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
- p_mmu->ver, p_mmu->pg_sz_k, super_pg,
- p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
- p_mmu->u_dtlb, p_mmu->u_itlb,
- IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
+ "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
+ mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
+ mmu->sets, mmu->ways,
+ u_dtlb, u_itlb,
+ IS_AVAIL1(sasid, ", SASID"),
+ IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
- return buf;
+ return n;
}
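For readers decoding the arithmetic above, a worked example with made-up MMUv4 BCR fields; the field values are hypothetical, chosen only to exercise the formulas in the code.

/*
 * Hypothetical MMUv4 BCR fields run through the decode above:
 *   sz0 = 4      ->  pg_sz_k   = 1 << (4 - 1)   = 8     (8 KB pages)
 *   sz1 = 12     ->  s_pg_sz_m = 1 << (12 - 11) = 2     (2 MB super page)
 *   n_entry = 1  ->  sets      = 64 << 1        = 128
 *   n_ways = 2   ->  ways      = 2 * 2          = 4     (128x4 = 512-entry JTLB)
 *   u_dtlb = 2   ->  uDTLB     = 2 * 4          = 8
 *   u_itlb = 1   ->  uITLB     = 1 * 4          = 4
 */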
int pae40_exist_but_not_enab(void)
{
- return pae_exists && !is_pae40_enabled();
+ return mmuinfo.pae && !is_pae40_enabled();
}
void arc_mmu_init(void)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- char str[256];
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
int compat = 0;
- pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
-
/*
- * Can't be done in processor.h due to header include depenedencies
+ * Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));
/*
* stack top size sanity check,
- * Can't be done in processor.h due to header include depenedencies
+ * Can't be done in processor.h due to header include dependencies
*/
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
/*
* Ensure that MMU features assumed by kernel exist in hardware.
- * For older ARC700 cpus, it has to be exact match, since the MMU
- * revisions were not backwards compatible (MMUv3 TLB layout changed
- * so even if kernel for v2 didn't use any new cmds of v3, it would
- * still not work.
- * For HS cpus, MMUv4 was baseline and v5 is backwards compatible
- * (will run older software).
+ * - For older ARC700 cpus, only v3 is supported
+ * - For HS cpus, v4 was baseline and v5 is backwards compatible
+ * (will run older software).
*/
- if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
+ if (is_isa_arcompact() && mmu->ver == 3)
compat = 1;
- else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
+ else if (is_isa_arcv2() && mmu->ver >= 4)
compat = 1;
- if (!compat) {
- panic("MMU ver %d doesn't match kernel built for %d...\n",
- mmu->ver, CONFIG_ARC_MMU_VER);
- }
+ if (!compat)
+ panic("MMU ver %d doesn't match kernel configuration\n", mmu->ver);
if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
@@ -843,14 +654,11 @@ void arc_mmu_init(void)
if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
panic("Hardware doesn't support PAE40\n");
- /* Enable the MMU */
- write_aux_reg(ARC_REG_PID, MMU_ENABLE);
+ /* Enable the MMU with ASID 0 */
+ mmu_setup_asid(NULL, 0);
- /* In smp we use this reg for interrupt 1 scratch */
-#ifdef ARC_USE_SCRATCH_REG
- /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
- write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
-#endif
+ /* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
+ mmu_setup_pgd(NULL, swapper_pg_dir);
if (pae40_exist_but_not_enab())
write_aux_reg(ARC_REG_TLBPD1HI, 0);
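The two mmu_setup_*() helpers replace the open-coded aux-register writes deleted above. A minimal sketch of what they presumably look like, reconstructed from the removed lines rather than quoted from the helpers' actual definition:

/* Minimal sketch, mirroring the register writes this hunk removes */
static inline void mmu_setup_asid(struct mm_struct *mm, unsigned long asid)
{
	write_aux_reg(ARC_REG_PID, asid | MMU_ENABLE);
}

static inline void mmu_setup_pgd(struct mm_struct *mm, void *pgd)
{
#ifdef CONFIG_ISA_ARCV2
	/* cache pgd in MMU scratch reg so the TLB miss fast path skips
	 * the task->active_mm->pgd pointer chase */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, (unsigned int)pgd);
#endif
}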
@@ -881,12 +689,12 @@ void arc_mmu_init(void)
* the duplicate one.
* -Knob to be verbose abt it.(TODO: hook them up to debugfs)
*/
-volatile int dup_pd_silent; /* Be slient abt it or complain (default) */
+volatile int dup_pd_silent; /* Be silent about it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
int set, n_ways = mmu->ways;
@@ -945,40 +753,3 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
local_irq_restore(flags);
}
-
-/***********************************************************************
- * Diagnostic Routines
- * -Called from Low Level TLB Hanlders if things don;t look good
- **********************************************************************/
-
-#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
-
-/*
- * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
- * don't match
- */
-void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
-{
- pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
- is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);
-
- __asm__ __volatile__("flag 1");
-}
-
-void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
-{
- unsigned int mmu_asid;
-
- mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;
-
- /*
- * At the time of a TLB miss/installation
- * - HW version needs to match SW version
- * - SW needs to have a valid ASID
- */
- if (addr < 0x70000000 &&
- ((mm_asid == MM_CTXT_NO_ASID) ||
- (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
- print_asid_mismatch(mm_asid, mmu_asid, 0);
-}
-#endif
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 2efaf6ca0c06..e054780a8fe0 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -33,13 +33,12 @@
*/
#include <linux/linkage.h>
+#include <linux/pgtable.h>
#include <asm/entry.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
#include <asm/processor.h>
-#include <asm/tlb-mmu1.h>
#ifdef CONFIG_ISA_ARCOMPACT
;-----------------------------------------------------------------
@@ -94,11 +93,6 @@ ex_saved_reg1:
st_s r1, [r0, 4]
st_s r2, [r0, 8]
st_s r3, [r0, 12]
-
- ; VERIFY if the ASID in MMU-PID Reg is same as
- ; one in Linux data structures
-
- tlb_paranoid_check_asm
.endm
.macro TLBMISS_RESTORE_REGS
@@ -148,53 +142,16 @@ ex_saved_reg1:
#endif
;============================================================================
-; Troubleshooting Stuff
+;TLB Miss handling Code
;============================================================================
-; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
-; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
-; we use the MMU PID Reg to get current ASID.
-; In bizzare scenrios SW and HW ASID can get out-of-sync which is trouble.
-; So we try to detect this in TLB Mis shandler
-
-.macro tlb_paranoid_check_asm
-
-#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
-
- GET_CURR_TASK_ON_CPU r3
- ld r0, [r3, TASK_ACT_MM]
- ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
- breq r0, 0, 55f ; Error if no ASID allocated
-
- lr r1, [ARC_REG_PID]
- and r1, r1, 0xFF
-
- and r2, r0, 0xFF ; MMU PID bits only for comparison
- breq r1, r2, 5f
-
-55:
- ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
- lr r2, [erstatus]
- bbit0 r2, STATUS_U_BIT, 5f
-
- ; We sure are in troubled waters, Flag the error, but to do so
- ; need to switch to kernel mode stack to call error routine
- GET_TSK_STACK_BASE r3, sp
-
- ; Call printk to shoutout aloud
- mov r2, 1
- j print_asid_mismatch
-
-5: ; ASIDs match so proceed normally
- nop
-
+#ifndef PMD_SHIFT
+#define PMD_SHIFT PUD_SHIFT
#endif
-.endm
-
-;============================================================================
-;TLB Miss handling Code
-;============================================================================
+#ifndef PUD_SHIFT
+#define PUD_SHIFT PGDIR_SHIFT
+#endif
;-----------------------------------------------------------------------------
; This macro does the page-table lookup for the faulting address.
@@ -203,7 +160,7 @@ ex_saved_reg1:
lr r2, [efa]
-#ifdef ARC_USE_SCRATCH_REG
+#ifdef CONFIG_ISA_ARCV2
lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
#else
GET_CURR_TASK_ON_CPU r1
@@ -216,6 +173,24 @@ ex_saved_reg1:
tst r3, r3
bz do_slow_path_pf ; if no Page Table, do page fault
+#if CONFIG_PGTABLE_LEVELS > 3
+ lsr r0, r2, PUD_SHIFT ; Bits for indexing into PUD
+ and r0, r0, (PTRS_PER_PUD - 1)
+ ld.as r1, [r3, r0] ; PUD entry
+ tst r1, r1
+ bz do_slow_path_pf
+ mov r3, r1
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+ lsr r0, r2, PMD_SHIFT ; Bits for indexing into PMD
+ and r0, r0, (PTRS_PER_PMD - 1)
+ ld.as r1, [r3, r0] ; PMD entry
+ tst r1, r1
+ bz do_slow_path_pf
+ mov r3, r1
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
and.f 0, r3, _PAGE_HW_SZ ; Is this Huge PMD (thp)
add2.nz r1, r1, r0
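Rendered as C, the lookup above is a plain top-down walk. The sketch below uses the generic pgtable accessors and is an approximation of the assembly, not a drop-in replacement; the THP short-circuit on _PAGE_HW_SZ is omitted.

/* Hedged C rendering of the assembly walk; folded levels compile away
 * just as the CONFIG_PGTABLE_LEVELS guards do above */
static pte_t *ex_walk(struct mm_struct *mm, unsigned long efa)
{
	pgd_t *pgd = pgd_offset(mm, efa);	/* pgd from scratch reg / task */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;			/* -> do_slow_path_pf */
	p4d = p4d_offset(pgd, efa);
	pud = pud_offset(p4d, efa);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, efa);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, efa);	/* leaf PTE for CONV_PTE_TO_TLB */
}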
@@ -279,14 +254,7 @@ ex_saved_reg1:
; Commit the TLB entry into MMU
.macro COMMIT_ENTRY_TO_MMU
-#if (CONFIG_ARC_MMU_VER < 4)
-
-#ifdef CONFIG_EZNPS_MTM_EXT
- /* verify if entry for this vaddr+ASID already exists */
- sr TLBProbe, [ARC_REG_TLBCOMMAND]
- lr r0, [ARC_REG_TLBINDEX]
- bbit0 r0, 31, 88f
-#endif
+#ifdef CONFIG_ARC_MMU_V3
/* Get free TLB slot: Set = computed from vaddr, way = random */
sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
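For context, on MMUv3 the slot selection above is followed by a separate write command, while MMUv4 collapses the whole commit into one command. A hedged C-level sketch; the exact TLBWriteNI / TLBInsertEntry pairing is an assumption, not quoted from the macro.

/* Hedged sketch of the commit step; assumes TLBWriteNI / TLBInsertEntry */
static inline void ex_commit_entry(void)
{
#ifdef CONFIG_ARC_MMU_V3
	/* set picked from vaddr, way picked at random by hardware ... */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
	/* ... then write PD0/PD1 into that slot */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
#else
	/* MMUv4: hardware finds a slot and writes the entry in one go */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
#endif
}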
@@ -382,13 +350,6 @@ ENTRY(EV_TLBMissD)
CONV_PTE_TO_TLB
-#if (CONFIG_ARC_MMU_VER == 1)
- ; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
- ; memcpy where 3 parties contend for 2 ways, ensuing a livelock.
- ; But only for old MMU or one with Metal Fix
- TLB_WRITE_HEURISTICS
-#endif
-
COMMIT_ENTRY_TO_MMU
TLBMISS_RESTORE_REGS
EV_TLBMissD_fast_ret: ; additional label for VDK OS-kit instrumentation