Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-aurora-l2.h   55
-rw-r--r--  arch/arm/mm/cache-l2x0.c        18
-rw-r--r--  arch/arm/mm/fault.c              4
-rw-r--r--  arch/arm/mm/fault.h              1
-rw-r--r--  arch/arm/mm/mmu.c               19
5 files changed, 37 insertions, 60 deletions
diff --git a/arch/arm/mm/cache-aurora-l2.h b/arch/arm/mm/cache-aurora-l2.h
deleted file mode 100644
index c86124769831..000000000000
--- a/arch/arm/mm/cache-aurora-l2.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * AURORA shared L2 cache controller support
- *
- * Copyright (C) 2012 Marvell
- *
- * Yehuda Yitschak <yehuday@marvell.com>
- * Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#ifndef __ASM_ARM_HARDWARE_AURORA_L2_H
-#define __ASM_ARM_HARDWARE_AURORA_L2_H
-
-#define AURORA_SYNC_REG 0x700
-#define AURORA_RANGE_BASE_ADDR_REG 0x720
-#define AURORA_FLUSH_PHY_ADDR_REG 0x7f0
-#define AURORA_INVAL_RANGE_REG 0x774
-#define AURORA_CLEAN_RANGE_REG 0x7b4
-#define AURORA_FLUSH_RANGE_REG 0x7f4
-
-#define AURORA_ACR_REPLACEMENT_OFFSET 27
-#define AURORA_ACR_REPLACEMENT_MASK \
- (0x3 << AURORA_ACR_REPLACEMENT_OFFSET)
-#define AURORA_ACR_REPLACEMENT_TYPE_WAYRR \
- (0 << AURORA_ACR_REPLACEMENT_OFFSET)
-#define AURORA_ACR_REPLACEMENT_TYPE_LFSR \
- (1 << AURORA_ACR_REPLACEMENT_OFFSET)
-#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
- (3 << AURORA_ACR_REPLACEMENT_OFFSET)
-
-#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0
-#define AURORA_ACR_FORCE_WRITE_POLICY_MASK \
- (0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
-#define AURORA_ACR_FORCE_WRITE_POLICY_DIS \
- (0 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
-#define AURORA_ACR_FORCE_WRITE_BACK_POLICY \
- (1 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
-#define AURORA_ACR_FORCE_WRITE_THRO_POLICY \
- (2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
-
-#define MAX_RANGE_SIZE 1024
-
-#define AURORA_WAY_SIZE_SHIFT 2
-
-#define AURORA_CTRL_FW 0x100
-
-/* chose a number outside L2X0_CACHE_ID_PART_MASK to be sure to make
- * the distinction between a number coming from hardware and a number
- * coming from the device tree */
-#define AURORA_CACHE_ID 0x100
-
-#endif /* __ASM_ARM_HARDWARE_AURORA_L2_H */
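
Note: the header deleted above is not dropped, it moves to <asm/hardware/cache-aurora-l2.h> (the new file is outside this diffstat, which is limited to arch/arm/mm) and cache-l2x0.c switches to that include in the next hunk. As a rough sketch of how the ACR field macros are meant to be composed, the standalone example below clears the two-bit replacement-policy field and selects semi-pLRU; the macro values are copied from the header above, while the helper and the userspace wrapper are illustrative only.

/*
 * Illustrative only: how the AURORA_ACR_* field macros compose.  The macro
 * values mirror the deleted header; the helper is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define AURORA_ACR_REPLACEMENT_OFFSET		27
#define AURORA_ACR_REPLACEMENT_MASK \
	(0x3 << AURORA_ACR_REPLACEMENT_OFFSET)
#define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \
	(3 << AURORA_ACR_REPLACEMENT_OFFSET)

/* Clear the two-bit replacement-policy field, then select semi-pLRU. */
static uint32_t acr_set_semi_plru(uint32_t acr)
{
	acr &= ~AURORA_ACR_REPLACEMENT_MASK;
	acr |= AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	return acr;
}

int main(void)
{
	printf("acr = 0x%08x\n", acr_set_semi_plru(0));	/* 0x18000000 */
	return 0;
}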
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 5b251c8ecd45..12c26eb88afb 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -18,8 +18,8 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/cache-aurora-l2.h>
#include "cache-tauros3.h"
-#include "cache-aurora-l2.h"
struct l2c_init_data {
const char *type;
@@ -1352,8 +1352,8 @@ static unsigned long aurora_range_end(unsigned long start, unsigned long end)
* since cache range operations stall the CPU pipeline
* until completion.
*/
- if (end > start + MAX_RANGE_SIZE)
- end = start + MAX_RANGE_SIZE;
+ if (end > start + AURORA_MAX_RANGE_SIZE)
+ end = start + AURORA_MAX_RANGE_SIZE;
/*
* Cache range operations can't straddle a page boundary.
@@ -1493,6 +1493,18 @@ static void __init aurora_of_parse(const struct device_node *np,
mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
}
+ if (of_property_read_bool(np, "marvell,ecc-enable")) {
+ mask |= AURORA_ACR_ECC_EN;
+ val |= AURORA_ACR_ECC_EN;
+ }
+
+ if (of_property_read_bool(np, "arm,parity-enable")) {
+ mask |= AURORA_ACR_PARITY_EN;
+ val |= AURORA_ACR_PARITY_EN;
+ } else if (of_property_read_bool(np, "arm,parity-disable")) {
+ mask |= AURORA_ACR_PARITY_EN;
+ }
+
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
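
Note: in the aurora_of_parse() hunk above, each optional DT property adds its ACR bit to the local mask, but to val only when the feature is being enabled, so "arm,parity-disable" forces the bit clear rather than leaving the hardware default; the driver core later applies the pair roughly as aux = (aux & aux_mask) | aux_val. The standalone sketch below restates that folding; the bit positions are placeholders, not the real AURORA_ACR_ECC_EN / AURORA_ACR_PARITY_EN values, and the helper exists only for illustration.

/*
 * Standalone sketch of the aux_val/aux_mask folding done at the end of
 * aurora_of_parse().  Bit positions are placeholders, not the real ACR layout.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACR_ECC_EN	(1u << 20)	/* placeholder bit position */
#define ACR_PARITY_EN	(1u << 21)	/* placeholder bit position */

static void parse_props(bool ecc, bool parity_en, bool parity_dis,
			uint32_t *aux_val, uint32_t *aux_mask)
{
	uint32_t val = 0, mask = 0;

	if (ecc) {			/* "marvell,ecc-enable" */
		mask |= ACR_ECC_EN;
		val |= ACR_ECC_EN;
	}
	if (parity_en) {		/* "arm,parity-enable" */
		mask |= ACR_PARITY_EN;
		val |= ACR_PARITY_EN;
	} else if (parity_dis) {	/* "arm,parity-disable": force the bit clear */
		mask |= ACR_PARITY_EN;
	}

	/* Bits named in 'mask' are taken from 'val'; all other bits keep
	 * whatever value the hardware/boot configuration already holds. */
	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

int main(void)
{
	uint32_t aux_val = 0, aux_mask = ~0u;

	parse_props(true, false, true, &aux_val, &aux_mask);
	printf("val=0x%08x mask=0x%08x\n", aux_val, aux_mask);
	return 0;
}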
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 890eeaac3cbb..bd0f4821f7e1 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -191,7 +191,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
- if (fsr & FSR_WRITE)
+ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
mask = VM_WRITE;
if (fsr & FSR_LNX_PF)
mask = VM_EXEC;
@@ -262,7 +262,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (fsr & FSR_WRITE)
+ if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
flags |= FAULT_FLAG_WRITE;
/*
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index c063708fa503..9ecc2097a87a 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -6,6 +6,7 @@
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
+#define FSR_CM (1 << 13)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)
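
Note: fault.h gains FSR_CM (bit 13, the cache-maintenance fault flag), and both tests in fault.c above become (fsr & FSR_WRITE) && !(fsr & FSR_CM). A fault raised by a cache maintenance operation is reported with the write (WnR) bit set, but it should not be handled as a write access, otherwise maintenance on a mapping without write permission would fail the access check or be pushed down the write-fault path. The standalone restatement below uses the same bit values as fault.h; the helper name is illustrative.

/*
 * Standalone restatement of the new write-fault test in fault.c; only the
 * bit values mirror fault.h above, the helper itself is illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define FSR_CM		(1 << 13)	/* fault came from a cache maintenance op */
#define FSR_WRITE	(1 << 11)	/* WnR: write, not read */

static bool fault_is_write(unsigned int fsr)
{
	/* Cache maintenance faults carry WnR=1 but are treated as reads. */
	return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}

int main(void)
{
	printf("%d %d %d\n",
	       fault_is_write(FSR_WRITE),		/* 1: plain write fault */
	       fault_is_write(FSR_WRITE | FSR_CM),	/* 0: cache maintenance */
	       fault_is_write(0));			/* 0: read fault */
	return 0;
}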
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d9a0038774a6..25da9b2d9610 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1177,10 +1177,29 @@ void __init adjust_lowmem_bounds(void)
*/
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+ /*
+ * The first usable region must be PMD aligned. Mark its start
+ * as MEMBLOCK_NOMAP if it isn't
+ */
+ for_each_memblock(memory, reg) {
+ if (!memblock_is_nomap(reg)) {
+ if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
+ phys_addr_t len;
+
+ len = round_up(reg->base, PMD_SIZE) - reg->base;
+ memblock_mark_nomap(reg->base, len);
+ }
+ break;
+ }
+ }
+
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size;
+ if (memblock_is_nomap(reg))
+ continue;
+
if (reg->base < vmalloc_limit) {
if (block_end > lowmem_limit)
/*
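
Note: the new block in adjust_lowmem_bounds() trims the head of the first usable memory region: if its base is not PMD-aligned, the bytes up to the next PMD boundary are marked MEMBLOCK_NOMAP, and the main loop now skips nomap regions so the lowmem mapping starts on a section boundary, as the in-code comment says. The standalone sketch below shows only the length computation, assuming a 2 MiB PMD_SIZE and an example base address; the macros are simplified stand-ins for the kernel ones.

/*
 * Standalone sketch of the head trim computed above: how many bytes of the
 * first region must be marked NOMAP so it starts on a PMD boundary.
 * PMD_SIZE (assumed 2 MiB here) and the base address are example values.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

#define PMD_SIZE		((phys_addr_t)2 * 1024 * 1024)
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define round_up(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static phys_addr_t nomap_head_len(phys_addr_t base)
{
	if (IS_ALIGNED(base, PMD_SIZE))
		return 0;
	return round_up(base, PMD_SIZE) - base;
}

int main(void)
{
	phys_addr_t base = 0x80100000;	/* 1 MiB into a 2 MiB section */
	phys_addr_t len = nomap_head_len(base);

	printf("mark 0x%llx bytes at 0x%llx as NOMAP\n",
	       (unsigned long long)len, (unsigned long long)base);
	return 0;
}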