author		Russell King <rmk+kernel@arm.linux.org.uk>	2015-07-07 12:35:33 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2015-07-07 12:35:33 +0100
commit		06be5eefe1192eb8ce8d07497f67595b6bfe9741 (patch)
tree		80f1987d4970f8079681f8be0c135cafc8d6329a /arch/arm/mm
parent		11b8b25ce4f8acfd3b438683c0c9ade27756c6e8 (diff)
parent		1bd46782d08b01b73df0085b51ea1021b19b44fd (diff)
Merge branches 'fixes' and 'ioremap' into for-linus
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig           |  24
-rw-r--r--  arch/arm/mm/Makefile          |   3
-rw-r--r--  arch/arm/mm/cache-l2x0.c      | 107
-rw-r--r--  arch/arm/mm/dma-mapping.c     |  32
-rw-r--r--  arch/arm/mm/fault.c           |   2
-rw-r--r--  arch/arm/mm/highmem.c         |   3
-rw-r--r--  arch/arm/mm/hugetlbpage.c     |   5
-rw-r--r--  arch/arm/mm/init.c            |   1
-rw-r--r--  arch/arm/mm/ioremap.c         |  33
-rw-r--r--  arch/arm/mm/mmu.c             | 153
-rw-r--r--  arch/arm/mm/nommu.c           |  48
-rw-r--r--  arch/arm/mm/proc-v7-2level.S  |  12
-rw-r--r--  arch/arm/mm/proc-v7-3level.S  |  14
-rw-r--r--  arch/arm/mm/proc-v7.S         | 184
-rw-r--r--  arch/arm/mm/proc-v7m.S        |   2
-rw-r--r--  arch/arm/mm/pv-fixup-asm.S    |  88
16 files changed, 450 insertions, 261 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index b4f92b9a13ac..7c6b976ab8d3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -6,7 +6,7 @@ comment "Processor Type"
# ARM7TDMI
config CPU_ARM7TDMI
- bool "Support ARM7TDMI processor"
+ bool
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
@@ -56,7 +56,7 @@ config CPU_ARM740T
# ARM9TDMI
config CPU_ARM9TDMI
- bool "Support ARM9TDMI processor"
+ bool
depends on !MMU
select CPU_32v4T
select CPU_ABRT_NOMMU
@@ -604,6 +604,22 @@ config CPU_USE_DOMAINS
This option enables or disables the use of domain switching
via the set_fs() function.
+config CPU_V7M_NUM_IRQ
+ int "Number of external interrupts connected to the NVIC"
+ depends on CPU_V7M
+ default 90 if ARCH_STM32
+ default 38 if ARCH_EFM32
+ default 112 if SOC_VF610
+ default 240
+ help
+ This option indicates the number of interrupts connected to the NVIC.
+ The value can be larger than the real number of interrupts supported
+ by the system, but must not be lower.
+ The default value is 240, corresponding to the maximum number of
+ interrupts supported by the NVIC on the Cortex-M family.
+
+ If unsure, keep the default value.
+
#
# CPU supports 36-bit I/O
#
@@ -624,6 +640,10 @@ config ARM_LPAE
If unsure, say N.
+config ARM_PV_FIXUP
+ def_bool y
+ depends on ARM_LPAE && ARM_PATCH_PHYS_VIRT && ARCH_KEYSTONE
+
config ARCH_PHYS_ADDR_T_64BIT
def_bool ARM_LPAE
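
The CPU_V7M_NUM_IRQ help text above only promises an upper bound, not an exact count. A minimal sketch of how a consumer might size its bookkeeping from it; the identifiers below are illustrative, not taken from the kernel's NVIC driver:

    /* Hypothetical consumer of CONFIG_CPU_V7M_NUM_IRQ. Since the
     * option may exceed (but never undershoot) the real IRQ count,
     * it is safe to size arrays and bounds checks from it. */
    #define NVIC_NR_IRQS	CONFIG_CPU_V7M_NUM_IRQ

    static unsigned long nvic_enabled[BITS_TO_LONGS(NVIC_NR_IRQS)];

    static bool nvic_irq_valid(unsigned int hwirq)
    {
    	return hwirq < NVIC_NR_IRQS;	/* never index past the bound */
    }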
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index d3afdf9eb65a..57c8df500e8c 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_ARM_PV_FIXUP) += pv-fixup-asm.o
obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o
@@ -55,6 +56,8 @@ obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
obj-$(CONFIG_CPU_XSC3) += copypage-xsc3.o
obj-$(CONFIG_CPU_COPY_FA) += copypage-fa.o
+CFLAGS_copypage-feroceon.o := -march=armv5te
+
obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o
obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o
obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index e309c8f35af5..71b3d3309024 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -38,10 +38,11 @@ struct l2c_init_data {
unsigned way_size_0;
unsigned num_lock;
void (*of_parse)(const struct device_node *, u32 *, u32 *);
- void (*enable)(void __iomem *, u32, unsigned);
+ void (*enable)(void __iomem *, unsigned);
void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
void (*save)(void __iomem *);
void (*configure)(void __iomem *);
+ void (*unlock)(void __iomem *, unsigned);
struct outer_cache_fns outer_cache;
};
@@ -110,14 +111,6 @@ static inline void l2c_unlock(void __iomem *base, unsigned num)
static void l2c_configure(void __iomem *base)
{
- if (outer_cache.configure) {
- outer_cache.configure(&l2x0_saved_regs);
- return;
- }
-
- if (l2x0_data->configure)
- l2x0_data->configure(base);
-
l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}
@@ -125,18 +118,16 @@ static void l2c_configure(void __iomem *base)
* Enable the L2 cache controller. This function must only be
* called when the cache controller is known to be disabled.
*/
-static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
+static void l2c_enable(void __iomem *base, unsigned num_lock)
{
unsigned long flags;
- /* Do not touch the controller if already enabled. */
- if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)
- return;
-
- l2x0_saved_regs.aux_ctrl = aux;
- l2c_configure(base);
+ if (outer_cache.configure)
+ outer_cache.configure(&l2x0_saved_regs);
+ else
+ l2x0_data->configure(base);
- l2c_unlock(base, num_lock);
+ l2x0_data->unlock(base, num_lock);
local_irq_save(flags);
__l2c_op_way(base + L2X0_INV_WAY);
@@ -163,7 +154,11 @@ static void l2c_save(void __iomem *base)
static void l2c_resume(void)
{
- l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock);
+ void __iomem *base = l2x0_base;
+
+ /* Do not touch the controller if already enabled. */
+ if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
+ l2c_enable(base, l2x0_data->num_lock);
}
/*
@@ -252,6 +247,8 @@ static const struct l2c_init_data l2c210_data __initconst = {
.num_lock = 1,
.enable = l2c_enable,
.save = l2c_save,
+ .configure = l2c_configure,
+ .unlock = l2c_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -391,16 +388,22 @@ static void l2c220_sync(void)
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
+static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
- aux |= L220_AUX_CTRL_NS_LOCKDOWN;
+ l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;
- l2c_enable(base, aux, num_lock);
+ l2c_enable(base, num_lock);
+}
+
+static void l2c220_unlock(void __iomem *base, unsigned num_lock)
+{
+ if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
+ l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c220_data = {
@@ -409,6 +412,8 @@ static const struct l2c_init_data l2c220_data = {
.num_lock = 1,
.enable = l2c220_enable,
.save = l2c_save,
+ .configure = l2c_configure,
+ .unlock = l2c220_unlock,
.outer_cache = {
.inv_range = l2c220_inv_range,
.clean_range = l2c220_clean_range,
@@ -569,6 +574,8 @@ static void l2c310_configure(void __iomem *base)
{
unsigned revision;
+ l2c_configure(base);
+
/* restore pl310 setup */
l2c_write_sec(l2x0_saved_regs.tag_latency, base,
L310_TAG_LATENCY_CTRL);
@@ -603,10 +610,11 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
return NOTIFY_OK;
}
-static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
+static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
+ u32 aux = l2x0_saved_regs.aux_ctrl;
if (rev >= L310_CACHE_ID_RTL_R2P0) {
if (cortex_a9) {
@@ -649,9 +657,9 @@ static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
- aux |= L310_AUX_CTRL_NS_LOCKDOWN;
+ l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
- l2c_enable(base, aux, num_lock);
+ l2c_enable(base, num_lock);
/* Read back resulting AUX_CTRL value as it could have been altered. */
aux = readl_relaxed(base + L2X0_AUX_CTRL);
@@ -755,6 +763,12 @@ static void l2c310_resume(void)
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}
+static void l2c310_unlock(void __iomem *base, unsigned num_lock)
+{
+ if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
+ l2c_unlock(base, num_lock);
+}
+
static const struct l2c_init_data l2c310_init_fns __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
@@ -763,6 +777,7 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
+ .unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -856,8 +871,11 @@ static int __init __l2c_init(const struct l2c_init_data *data,
* Check if l2x0 controller is already enabled. If we are booting
* in non-secure mode accessing the below registers will fault.
*/
- if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
- data->enable(l2x0_base, aux, data->num_lock);
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
+ l2x0_saved_regs.aux_ctrl = aux;
+
+ data->enable(l2x0_base, data->num_lock);
+ }
outer_cache = fns;
@@ -1066,6 +1084,8 @@ static const struct l2c_init_data of_l2c210_data __initconst = {
.of_parse = l2x0_of_parse,
.enable = l2c_enable,
.save = l2c_save,
+ .configure = l2c_configure,
+ .unlock = l2c_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1084,6 +1104,8 @@ static const struct l2c_init_data of_l2c220_data __initconst = {
.of_parse = l2x0_of_parse,
.enable = l2c220_enable,
.save = l2c_save,
+ .configure = l2c_configure,
+ .unlock = l2c220_unlock,
.outer_cache = {
.inv_range = l2c220_inv_range,
.clean_range = l2c220_clean_range,
@@ -1199,6 +1221,26 @@ static void __init l2c310_of_parse(const struct device_node *np,
pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
}
+ ret = of_property_read_u32(np, "prefetch-data", &val);
+ if (ret == 0) {
+ if (val)
+ prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF prefetch-data property value is missing\n");
+ }
+
+ ret = of_property_read_u32(np, "prefetch-instr", &val);
+ if (ret == 0) {
+ if (val)
+ prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
+ else
+ prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF prefetch-instr property value is missing\n");
+ }
+
l2x0_saved_regs.prefetch_ctrl = prefetch;
}
@@ -1211,6 +1253,7 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
+ .unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1240,6 +1283,7 @@ static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
+ .unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
@@ -1366,7 +1410,7 @@ static void aurora_save(void __iomem *base)
* For Aurora cache in no outer mode, enable via the CP15 coprocessor
* broadcasting of cache commands to L2.
*/
-static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
+static void __init aurora_enable_no_outer(void __iomem *base,
unsigned num_lock)
{
u32 u;
@@ -1377,7 +1421,7 @@ static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
isb();
- l2c_enable(base, aux, num_lock);
+ l2c_enable(base, num_lock);
}
static void __init aurora_fixup(void __iomem *base, u32 cache_id,
@@ -1416,6 +1460,8 @@ static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
.enable = l2c_enable,
.fixup = aurora_fixup,
.save = aurora_save,
+ .configure = l2c_configure,
+ .unlock = l2c_unlock,
.outer_cache = {
.inv_range = aurora_inv_range,
.clean_range = aurora_clean_range,
@@ -1435,6 +1481,8 @@ static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
.enable = aurora_enable_no_outer,
.fixup = aurora_fixup,
.save = aurora_save,
+ .configure = l2c_configure,
+ .unlock = l2c_unlock,
.outer_cache = {
.resume = l2c_resume,
},
@@ -1585,6 +1633,7 @@ static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
.enable = l2c310_enable,
.save = l2c310_save,
.configure = l2c310_configure,
+ .unlock = l2c310_unlock,
.outer_cache = {
.inv_range = bcm_inv_range,
.clean_range = bcm_clean_range,
@@ -1608,6 +1657,7 @@ static void __init tauros3_save(void __iomem *base)
static void tauros3_configure(void __iomem *base)
{
+ l2c_configure(base);
writel_relaxed(l2x0_saved_regs.aux2_ctrl,
base + TAUROS3_AUX2_CTRL);
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
@@ -1621,6 +1671,7 @@ static const struct l2c_init_data of_tauros3_data __initconst = {
.enable = l2c_enable,
.save = tauros3_save,
.configure = tauros3_configure,
+ .unlock = l2c_unlock,
/* Tauros3 broadcasts L1 cache operations to L2 */
.outer_cache = {
.resume = l2c_resume,
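
To recap the cache-l2x0.c rework above: the AUX_CTRL value now travels through l2x0_saved_regs rather than being passed to enable(), and each variant supplies configure and unlock hooks that l2c_enable() invokes before touching the cache. A condensed sketch of the resulting enable path (simplified from the diff, with IRQ handling and the sync write omitted):

    /* Condensed sketch of the reworked l2c_enable() flow. */
    static void l2c_enable_sketch(void __iomem *base, unsigned num_lock)
    {
    	if (outer_cache.configure)
    		outer_cache.configure(&l2x0_saved_regs);	/* firmware/secure path */
    	else
    		l2x0_data->configure(base);	/* writes the saved AUX_CTRL etc. */

    	l2x0_data->unlock(base, num_lock);	/* variant-specific lockdown access */

    	__l2c_op_way(base + L2X0_INV_WAY);	/* invalidate all ways */
    	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);	/* and enable */
    }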
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7e7583ddd607..1ced8a0f7a52 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -148,11 +148,14 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs);
+static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
.free = arm_coherent_dma_free,
- .mmap = arm_dma_mmap,
+ .mmap = arm_coherent_dma_mmap,
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
@@ -690,10 +693,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
attrs, __builtin_return_address(0));
}
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
@@ -704,8 +704,6 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn = dma_to_pfn(dev, dma_addr);
unsigned long off = vma->vm_pgoff;
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -721,6 +719,26 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
}
/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+#ifdef CONFIG_MMU
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+#endif /* CONFIG_MMU */
+ return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+/*
* Free a buffer as defined by the above mapping.
*/
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
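
The upshot of the mmap split above: dma_mmap_coherent() on a device using arm_coherent_dma_ops no longer has __get_dma_pgprot() applied to its vma behind its back. A usage sketch from a hypothetical driver (foo_dev and its fields are invented for illustration):

    /* Hypothetical driver mmap handler. */
    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	struct foo_dev *foo = file->private_data;

    	/* Dispatches to the .mmap member of the dma_map_ops above. */
    	return dma_mmap_coherent(foo->dev, vma, foo->vaddr,
    				 foo->dma_handle, foo->size);
    }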
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 6333d9c17875..0d629b8f973f 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -276,7 +276,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (faulthandler_disabled() || !mm)
goto no_context;
if (user_mode(regs))
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index b98895d9fe57..ee8dfa793989 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
void *kmap;
int type;
+ preempt_disable();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
}
pagefault_enable();
+ preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
int idx, type;
struct page *page = pfn_to_page(pfn);
+ preempt_disable();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
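
Since kmap_atomic() now disables preemption as well as page faults, nothing between the map and the unmap may sleep. A usage sketch:

    /* Sketch: the window between map and unmap is fully atomic. */
    static void copy_from_page_sketch(struct page *page, void *dst, size_t len)
    {
    	void *src = kmap_atomic(page);

    	memcpy(dst, src, len);	/* no sleeping or faulting allowed here */
    	kunmap_atomic(src);	/* re-enables page faults and preemption */
    }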
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index c72412415093..fcafb521f14e 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -41,11 +41,6 @@ int pud_huge(pud_t pud)
return 0;
}
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- return 0;
-}
-
int pmd_huge(pmd_t pmd)
{
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index be92fa0f2f35..8a63b4cdc0f2 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -268,6 +268,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
+ early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
/* reserve memory for DMA contiguous allocations */
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d1e5ad7ab3bc..0c81056c1dd7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
}
#endif
-void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
unsigned int mtype)
{
return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
- __builtin_return_address(0));
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *) =
__arm_ioremap_caller;
-void __iomem *
-__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
- return arch_ioremap_caller(phys_addr, size, mtype,
- __builtin_return_address(0));
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+ __builtin_return_address(0));
}
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap_wc);
/*
* Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
-void __arm_iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *cookie)
{
- arch_iounmap(io_addr);
+ arch_iounmap(cookie);
}
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;
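
With ioremap(), ioremap_cache() and ioremap_wc() now defined out of line here, drivers call the generic names directly instead of __arm_ioremap(). A driver-side sketch; the physical base and register offset are invented:

    static void __iomem *foo_regs;	/* sketch only */

    static int foo_probe_sketch(void)
    {
    	foo_regs = ioremap(0x40000000, SZ_4K);	/* assumed device base */
    	if (!foo_regs)
    		return -ENOMEM;

    	writel(1, foo_regs + 0x10);	/* assumed control register */
    	return 0;
    }

    static void foo_remove_sketch(void)
    {
    	iounmap(foo_regs);	/* dispatches through arch_iounmap */
    }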
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 79de062c6077..870838a46d52 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1393,123 +1393,98 @@ static void __init map_lowmem(void)
}
}
-#ifdef CONFIG_ARM_LPAE
+#ifdef CONFIG_ARM_PV_FIXUP
+extern unsigned long __atags_pointer;
+typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
+pgtables_remap lpae_pgtables_remap_asm;
+
/*
* early_paging_init() recreates boot time page table setup, allowing machines
* to switch over to a high (>4G) address space on LPAE systems
*/
-void __init early_paging_init(const struct machine_desc *mdesc,
- struct proc_info_list *procinfo)
+void __init early_paging_init(const struct machine_desc *mdesc)
{
- pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
- unsigned long map_start, map_end;
- pgd_t *pgd0, *pgdk;
- pud_t *pud0, *pudk, *pud_start;
- pmd_t *pmd0, *pmdk;
- phys_addr_t phys;
- int i;
+ pgtables_remap *lpae_pgtables_remap;
+ unsigned long pa_pgd;
+ unsigned int cr, ttbcr;
+ long long offset;
+ void *boot_data;
- if (!(mdesc->init_meminfo))
+ if (!mdesc->pv_fixup)
return;
- /* remap kernel code and data */
- map_start = init_mm.start_code & PMD_MASK;
- map_end = ALIGN(init_mm.brk, PMD_SIZE);
+ offset = mdesc->pv_fixup();
+ if (offset == 0)
+ return;
- /* get a handle on things... */
- pgd0 = pgd_offset_k(0);
- pud_start = pud0 = pud_offset(pgd0, 0);
- pmd0 = pmd_offset(pud0, 0);
+ /*
+ * Get the address of the remap function in the 1:1 identity
+ * mapping setup by the early page table assembly code. We
+ * must get this prior to the pv update. The following barrier
+ * ensures that this is complete before we fixup any P:V offsets.
+ */
+ lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
+ pa_pgd = __pa(swapper_pg_dir);
+ boot_data = __va(__atags_pointer);
+ barrier();
- pgdk = pgd_offset_k(map_start);
- pudk = pud_offset(pgdk, map_start);
- pmdk = pmd_offset(pudk, map_start);
+ pr_info("Switching physical address space to 0x%08llx\n",
+ (u64)PHYS_OFFSET + offset);
- mdesc->init_meminfo();
+ /* Re-set the phys pfn offset, and the pv offset */
+ __pv_offset += offset;
+ __pv_phys_pfn_offset += PFN_DOWN(offset);
/* Run the patch stub to update the constants */
fixup_pv_table(&__pv_table_begin,
(&__pv_table_end - &__pv_table_begin) << 2);
/*
- * Cache cleaning operations for self-modifying code
- * We should clean the entries by MVA but running a
- * for loop over every pv_table entry pointer would
- * just complicate the code.
- */
- flush_cache_louis();
- dsb(ishst);
- isb();
-
- /*
- * FIXME: This code is not architecturally compliant: we modify
- * the mappings in-place, indeed while they are in use by this
- * very same code. This may lead to unpredictable behaviour of
- * the CPU.
- *
- * Even modifying the mappings in a separate page table does
- * not resolve this.
- *
- * The architecture strongly recommends that when a mapping is
- * changed, that it is changed by first going via an invalid
- * mapping and back to the new mapping. This is to ensure that
- * no TLB conflicts (caused by the TLB having more than one TLB
- * entry match a translation) can occur. However, doing that
- * here will result in unmapping the code we are running.
- */
- pr_warn("WARNING: unsafe modification of in-place page tables - tainting kernel\n");
- add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
-
- /*
- * Remap level 1 table. This changes the physical addresses
- * used to refer to the level 2 page tables to the high
- * physical address alias, leaving everything else the same.
- */
- for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
- set_pud(pud0,
- __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
- pmd0 += PTRS_PER_PMD;
- }
-
- /*
- * Remap the level 2 table, pointing the mappings at the high
- * physical address alias of these pages.
- */
- phys = __pa(map_start);
- do {
- *pmdk++ = __pmd(phys | pmdprot);
- phys += PMD_SIZE;
- } while (phys < map_end);
-
- /*
- * Ensure that the above updates are flushed out of the cache.
- * This is not strictly correct; on a system where the caches
- * are coherent with each other, but the MMU page table walks
- * may not be coherent, flush_cache_all() may be a no-op, and
- * this will fail.
+ * We are changing not only the virtual to physical mapping, but
+ * also the physical addresses used to access memory. We need to
+ * flush all levels of cache in the system with caching disabled to
+ * ensure that all data is written back, and nothing is prefetched
+ * into the caches. We also need to prevent the TLB walkers from
+ * allocating into the caches. Note that this is ARMv7 LPAE
+ * specific.
*/
+ cr = get_cr();
+ set_cr(cr & ~(CR_I | CR_C));
+ asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
+ asm volatile("mcr p15, 0, %0, c2, c0, 2"
+ : : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
flush_cache_all();
/*
- * Re-write the TTBR values to point them at the high physical
- * alias of the page tables. We expect __va() will work on
- * cpu_get_pgd(), which returns the value of TTBR0.
+ * Fixup the page tables - this must be in the idmap region as
+ * we need to disable the MMU to do this safely, and hence it
+ * needs to be assembly. It's fairly simple, as we're using the
+ * temporary tables setup by the initial assembly code.
*/
- cpu_switch_mm(pgd0, &init_mm);
- cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+ lpae_pgtables_remap(offset, pa_pgd, boot_data);
- /* Finally flush any stale TLB values. */
- local_flush_bp_all();
- local_flush_tlb_all();
+ /* Re-enable the caches and cacheable TLB walks */
+ asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
+ set_cr(cr);
}
#else
-void __init early_paging_init(const struct machine_desc *mdesc,
- struct proc_info_list *procinfo)
+void __init early_paging_init(const struct machine_desc *mdesc)
{
- if (mdesc->init_meminfo)
- mdesc->init_meminfo();
+ long long offset;
+
+ if (!mdesc->pv_fixup)
+ return;
+
+ offset = mdesc->pv_fixup();
+ if (offset == 0)
+ return;
+
+ pr_crit("Physical address space modification is only to support Keystone2.\n");
+ pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
+ pr_crit("feature. Your kernel may crash now, have a good day.\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}
#endif
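
For context, the consumer side of the new hook, modelled on Keystone 2 (the only expected user); the constants and names below are illustrative rather than copied from mach-keystone:

    /* Illustrative machine-descriptor hook; constants are assumed. */
    static long long __init keystone_pv_fixup_sketch(void)
    {
    	/* Relocate RAM from the 32-bit alias to the high LPAE range. */
    	return KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START;
    }

    DT_MACHINE_START(KEYSTONE_SKETCH, "Keystone 2 (sketch)")
    	.pv_fixup	= keystone_pv_fixup_sketch,
    MACHINE_END

early_paging_init() above calls pv_fixup() early, and a non-zero return triggers the relocation carried out by the assembly stub added in pv-fixup-asm.S below.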
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index a014dfacd5ca..1dd10936d68d 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -304,15 +304,6 @@ void __init sanity_check_meminfo(void)
}
/*
- * early_paging_init() recreates boot time page table setup, allowing machines
- * to switch over to a high (>4G) address space on LPAE systems
- */
-void __init early_paging_init(const struct machine_desc *mdesc,
- struct proc_info_list *procinfo)
-{
-}
-
-/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
@@ -360,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
-void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
- size_t size, unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
- return __arm_ioremap_pfn(pfn, offset, size, mtype);
+ return (void __iomem *)phys_addr;
}
-void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
- unsigned int mtype)
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
- return (void __iomem *)phys_addr;
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
+ __builtin_return_address(0));
}
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap);
-void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
-void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
- unsigned int mtype, void *caller)
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iounmap(volatile void __iomem *addr)
{
- return __arm_ioremap(phys_addr, size, mtype);
}
+EXPORT_SYMBOL(__iounmap);
void (*arch_iounmap)(volatile void __iomem *);
-void __arm_iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
{
}
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 10405b8d31af..c6141a5435c3 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -36,14 +36,16 @@
*
* It is assumed that:
* - we are not using split page tables
+ *
+ * Note that we always need to flush BTAC/BTB if IBE is set
+ * even on Cortex-A8 revisions not affected by 430973.
+ * If IBE is not set, the BTAC/BTB flush won't do anything.
*/
ENTRY(cpu_ca8_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
-#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
-#endif
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
@@ -148,10 +150,10 @@ ENDPROC(cpu_v7_set_pte_ext)
* Macro for setting up the TTBRx and TTBCR registers.
* - \ttb0 and \ttb1 updated with the corresponding flags.
*/
- .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+ .macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
mcr p15, 0, \zero, c2, c0, 2 @ TTB control register
- ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP)
- ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP)
+ ALT_SMP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_SMP)
+ ALT_UP(orr \ttbr0l, \ttbr0l, #TTB_FLAGS_UP)
ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP)
ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP)
mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index d3daed0ae0ad..5e5720e8bc5f 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -126,11 +126,10 @@ ENDPROC(cpu_v7_set_pte_ext)
* Macro for setting up the TTBRx and TTBCR registers.
* - \ttbr1 updated.
*/
- .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
+ .macro v7_ttb_setup, zero, ttbr0l, ttbr0h, ttbr1, tmp
ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
- mov \tmp, \tmp, lsr #ARCH_PGD_SHIFT
- cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET?
- mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
+ cmp \ttbr1, \tmp, lsr #12 @ PHYS_OFFSET > PAGE_OFFSET?
+ mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
orr \tmp, \tmp, #TTB_EAE
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
@@ -143,13 +142,10 @@ ENDPROC(cpu_v7_set_pte_ext)
*/
orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ
mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR
- mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
- mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits
+ mov \tmp, \ttbr1, lsr #20
+ mov \ttbr1, \ttbr1, lsl #12
addls \ttbr1, \ttbr1, #TTBR1_OFFSET
mcrr p15, 1, \ttbr1, \tmp, c2 @ load TTBR1
- mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits
- mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits
- mcrr p15, 0, \ttbr0, \tmp, c2 @ load TTBR0
.endm
/*
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3d1054f11a8a..0716bbe19872 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -252,6 +252,12 @@ ENDPROC(cpu_pj4b_do_resume)
* Initialise TLB, Caches, and MMU state ready to switch the MMU
* on. Return in r0 the new CP15 C1 control register setting.
*
+ * r1, r2, r4, r5, r9, r13 must be preserved - r13 is not a stack
+ * r4: TTBR0 (low word)
+ * r5: TTBR0 (high word if LPAE)
+ * r8: TTBR1
+ * r9: Main ID register
+ *
* This should be able to cover all ARMv7 cores.
*
* It is assumed that:
@@ -279,6 +285,78 @@ __v7_ca17mp_setup:
#endif
b __v7_setup
+/*
+ * Errata:
+ * r0, r10 available for use
+ * r1, r2, r4, r5, r9, r13: must be preserved
+ * r3: contains MIDR rX number in bits 23-20
+ * r6: contains MIDR rXpY as 8-bit XY number
+ * r9: MIDR
+ */
+__ca8_errata:
+#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM)
+ teq r3, #0x00100000 @ only present in r1p*
+ mrceq p15, 0, r0, c1, c0, 1 @ read aux control register
+ orreq r0, r0, #(1 << 6) @ set IBE to 1
+ mcreq p15, 0, r0, c1, c0, 1 @ write aux control register
+#endif
+#ifdef CONFIG_ARM_ERRATA_458693
+ teq r6, #0x20 @ only present in r2p0
+ mrceq p15, 0, r0, c1, c0, 1 @ read aux control register
+ orreq r0, r0, #(1 << 5) @ set L1NEON to 1
+ orreq r0, r0, #(1 << 9) @ set PLDNOP to 1
+ mcreq p15, 0, r0, c1, c0, 1 @ write aux control register
+#endif
+#ifdef CONFIG_ARM_ERRATA_460075
+ teq r6, #0x20 @ only present in r2p0
+ mrceq p15, 1, r0, c9, c0, 2 @ read L2 cache aux ctrl register
+ tsteq r0, #1 << 22
+ orreq r0, r0, #(1 << 22) @ set the Write Allocate disable bit
+ mcreq p15, 1, r0, c9, c0, 2 @ write the L2 cache aux ctrl register
+#endif
+ b __errata_finish
+
+__ca9_errata:
+#ifdef CONFIG_ARM_ERRATA_742230
+ cmp r6, #0x22 @ only present up to r2p2
+ mrcle p15, 0, r0, c15, c0, 1 @ read diagnostic register
+ orrle r0, r0, #1 << 4 @ set bit #4
+ mcrle p15, 0, r0, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+ teq r6, #0x20 @ present in r2p0
+ teqne r6, #0x21 @ present in r2p1
+ teqne r6, #0x22 @ present in r2p2
+ mrceq p15, 0, r0, c15, c0, 1 @ read diagnostic register
+ orreq r0, r0, #1 << 12 @ set bit #12
+ orreq r0, r0, #1 << 22 @ set bit #22
+ mcreq p15, 0, r0, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_743622
+ teq r3, #0x00200000 @ only present in r2p*
+ mrceq p15, 0, r0, c15, c0, 1 @ read diagnostic register
+ orreq r0, r0, #1 << 6 @ set bit #6
+ mcreq p15, 0, r0, c15, c0, 1 @ write diagnostic register
+#endif
+#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
+ ALT_SMP(cmp r6, #0x30) @ present prior to r3p0
+ ALT_UP_B(1f)
+ mrclt p15, 0, r0, c15, c0, 1 @ read diagnostic register
+ orrlt r0, r0, #1 << 11 @ set bit #11
+ mcrlt p15, 0, r0, c15, c0, 1 @ write diagnostic register
+1:
+#endif
+ b __errata_finish
+
+__ca15_errata:
+#ifdef CONFIG_ARM_ERRATA_773022
+ cmp r6, #0x4 @ only present up to r0p4
+ mrcle p15, 0, r0, c1, c0, 1 @ read aux control register
+ orrle r0, r0, #1 << 1 @ disable loop buffer
+ mcrle p15, 0, r0, c1, c0, 1 @ write aux control register
+#endif
+ b __errata_finish
+
__v7_pj4b_setup:
#ifdef CONFIG_CPU_PJ4B
@@ -336,99 +414,41 @@ __v7_pj4b_setup:
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
- bl v7_flush_dcache_louis
+ bl v7_invalidate_l1
ldmia r12, {r0-r5, r7, r9, r11, lr}
- mrc p15, 0, r0, c0, c0, 0 @ read main ID register
- and r10, r0, #0xff000000 @ ARM?
- teq r10, #0x41000000
- bne 3f
- and r5, r0, #0x00f00000 @ variant
- and r6, r0, #0x0000000f @ revision
- orr r6, r6, r5, lsr #20-4 @ combine variant and revision
- ubfx r0, r0, #4, #12 @ primary part number
+ and r0, r9, #0xff000000 @ ARM?
+ teq r0, #0x41000000
+ bne __errata_finish
+ and r3, r9, #0x00f00000 @ variant
+ and r6, r9, #0x0000000f @ revision
+ orr r6, r6, r3, lsr #20-4 @ combine variant and revision
+ ubfx r0, r9, #4, #12 @ primary part number
/* Cortex-A8 Errata */
ldr r10, =0x00000c08 @ Cortex-A8 primary part number
teq r0, r10
- bne 2f
-#if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM)
-
- teq r5, #0x00100000 @ only present in r1p*
- mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
- orreq r10, r10, #(1 << 6) @ set IBE to 1
- mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
-#endif
-#ifdef CONFIG_ARM_ERRATA_458693
- teq r6, #0x20 @ only present in r2p0
- mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
- orreq r10, r10, #(1 << 5) @ set L1NEON to 1
- orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
- mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
-#endif
-#ifdef CONFIG_ARM_ERRATA_460075
- teq r6, #0x20 @ only present in r2p0
- mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
- tsteq r10, #1 << 22
- orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
- mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
-#endif
- b 3f
+ beq __ca8_errata
/* Cortex-A9 Errata */
-2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
+ ldr r10, =0x00000c09 @ Cortex-A9 primary part number
teq r0, r10
- bne 3f
-#ifdef CONFIG_ARM_ERRATA_742230
- cmp r6, #0x22 @ only present up to r2p2
- mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
- orrle r10, r10, #1 << 4 @ set bit #4
- mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
-#endif
-#ifdef CONFIG_ARM_ERRATA_742231
- teq r6, #0x20 @ present in r2p0
- teqne r6, #0x21 @ present in r2p1
- teqne r6, #0x22 @ present in r2p2
- mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
- orreq r10, r10, #1 << 12 @ set bit #12
- orreq r10, r10, #1 << 22 @ set bit #22
- mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
-#endif
-#ifdef CONFIG_ARM_ERRATA_743622
- teq r5, #0x00200000 @ only present in r2p*
- mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
- orreq r10, r10, #1 << 6 @ set bit #6
- mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
-#endif
-#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
- ALT_SMP(cmp r6, #0x30) @ present prior to r3p0
- ALT_UP_B(1f)
- mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
- orrlt r10, r10, #1 << 11 @ set bit #11
- mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
-1:
-#endif
+ beq __ca9_errata
/* Cortex-A15 Errata */
-3: ldr r10, =0x00000c0f @ Cortex-A15 primary part number
+ ldr r10, =0x00000c0f @ Cortex-A15 primary part number
teq r0, r10
- bne 4f
+ beq __ca15_errata
-#ifdef CONFIG_ARM_ERRATA_773022
- cmp r6, #0x4 @ only present up to r0p4
- mrcle p15, 0, r10, c1, c0, 1 @ read aux control register
- orrle r10, r10, #1 << 1 @ disable loop buffer
- mcrle p15, 0, r10, c1, c0, 1 @ write aux control register
-#endif
-
-4: mov r10, #0
+__errata_finish:
+ mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
- v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
- ldr r5, =PRRR @ PRRR
+ v7_ttb_setup r10, r4, r5, r8, r3 @ TTBCR, TTBRx setup
+ ldr r3, =PRRR @ PRRR
ldr r6, =NMRR @ NMRR
- mcr p15, 0, r5, c10, c2, 0 @ write PRRR
+ mcr p15, 0, r3, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
dsb @ Complete invalidations
@@ -437,22 +457,22 @@ __v7_setup:
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
teq r0, #(1 << 12) @ check if ThumbEE is present
bne 1f
- mov r5, #0
- mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0
+ mov r3, #0
+ mcr p14, 6, r3, c1, c0, 0 @ Initialize TEEHBR to 0
mrc p14, 6, r0, c0, c0, 0 @ load TEECR
orr r0, r0, #1 @ set the 1st bit in order to
mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access
1:
#endif
- adr r5, v7_crval
- ldmia r5, {r5, r6}
+ adr r3, v7_crval
+ ldmia r3, {r3, r6}
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
#ifdef CONFIG_SWP_EMULATE
- orr r5, r5, #(1 << 10) @ set SW bit in "clear"
+ orr r3, r3, #(1 << 10) @ set SW bit in "clear"
bic r6, r6, #(1 << 10) @ clear it in "mmuset"
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control register
- bic r0, r0, r5 @ clear bits them
- bic r0, r0, r3 @ clear the requested bits
orr r0, r0, r6 @ set them
THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
ret lr @ return to head.S:__ret
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index e08e1f2bab76..67d9209077c6 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -98,7 +98,7 @@ __v7m_setup:
str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority
@ SVC to run the kernel in this mode
- adr r1, BSYM(1f)
+ badr r1, 1f
ldr r5, [r12, #11 * 4] @ read the SVC vector entry
str r1, [r12, #11 * 4] @ write the temporary SVC vector entry
mov r6, lr @ save LR
diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S
new file mode 100644
index 000000000000..1867f3e43016
--- /dev/null
+++ b/arch/arm/mm/pv-fixup-asm.S
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This assembly is required to safely remap the physical address space
+ * for Keystone 2.
+ */
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/cp15.h>
+#include <asm/memory.h>
+#include <asm/pgtable.h>
+
+ .section ".idmap.text", "ax"
+
+#define L1_ORDER 3
+#define L2_ORDER 3
+
+ENTRY(lpae_pgtables_remap_asm)
+ stmfd sp!, {r4-r8, lr}
+
+ mrc p15, 0, r8, c1, c0, 0 @ read control reg
+ bic ip, r8, #CR_M @ disable caches and MMU
+ mcr p15, 0, ip, c1, c0, 0
+ dsb
+ isb
+
+ /* Update level 2 entries covering the kernel */
+ ldr r6, =(_end - 1)
+ add r7, r2, #0x1000
+ add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
+ add r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+1: ldrd r4, [r7]
+ adds r4, r4, r0
+ adc r5, r5, r1
+ strd r4, [r7], #1 << L2_ORDER
+ cmp r7, r6
+ bls 1b
+
+ /* Update level 2 entries for the boot data */
+ add r7, r2, #0x1000
+ add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
+ bic r7, r7, #(1 << L2_ORDER) - 1
+ ldrd r4, [r7]
+ adds r4, r4, r0
+ adc r5, r5, r1
+ strd r4, [r7], #1 << L2_ORDER
+ ldrd r4, [r7]
+ adds r4, r4, r0
+ adc r5, r5, r1
+ strd r4, [r7]
+
+ /* Update level 1 entries */
+ mov r6, #4
+ mov r7, r2
+2: ldrd r4, [r7]
+ adds r4, r4, r0
+ adc r5, r5, r1
+ strd r4, [r7], #1 << L1_ORDER
+ subs r6, r6, #1
+ bne 2b
+
+ mrrc p15, 0, r4, r5, c2 @ read TTBR0
+ adds r4, r4, r0 @ update physical address
+ adc r5, r5, r1
+ mcrr p15, 0, r4, r5, c2 @ write back TTBR0
+ mrrc p15, 1, r4, r5, c2 @ read TTBR1
+ adds r4, r4, r0 @ update physical address
+ adc r5, r5, r1
+ mcrr p15, 1, r4, r5, c2 @ write back TTBR1
+
+ dsb
+
+ mov ip, #0
+ mcr p15, 0, ip, c7, c5, 0 @ I+BTB cache invalidate
+ mcr p15, 0, ip, c8, c7, 0 @ local_flush_tlb_all()
+ dsb
+ isb
+
+ mcr p15, 0, r8, c1, c0, 0 @ re-enable MMU
+ dsb
+ isb
+
+ ldmfd sp!, {r4-r8, pc}
+ENDPROC(lpae_pgtables_remap_asm)
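
The stub above executes from the identity-mapped .idmap.text section with the MMU briefly disabled, so the C side must call it through its physical address. Condensed from early_paging_init() in the mmu.c hunk above:

    /* How the stub is invoked (condensed from the diff, not new code). */
    pgtables_remap *remap;

    remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
    remap(offset, __pa(swapper_pg_dir), __va(__atags_pointer));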