From 00a19f3e25c0c40e0ec77f52d4841d23ad269169 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 8 Nov 2016 09:21:19 +0100 Subject: ARM: 8627/1: avoid cache flushing in flush_dcache_page() When the data cache is PIPT or VIPT non-aliasing, and cache operations are broadcast by the hardware, we can always postpone the flush in flush_dcache_page(). A similar change was done for ARM64 in commit b5b6c9e9149d ("arm64: Avoid cache flushing in flush_dcache_page()"). Reviewed-by: Catalin Marinas Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- arch/arm/mm/flush.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 3cced8455727..f1e6190aa7ea 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -327,6 +327,12 @@ void flush_dcache_page(struct page *page) if (page == ZERO_PAGE(0)) return; + if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) { + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); + return; + } + mapping = page_mapping(page); if (!cache_ops_need_broadcast() && -- cgit From 79964a1c2972ca7ecc231e2d2ac7593a1af73f63 Mon Sep 17 00:00:00 2001 From: Benjamin Gaignard Date: Mon, 12 Dec 2016 09:31:18 +0100 Subject: ARM: 8633/1: nommu: allow mmap when !CONFIG_MMU Commit ab6494f0c96f ("nommu: Add noMMU support to the DMA API") added a CONFIG_MMU compilation guard, but that prohibits the use of dma_mmap_wc() when the platform doesn't have an MMU. This patch calls vm_iomap_memory() in the no-MMU case to check that the addresses are valid and to set vma->vm_flags, rather than always returning an error. Signed-off-by: Benjamin Gaignard Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ab7710002ba6..afa64ed5e36b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -868,6 +868,9 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_end - vma->vm_start, vma->vm_page_prot); } +#else + ret = vm_iomap_memory(vma, vma->vm_start, + (vma->vm_end - vma->vm_start)); #endif /* CONFIG_MMU */ return ret; -- cgit From c466bda60510eaac797e49db5d34555876d983ae Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 9 Feb 2017 12:00:16 +0000 Subject: ARM: add CPU_THUMB_CAPABLE to indicate possible Thumb support Clean up arch/arm/mm/Kconfig a little to provide a symbol which indicates whether the CPU may support the Thumb instruction set. This gets rid of the growing dependencies on ARM_THUMB, and also gives us a useful Kconfig symbol for choosing the kuser code.
Reviewed-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index f68e8ec29447..ac395eca7dee 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -29,6 +29,7 @@ config CPU_ARM720T select CPU_COPY_V4WT if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WT if MMU help A 32-bit RISC processor with 8kByte Cache, Write Buffer and @@ -46,6 +47,7 @@ config CPU_ARM740T select CPU_CACHE_V4 select CPU_CP15_MPU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE help A 32-bit RISC processor with 8KB cache or 4KB variants, write buffer and MPU(Protection Unit) built around @@ -79,6 +81,7 @@ config CPU_ARM920T select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM920T is licensed to be produced by numerous vendors, @@ -97,6 +100,7 @@ config CPU_ARM922T select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM922T is a version of the ARM920T, but with smaller @@ -116,6 +120,7 @@ config CPU_ARM925T select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM925T is a mix between the ARM920T and ARM926T, but with @@ -134,6 +139,7 @@ config CPU_ARM926T select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help This is a variant of the ARM920. It has slightly different @@ -170,6 +176,7 @@ config CPU_ARM940T select CPU_CACHE_VIVT select CPU_CP15_MPU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE help ARM940T is a member of the ARM9TDMI family of general- purpose microprocessors with MPU and separate 4KB @@ -188,6 +195,7 @@ config CPU_ARM946E select CPU_CACHE_VIVT select CPU_CP15_MPU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE help ARM946E-S is a member of the ARM9E-S family of high- performance, 32-bit system-on-chip processor solutions. 
@@ -206,6 +214,7 @@ config CPU_ARM1020 select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM1020 is the 32K cached version of the ARM10 processor, @@ -225,6 +234,7 @@ config CPU_ARM1020E select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU # ARM1022E @@ -236,6 +246,7 @@ config CPU_ARM1022 select CPU_COPY_V4WB if MMU # can probably do better select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM1022E is an implementation of the ARMv5TE architecture @@ -254,6 +265,7 @@ config CPU_ARM1026 select CPU_COPY_V4WB if MMU # can probably do better select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU help The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture @@ -302,6 +314,7 @@ config CPU_XSCALE select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU # XScale Core Version 3 @@ -312,6 +325,7 @@ config CPU_XSC3 select CPU_CACHE_VIVT select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU select IO_36 @@ -324,6 +338,7 @@ config CPU_MOHAWK select CPU_COPY_V4WB if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_V4WBI if MMU # Feroceon @@ -335,6 +350,7 @@ config CPU_FEROCEON select CPU_COPY_FEROCEON if MMU select CPU_CP15_MMU select CPU_PABRT_LEGACY + select CPU_THUMB_CAPABLE select CPU_TLB_FEROCEON if MMU config CPU_FEROCEON_OLD_ID @@ -367,6 +383,7 @@ config CPU_V6 select CPU_CP15_MMU select CPU_HAS_ASID if MMU select CPU_PABRT_V6 + select CPU_THUMB_CAPABLE select CPU_TLB_V6 if MMU # ARMv6k @@ -381,6 +398,7 @@ config CPU_V6K select CPU_CP15_MMU select CPU_HAS_ASID if MMU select CPU_PABRT_V6 + select CPU_THUMB_CAPABLE select CPU_TLB_V6 if MMU # ARMv7 @@ -396,6 +414,7 @@ config CPU_V7 select CPU_CP15_MPU if !MMU select CPU_HAS_ASID if MMU select CPU_PABRT_V7 + select CPU_THUMB_CAPABLE select CPU_TLB_V7 if MMU # ARMv7M @@ -410,11 +429,17 @@ config CPU_V7M config CPU_THUMBONLY bool + select CPU_THUMB_CAPABLE # There are no CPUs available with MMU that don't implement an ARM ISA: depends on !MMU help Select this if your CPU doesn't support the 32 bit ARM instructions. +config CPU_THUMB_CAPABLE + bool + help + Select this if your CPU can support Thumb mode. + # Figure out what processor architecture version we should be using. # This defines the compiler instruction set which depends on the machine type. config CPU_32v3 @@ -655,11 +680,7 @@ config ARCH_DMA_ADDR_T_64BIT config ARM_THUMB bool "Support Thumb user binaries" if !CPU_THUMBONLY - depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \ - CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \ - CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \ - CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \ - CPU_V7 || CPU_FEROCEON || CPU_V7M + depends on CPU_THUMB_CAPABLE default y help Say Y if you want to include kernel support for running user space -- cgit From 374d446d25d6271ee615952a3b7f123ba4983c35 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 13 Jan 2017 22:51:08 +0100 Subject: ARM: 8636/1: Cleanup sanity_check_meminfo The logic for sanity_check_meminfo has become difficult to follow. Clean up the code so it's more obvious what the code is actually trying to do. 
Additionally, meminfo is now removed so rename the function to better describe its purpose. Tested-by: Magnus Lilja Reviewed-by: Nicolas Pitre Signed-off-by: Laura Abbott Signed-off-by: Laura Abbott Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 66 +++++++++++++++++++---------------------------------- arch/arm/mm/nommu.c | 8 +++---- 2 files changed, 28 insertions(+), 46 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4001dd15818d..b8f70a3bb7f8 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1152,13 +1152,11 @@ early_param("vmalloc", early_vmalloc); phys_addr_t arm_lowmem_limit __initdata = 0; -void __init sanity_check_meminfo(void) +void __init adjust_lowmem_bounds(void) { phys_addr_t memblock_limit = 0; - int highmem = 0; u64 vmalloc_limit; struct memblock_region *reg; - bool should_use_highmem = false; /* * Let's use our own (unoptimized) equivalent of __pa() that is @@ -1172,43 +1170,18 @@ void __init sanity_check_meminfo(void) for_each_memblock(memory, reg) { phys_addr_t block_start = reg->base; phys_addr_t block_end = reg->base + reg->size; - phys_addr_t size_limit = reg->size; - if (reg->base >= vmalloc_limit) - highmem = 1; - else - size_limit = vmalloc_limit - reg->base; - - - if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { - - if (highmem) { - pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", - &block_start, &block_end); - memblock_remove(reg->base, reg->size); - should_use_highmem = true; - continue; - } - - if (reg->size > size_limit) { - phys_addr_t overlap_size = reg->size - size_limit; - - pr_notice("Truncating RAM at %pa-%pa", - &block_start, &block_end); - block_end = vmalloc_limit; - pr_cont(" to -%pa", &block_end); - memblock_remove(vmalloc_limit, overlap_size); - should_use_highmem = true; - } - } - - if (!highmem) { - if (block_end > arm_lowmem_limit) { - if (reg->size > size_limit) - arm_lowmem_limit = vmalloc_limit; - else - arm_lowmem_limit = block_end; - } + if (reg->base < vmalloc_limit) { + if (block_end > arm_lowmem_limit) + /* + * Compare as u64 to ensure vmalloc_limit does + * not get truncated. block_end should always + * fit in phys_addr_t so there should be no + * issue with assignment. 
+ */ + arm_lowmem_limit = min_t(u64, + vmalloc_limit, + block_end); /* * Find the first non-pmd-aligned page, and point @@ -1233,9 +1206,6 @@ void __init sanity_check_meminfo(void) } } - if (should_use_highmem) - pr_notice("Consider using a HIGHMEM enabled kernel.\n"); - high_memory = __va(arm_lowmem_limit - 1) + 1; /* @@ -1248,6 +1218,18 @@ void __init sanity_check_meminfo(void) if (!memblock_limit) memblock_limit = arm_lowmem_limit; + if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { + if (memblock_end_of_DRAM() > arm_lowmem_limit) { + phys_addr_t end = memblock_end_of_DRAM(); + + pr_notice("Ignoring RAM at %pa-%pa\n", + &memblock_limit, &end); + pr_notice("Consider using a HIGHMEM enabled kernel.\n"); + + memblock_remove(memblock_limit, end - memblock_limit); + } + } + memblock_set_current_limit(memblock_limit); } diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 2740967727e2..13a25d6282f8 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -85,7 +85,7 @@ static unsigned long irbar_read(void) } /* MPU initialisation functions */ -void __init sanity_check_meminfo_mpu(void) +void __init adjust_lowmem_bounds_mpu(void) { phys_addr_t phys_offset = PHYS_OFFSET; phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; @@ -274,7 +274,7 @@ void __init mpu_setup(void) } } #else -static void sanity_check_meminfo_mpu(void) {} +static void adjust_lowmem_bounds_mpu(void) {} static void __init mpu_setup(void) {} #endif /* CONFIG_ARM_MPU */ @@ -295,10 +295,10 @@ void __init arm_mm_memblock_reserve(void) #endif } -void __init sanity_check_meminfo(void) +void __init adjust_lowmem_bounds(void) { phys_addr_t end; - sanity_check_meminfo_mpu(); + adjust_lowmem_bounds_mpu(); end = memblock_end_of_DRAM(); high_memory = __va(end - 1) + 1; memblock_set_current_limit(end); -- cgit From 985626564eedc470ce2866e53938303368ad41b7 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 13 Jan 2017 22:51:45 +0100 Subject: ARM: 8637/1: Adjust memory boundaries after reservations adjust_lowmem_bounds is responsible for setting up the boundary for lowmem/highmem. This needs to be setup before memblock reservations can occur. At the time memblock reservations can occur, memory can also be removed from the system. The lowmem/highmem boundary and end of memory may be affected by this but it is currently not recalculated. On some systems this may be harmless, on others this may result in incorrect ranges being passed to the main memory allocator. Correct this by recalculating the lowmem/highmem boundary after all reservations have been made. Tested-by: Magnus Lilja Signed-off-by: Laura Abbott Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index b8f70a3bb7f8..5cbfd9f86412 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1157,6 +1157,7 @@ void __init adjust_lowmem_bounds(void) phys_addr_t memblock_limit = 0; u64 vmalloc_limit; struct memblock_region *reg; + phys_addr_t lowmem_limit = 0; /* * Let's use our own (unoptimized) equivalent of __pa() that is @@ -1172,14 +1173,14 @@ void __init adjust_lowmem_bounds(void) phys_addr_t block_end = reg->base + reg->size; if (reg->base < vmalloc_limit) { - if (block_end > arm_lowmem_limit) + if (block_end > lowmem_limit) /* * Compare as u64 to ensure vmalloc_limit does * not get truncated. block_end should always * fit in phys_addr_t so there should be no * issue with assignment. 
*/ - arm_lowmem_limit = min_t(u64, + lowmem_limit = min_t(u64, vmalloc_limit, block_end); @@ -1200,12 +1201,14 @@ if (!IS_ALIGNED(block_start, PMD_SIZE)) memblock_limit = block_start; else if (!IS_ALIGNED(block_end, PMD_SIZE)) - memblock_limit = arm_lowmem_limit; + memblock_limit = lowmem_limit; } } } + arm_lowmem_limit = lowmem_limit; + high_memory = __va(arm_lowmem_limit - 1) + 1; /* -- cgit From a09975bf6c756e4555a95258ff4b2286dcfddc4e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 15 Jan 2017 03:57:40 +0100 Subject: ARM: 8639/1: Define KERNEL_START and KERNEL_END In preparation for adding CONFIG_DEBUG_VIRTUAL support, define a set of common constants: KERNEL_START and KERNEL_END which abstract CONFIG_XIP_KERNEL vs. !CONFIG_XIP_KERNEL. Update the code where relevant. Acked-by: Russell King Signed-off-by: Florian Fainelli Signed-off-by: Russell King --- arch/arm/mm/init.c | 7 ++----- arch/arm/mm/mmu.c | 6 +----- 2 files changed, 3 insertions(+), 10 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 370581aeb871..4127f578086c 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -230,11 +230,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) void __init arm_memblock_init(const struct machine_desc *mdesc) { /* Register the kernel text, kernel data and initrd with memblock. */ -#ifdef CONFIG_XIP_KERNEL - memblock_reserve(__pa(_sdata), _end - _sdata); -#else - memblock_reserve(__pa(_stext), _end - _stext); -#endif + memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START); + #ifdef CONFIG_BLK_DEV_INITRD /* FDT scan will populate initrd_start */ if (initrd_start && !phys_initrd_size) { diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 5cbfd9f86412..4e016d7f37b3 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1422,11 +1422,7 @@ static void __init kmap_init(void) static void __init map_lowmem(void) { struct memblock_region *reg; -#ifdef CONFIG_XIP_KERNEL - phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE); -#else - phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); -#endif + phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE); phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); /* Map all the lowmem memory banks. */ -- cgit From e377cd8221ebbe0b517861aa3d823bb42f9abbd4 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 15 Jan 2017 03:59:00 +0100 Subject: ARM: 8640/1: Add support for CONFIG_DEBUG_VIRTUAL x86 has an option, CONFIG_DEBUG_VIRTUAL, to do additional checks on virt_to_phys calls. The goal is to immediately catch users who are calling virt_to_phys on non-linear addresses. This includes callers using __virt_to_phys() on image addresses instead of __pa_symbol(). This is a generally useful debug feature to spot bad code (particularly in drivers).
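To illustrate the class of bug this catches, here is a minimal sketch (an illustration, not part of the patch); it assumes a linear-map buffer such as one returned by kmalloc(), and uses _stext purely as an example image symbol:

#include <linux/printk.h>
#include <asm/memory.h>		/* virt_to_phys(), __pa_symbol() */
#include <asm/sections.h>	/* _stext */

static void debug_virtual_example(void *linear_buf)
{
	/* Fine: a linear-map address (e.g. from kmalloc()) translates cleanly. */
	phys_addr_t good = virt_to_phys(linear_buf);

	/* Misuse: _stext is a kernel-image address, not a linear-map address;
	 * with CONFIG_DEBUG_VIRTUAL enabled this triggers the WARN added below. */
	phys_addr_t bad = virt_to_phys(_stext);

	/* Correct translation for kernel symbol addresses. */
	phys_addr_t fixed = __pa_symbol(_stext);

	pr_info("good %pa bad %pa fixed %pa\n", &good, &bad, &fixed);
}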
Acked-by: Russell King Acked-by: Laura Abbott Signed-off-by: Florian Fainelli Signed-off-by: Russell King --- arch/arm/mm/Makefile | 1 + arch/arm/mm/physaddr.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 arch/arm/mm/physaddr.c (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index e8698241ece9..b3dea80715b4 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -14,6 +14,7 @@ endif obj-$(CONFIG_ARM_PTDUMP) += dump.o obj-$(CONFIG_MODULES) += proc-syms.o +obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o obj-$(CONFIG_HIGHMEM) += highmem.o diff --git a/arch/arm/mm/physaddr.c b/arch/arm/mm/physaddr.c new file mode 100644 index 000000000000..02e60f495608 --- /dev/null +++ b/arch/arm/mm/physaddr.c @@ -0,0 +1,57 @@ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mm.h" + +static inline bool __virt_addr_valid(unsigned long x) +{ + /* + * high_memory does not get immediately defined, and there + * are early callers of __pa() against PAGE_OFFSET + */ + if (!high_memory && x >= PAGE_OFFSET) + return true; + + if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) + return true; + + /* + * MAX_DMA_ADDRESS is a virtual address that may not correspond to an + * actual physical address. Enough code relies on __pa(MAX_DMA_ADDRESS) + * that we just need to work around it and always return true. + */ + if (x == MAX_DMA_ADDRESS) + return true; + + return false; +} + +phys_addr_t __virt_to_phys(unsigned long x) +{ + WARN(!__virt_addr_valid(x), + "virt_to_phys used for non-linear address: %pK (%pS)\n", + (void *)x, (void *)x); + + return __virt_to_phys_nodebug(x); +} +EXPORT_SYMBOL(__virt_to_phys); + +phys_addr_t __phys_addr_symbol(unsigned long x) +{ + /* This is bounds checking against the kernel image only. + * __pa_symbol should only be used on kernel symbol addresses. + */ + VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START || + x > (unsigned long)KERNEL_END); + + return __pa_symbol_nodebug(x); +} +EXPORT_SYMBOL(__phys_addr_symbol); -- cgit From d2ca5f2491c1246adf3847101fdc538a3b89439c Mon Sep 17 00:00:00 2001 From: Afzal Mohammed Date: Sun, 29 Jan 2017 17:31:32 +0100 Subject: ARM: 8646/1: mmu: decouple VECTORS_BASE from Kconfig For MMU configurations, VECTORS_BASE is always 0xffff0000, so a macro definition will suffice. For no-MMU, the exception base address is determined dynamically in subsequent patches. To preserve bisectability, make the macro applicable to the no-MMU scenario too for now. Thanks to the 0-DAY kernel test infrastructure for finding the bisectability issue. This macro will be restricted to the MMU case once the exception base address is determined dynamically for no-MMU; at that point, VECTORS_BASE can be removed from Kconfig.
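The VECTORS_BASE definition itself lives outside arch/arm/mm, so it does not appear in the diffstat below; presumably it amounts to something like the following in asm/memory.h (a sketch — the exact location and form are assumptions, not shown in this excerpt):

/* High vectors address, fixed for MMU kernels and, for now, no-MMU too. */
#define VECTORS_BASE		UL(0xffff0000)

The hunks below then switch the arch/arm/mm users from the Kconfig-derived CONFIG_VECTORS_BASE to this constant.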
Signed-off-by: afzal mohammed Tested-by: Vladimir Murzin Signed-off-by: Russell King --- arch/arm/mm/dump.c | 5 +++-- arch/arm/mm/init.c | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 9fe8e241335c..21192d6eda40 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -18,6 +18,7 @@ #include #include +#include #include struct addr_marker { @@ -31,8 +32,8 @@ static struct addr_marker address_markers[] = { { 0, "vmalloc() Area" }, { VMALLOC_END, "vmalloc() End" }, { FIXADDR_START, "Fixmap Area" }, - { CONFIG_VECTORS_BASE, "Vectors" }, - { CONFIG_VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" }, + { VECTORS_BASE, "Vectors" }, + { VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" }, { -1, NULL }, }; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 4127f578086c..50e5402a8ef3 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -518,8 +519,7 @@ void __init mem_init(void) " .data : 0x%p" " - 0x%p" " (%4td kB)\n" " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", - MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + - (PAGE_SIZE)), + MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE), #ifdef CONFIG_HAVE_TCM MLK(DTCM_OFFSET, (unsigned long) dtcm_end), MLK(ITCM_OFFSET, (unsigned long) itcm_end), -- cgit From f8300a0b5de08c09db105db3c34a2a1c618e147e Mon Sep 17 00:00:00 2001 From: Afzal Mohammed Date: Wed, 1 Feb 2017 13:46:36 +0100 Subject: ARM: 8647/2: nommu: dynamic exception base address setting Add no-MMU dynamic exception base address configuration on CP15 processors. In the case of low vectors, the decision is based on whether the security extensions are enabled and whether CONFIG_REMAP_VECTORS_TO_RAM is selected. For no-MMU without CP15, the current default value of 0x0 is retained.
Signed-off-by: afzal mohammed Tested-by: Vladimir Murzin Signed-off-by: Russell King --- arch/arm/mm/nommu.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 13a25d6282f8..3b5c7aaf9c76 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -22,6 +23,8 @@ #include "mm.h" +unsigned long vectors_base; + #ifdef CONFIG_ARM_MPU struct mpu_rgn_info mpu_rgn_info; @@ -278,15 +281,60 @@ static void adjust_lowmem_bounds_mpu(void) {} static void __init mpu_setup(void) {} #endif /* CONFIG_ARM_MPU */ +#ifdef CONFIG_CPU_CP15 +#ifdef CONFIG_CPU_HIGH_VECTOR +static unsigned long __init setup_vectors_base(void) +{ + unsigned long reg = get_cr(); + + set_cr(reg | CR_V); + return 0xffff0000; +} +#else /* CONFIG_CPU_HIGH_VECTOR */ +/* Write exception base address to VBAR */ +static inline void set_vbar(unsigned long val) +{ + asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc"); +} + +/* + * Security extensions, bits[7:4], permitted values, + * 0b0000 - not implemented, 0b0001/0b0010 - implemented + */ +static inline bool security_extensions_enabled(void) +{ + return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4); +} + +static unsigned long __init setup_vectors_base(void) +{ + unsigned long base = 0, reg = get_cr(); + + set_cr(reg & ~CR_V); + if (security_extensions_enabled()) { + if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) + base = CONFIG_DRAM_BASE; + set_vbar(base); + } else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) { + if (CONFIG_DRAM_BASE != 0) + pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n"); + } + + return base; +} +#endif /* CONFIG_CPU_HIGH_VECTOR */ +#endif /* CONFIG_CPU_CP15 */ + void __init arm_mm_memblock_reserve(void) { #ifndef CONFIG_CPU_V7M + vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0; /* * Register the exception vector page. * some architectures which the DRAM is the exception vector to trap, * alloc_page breaks with error, although it is not NULL, but "0." */ - memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE); + memblock_reserve(vectors_base, 2 * PAGE_SIZE); #else /* ifndef CONFIG_CPU_V7M */ /* * There is no dedicated vector page on V7-M. So nothing needs to be @@ -310,7 +358,7 @@ void __init adjust_lowmem_bounds(void) */ void __init paging_init(const struct machine_desc *mdesc) { - early_trap_init((void *)CONFIG_VECTORS_BASE); + early_trap_init((void *)vectors_base); mpu_setup(); bootmem_init(); } -- cgit From 06369a1e58bd5eb89c02f9a2cf1a35152cbf1154 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 1 Feb 2017 14:30:09 +0100 Subject: ARM: 8652/1: cache-uniphier: clean up active way setup code Now, the active way setup function is called with a fixed value zero for the second argument. The code can be simpler. 
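The cleanup below also replaces the open-coded way-mask arithmetic with GENMASK(). For reference, the two forms are equivalent — a standalone sketch, with the 4-way figure an assumed example:

#include <linux/bitops.h>	/* GENMASK() */
#include <linux/types.h>

/* "All N ways present" mask: for n_ways == 4, both forms yield 0xf. */
static u32 way_mask(unsigned int n_ways)
{
	/* old style: ((u32)1 << n_ways) - 1 */
	return GENMASK(n_ways - 1, 0);
}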
Signed-off-by: Masahiro Yamada Signed-off-by: Russell King --- arch/arm/mm/cache-uniphier.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c index dfe97b409916..f57b080b6fd4 100644 --- a/arch/arm/mm/cache-uniphier.c +++ b/arch/arm/mm/cache-uniphier.c @@ -15,6 +15,7 @@ #define pr_fmt(fmt) "uniphier: " fmt +#include #include #include #include @@ -71,8 +72,7 @@ * @ctrl_base: virtual base address of control registers * @rev_base: virtual base address of revision registers * @op_base: virtual base address of operation registers - * @way_present_mask: each bit specifies if the way is present - * @way_locked_mask: each bit specifies if the way is locked + * @way_mask: each bit specifies if the way is present * @nsets: number of associativity sets * @line_size: line size in bytes * @range_op_max_size: max size that can be handled by a single range operation @@ -83,8 +83,7 @@ struct uniphier_cache_data { void __iomem *rev_base; void __iomem *op_base; void __iomem *way_ctrl_base; - u32 way_present_mask; - u32 way_locked_mask; + u32 way_mask; u32 nsets; u32 line_size; u32 range_op_max_size; @@ -234,17 +233,13 @@ static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on) writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC); } -static void __init __uniphier_cache_set_locked_ways( - struct uniphier_cache_data *data, - u32 way_mask) +static void __init __uniphier_cache_set_active_ways( + struct uniphier_cache_data *data) { unsigned int cpu; - data->way_locked_mask = way_mask & data->way_present_mask; - for_each_possible_cpu(cpu) - writel_relaxed(~data->way_locked_mask & data->way_present_mask, - data->way_ctrl_base + 4 * cpu); + writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu); } static void uniphier_cache_maint_range(unsigned long start, unsigned long end, @@ -307,7 +302,7 @@ static void __init uniphier_cache_enable(void) list_for_each_entry(data, &uniphier_cache_list, list) { __uniphier_cache_enable(data, true); - __uniphier_cache_set_locked_ways(data, 0); + __uniphier_cache_set_active_ways(data); } } @@ -382,8 +377,8 @@ static int __init __uniphier_cache_init(struct device_node *np, goto err; } - data->way_present_mask = - ((u32)1 << cache_size / data->nsets / data->line_size) - 1; + data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1, + 0); data->ctrl_base = of_iomap(np, 0); if (!data->ctrl_base) { -- cgit From 3928624812dcfa39b6a67f9de46efcb51c573ad0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Jan 2017 15:11:10 +0000 Subject: ARM: mm: move initrd init code out of arm_memblock_init() Move the ARM initrd initialisation code out of arm_memblock_init() into its own function, so it can be cleaned up. Signed-off-by: Russell King --- arch/arm/mm/init.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 50e5402a8ef3..43d8825e59bb 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -228,11 +228,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) return phys; } -void __init arm_memblock_init(const struct machine_desc *mdesc) +static void __init arm_initrd_init(void) { - /* Register the kernel text, kernel data and initrd with memblock. 
*/ - memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START); - #ifdef CONFIG_BLK_DEV_INITRD /* FDT scan will populate initrd_start */ if (initrd_start && !phys_initrd_size) { @@ -260,6 +257,14 @@ void __init arm_memblock_init(const struct machine_desc *mdesc) initrd_end = initrd_start + phys_initrd_size; } #endif +} + +void __init arm_memblock_init(const struct machine_desc *mdesc) +{ + /* Register the kernel text, kernel data and initrd with memblock. */ + memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START); + + arm_initrd_init(); arm_mm_memblock_reserve(); -- cgit From 68b32f361f3892fc376051b1702954b2dc692d13 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Jan 2017 15:13:25 +0000 Subject: ARM: mm: clean up initrd initialisation Rather than repeatedly testing phys_initrd_size to see if the initrd is still enabled, return from the new function to avoid executing the remaining initialisation. Signed-off-by: Russell King --- arch/arm/mm/init.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 43d8825e59bb..15739a95552a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -236,26 +236,29 @@ static void __init arm_initrd_init(void) phys_initrd_start = __virt_to_phys(initrd_start); phys_initrd_size = initrd_end - initrd_start; } + initrd_start = initrd_end = 0; - if (phys_initrd_size && - !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { + + if (!phys_initrd_size) + return; + + if (!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", (u64)phys_initrd_start, phys_initrd_size); - phys_initrd_start = phys_initrd_size = 0; + return; } - if (phys_initrd_size && - memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { + + if (memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", (u64)phys_initrd_start, phys_initrd_size); - phys_initrd_start = phys_initrd_size = 0; + return; } - if (phys_initrd_size) { - memblock_reserve(phys_initrd_start, phys_initrd_size); - /* Now convert initrd to virtual addresses */ - initrd_start = __phys_to_virt(phys_initrd_start); - initrd_end = initrd_start + phys_initrd_size; - } + memblock_reserve(phys_initrd_start, phys_initrd_size); + + /* Now convert initrd to virtual addresses */ + initrd_start = __phys_to_virt(phys_initrd_start); + initrd_end = initrd_start + phys_initrd_size; #endif } -- cgit From cdcc5fa0415d943288c6316bd32e76befb1027c5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 16 Jan 2017 15:21:05 +0000 Subject: ARM: mm: round the initrd reservation to page boundaries Round the initrd memblock reservation to page boundaries to prevent other data sharing the initrd pages. This prevents an allocation possibly overlapping with the initrd, which would later get trampled on in free_initrd_mem(). 
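Worked through with assumed example numbers (a hypothetical 3 MiB initrd loaded at a non-page-aligned address, 4 KiB pages), the rounding added below behaves as follows:

#include <linux/init.h>
#include <linux/kernel.h>	/* round_down(), round_up() */
#include <linux/memblock.h>	/* memblock_reserve() */
#include <asm/page.h>		/* PAGE_SIZE */

static void __init initrd_rounding_example(void)
{
	phys_addr_t phys_initrd_start = 0x00801200;	/* hypothetical */
	unsigned long phys_initrd_size = 0x00300000;	/* 3 MiB */

	/* Align the start downwards, growing the size by the slack... */
	phys_addr_t start = round_down(phys_initrd_start, PAGE_SIZE);
							/* 0x00801000 */
	unsigned long size = phys_initrd_size + (phys_initrd_start - start);
							/* 0x00300200 */
	/* ...then round the size up to cover the final partial page. */
	size = round_up(size, PAGE_SIZE);		/* 0x00301000 */

	/* Reserving 0x00801000..0x00b02000 covers every page the initrd
	 * touches, so nothing else can be allocated in its edge pages. */
	memblock_reserve(start, size);
}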
Signed-off-by: Russell King --- arch/arm/mm/init.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 15739a95552a..d1e26610977d 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -231,6 +231,9 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) static void __init arm_initrd_init(void) { #ifdef CONFIG_BLK_DEV_INITRD + phys_addr_t start; + unsigned long size; + /* FDT scan will populate initrd_start */ if (initrd_start && !phys_initrd_size) { phys_initrd_start = __virt_to_phys(initrd_start); @@ -242,19 +245,29 @@ static void __init arm_initrd_init(void) if (!phys_initrd_size) return; - if (!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { + /* + * Round the memory region to page boundaries as per free_initrd_mem() + * This allows us to detect whether the pages overlapping the initrd + * are in use, but more importantly, reserves the entire set of pages + * as we don't want these pages allocated for other purposes. + */ + start = round_down(phys_initrd_start, PAGE_SIZE); + size = phys_initrd_size + (phys_initrd_start - start); + size = round_up(size, PAGE_SIZE); + + if (!memblock_is_region_memory(start, size)) { pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", - (u64)phys_initrd_start, phys_initrd_size); + (u64)start, size); return; } - if (memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { + if (memblock_is_region_reserved(start, size)) { pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", - (u64)phys_initrd_start, phys_initrd_size); + (u64)start, size); return; } - memblock_reserve(phys_initrd_start, phys_initrd_size); + memblock_reserve(start, size); /* Now convert initrd to virtual addresses */ initrd_start = __phys_to_virt(phys_initrd_start); -- cgit