From cb9d7707cd9be57830f31616233f6a872ca8416d Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 12 Jul 2010 21:50:59 +0100 Subject: ARM: 6222/1: add memory types for the TCMs The earlier TCM memory regions were mapped as MT_MEMORY_UNCACHED which doesn't really work on platforms supporting the new v6 features like the NX bit. Add unique MT_MEMORY_[I|D]TCM types instead. Cc: Nicolas Pitre Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/include/asm/mach/map.h | 2 ++ arch/arm/kernel/tcm.c | 5 ++--- arch/arm/mm/mmu.c | 13 +++++++++++++ 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 742c2aaeb020..d2fedb5aeb1f 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -27,6 +27,8 @@ struct map_desc { #define MT_MEMORY 9 #define MT_ROM 10 #define MT_MEMORY_NONCACHED 11 +#define MT_MEMORY_DTCM 12 +#define MT_MEMORY_ITCM 13 #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index e50303868f1b..0c62aa2c86ea 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -13,7 +13,6 @@ #include #include #include /* memcpy */ -#include /* PAGE_SHIFT */ #include #include #include @@ -53,7 +52,7 @@ static struct map_desc dtcm_iomap[] __initdata = { .virtual = DTCM_OFFSET, .pfn = __phys_to_pfn(DTCM_OFFSET), .length = (DTCM_END - DTCM_OFFSET + 1), - .type = MT_UNCACHED + .type = MT_MEMORY_DTCM } }; @@ -62,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = { .virtual = ITCM_OFFSET, .pfn = __phys_to_pfn(ITCM_OFFSET), .length = (ITCM_END - ITCM_OFFSET + 1), - .type = MT_UNCACHED + .type = MT_MEMORY_ITCM } }; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 285894171186..e53480148c05 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -258,6 +258,19 @@ static struct mem_type mem_types[] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, .domain = DOMAIN_KERNEL, }, + [MT_MEMORY_DTCM] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | + L_PTE_DIRTY | L_PTE_WRITE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, + .domain = DOMAIN_KERNEL, + }, + [MT_MEMORY_ITCM] = { + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | + L_PTE_USER | L_PTE_EXEC, + .prot_l1 = PMD_TYPE_TABLE, + .domain = DOMAIN_IO, + }, }; const struct mem_type *get_mem_type(unsigned int type) -- cgit From 598509779e5b8037d371df764d7438744a24b61f Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 12 Jul 2010 21:51:41 +0100 Subject: ARM: 6223/1: support multiple TCM banks CPUs v6 and up support multiple TCM banks, for example an ITCM of 8k is supplied in two 4k banks. This makes the TCM work on the 1176JZF-S devchip. 
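(A minimal sketch of the bank handling this adds, with made-up helper names; the register accesses are the ones the diff itself uses: the per-type bank count comes out of the TCM status register, and when a type has more than one bank the TCM Selection Register picks which bank the c9 region register refers to.)

#include <linux/init.h>
#include <linux/types.h>
#include <asm/cputype.h>	/* read_cpuid_tcmstatus() */

/* Illustrative only: decode the DTCM/ITCM bank counts */
static void __init example_tcm_bank_counts(u8 *dtcm_banks, u8 *itcm_banks)
{
	u32 tcm_status = read_cpuid_tcmstatus();

	*dtcm_banks = (tcm_status >> 16) & 0x03;	/* bits [17:16] */
	*itcm_banks = tcm_status & 0x03;		/* bits [1:0] */
}

/* Illustrative only: make 'bank' the target of the following c9, c1 accesses */
static void __init example_tcm_select_bank(u8 bank)
{
	asm("mcr p15, 0, %0, c9, c2, 0" : : "r" (bank));
}

setup_tcm_bank() below then probes, relocates and enables each selected bank in turn.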
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/kernel/tcm.c | 68 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 46 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index 0c62aa2c86ea..f2ead32c1aa0 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -92,14 +92,24 @@ void tcm_free(void *addr, size_t len) } EXPORT_SYMBOL(tcm_free); - -static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size) +static void __init setup_tcm_bank(u8 type, u8 bank, u8 banks, + u32 offset, u32 expected_size) { const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, -1, -1, -1, -1 }; u32 tcm_region; int tcm_size; + /* + * If there are more than one TCM bank of this type, + * select the TCM bank to operate on in the TCM selection + * register. + */ + if (banks > 1) + asm("mcr p15, 0, %0, c9, c2, 0" + : /* No output operands */ + : "r" (bank)); + /* Read the special TCM region register c9, 0 */ if (!type) asm("mrc p15, 0, %0, c9, c1, 0" @@ -110,21 +120,23 @@ static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size) tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f]; if (tcm_size < 0) { - pr_err("CPU: %sTCM of unknown size!\n", - type ? "I" : "D"); + pr_err("CPU: %sTCM%d of unknown size!\n", + type ? "I" : "D", bank); } else { - pr_info("CPU: found %sTCM %dk @ %08x, %senabled\n", + pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n", type ? "I" : "D", + bank, tcm_size, (tcm_region & 0xfffff000U), (tcm_region & 1) ? "" : "not "); } - if (tcm_size != expected_size) { - pr_crit("CPU: %sTCM was detected %dk but expected %dk!\n", - type ? "I" : "D", - tcm_size, - expected_size); + if (tcm_size != (expected_size >> 10)) { + pr_crit("CPU: %sTCM%d was detected %dk but expected %dk!\n", + type ? "I" : "D", + bank, + tcm_size, + (expected_size >> 10)); /* Adjust to the expected size? what can we do... */ } @@ -140,26 +152,37 @@ static void __init setup_tcm_bank(u8 type, u32 offset, u32 expected_size) : /* No output operands */ : "r" (tcm_region)); - pr_debug("CPU: moved %sTCM %dk to %08x, enabled\n", - type ? "I" : "D", - tcm_size, - (tcm_region & 0xfffff000U)); + pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n", + type ? 
"I" : "D", + bank, + tcm_size, + (tcm_region & 0xfffff000U)); } +/* We expect to find what is configured for the platform */ +#define DTCM_EXPECTED (DTCM_END - DTCM_OFFSET + 1) +#define ITCM_EXPECTED (ITCM_END - ITCM_OFFSET + 1) + /* * This initializes the TCM memory */ void __init tcm_init(void) { u32 tcm_status = read_cpuid_tcmstatus(); + u8 dtcm_banks = (tcm_status >> 16) & 0x03; + u32 dtcm_banksize = DTCM_EXPECTED / dtcm_banks; + u8 itcm_banks = (tcm_status & 0x03); + u32 itcm_banksize = ITCM_EXPECTED / itcm_banks; char *start; char *end; char *ram; + int i; /* Setup DTCM if present */ - if (tcm_status & (1 << 16)) { - setup_tcm_bank(0, DTCM_OFFSET, - (DTCM_END - DTCM_OFFSET + 1) >> 10); + for (i = 0; i < dtcm_banks; i++) { + setup_tcm_bank(0, i, dtcm_banks, + DTCM_OFFSET + (i * dtcm_banksize), + dtcm_banksize); request_resource(&iomem_resource, &dtcm_res); iotable_init(dtcm_iomap, 1); /* Copy data from RAM to DTCM */ @@ -171,9 +194,10 @@ void __init tcm_init(void) } /* Setup ITCM if present */ - if (tcm_status & 1) { - setup_tcm_bank(1, ITCM_OFFSET, - (ITCM_END - ITCM_OFFSET + 1) >> 10); + for (i = 0; i < itcm_banks; i++) { + setup_tcm_bank(1, i, itcm_banks, + ITCM_OFFSET + (i * itcm_banksize), + itcm_banksize); request_resource(&iomem_resource, &itcm_res); iotable_init(itcm_iomap, 1); /* Copy code from RAM to ITCM */ @@ -207,7 +231,7 @@ static int __init setup_tcm_pool(void) pr_debug("Setting up TCM memory pool\n"); /* Add the rest of DTCM to the TCM pool */ - if (tcm_status & (1 << 16)) { + if (tcm_status & (0x03 << 16)) { if (dtcm_pool_start < DTCM_END) { ret = gen_pool_add(tcm_pool, dtcm_pool_start, DTCM_END - dtcm_pool_start + 1, -1); @@ -224,7 +248,7 @@ static int __init setup_tcm_pool(void) } /* Add the rest of ITCM to the TCM pool */ - if (tcm_status & 1) { + if (tcm_status & 0x03) { if (itcm_pool_start < ITCM_END) { ret = gen_pool_add(tcm_pool, itcm_pool_start, ITCM_END - itcm_pool_start + 1, -1); -- cgit From 07d2a5c721c6aa2bd69812a74c8b3b116abf3e56 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 12 Jul 2010 21:52:34 +0100 Subject: ARM: 6224/1: print TCM whereabouts in init message If TCM is in use, we should display it in the virtual memory layout along with everything else. 
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mm/init.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'arch') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index f6a999465323..526af48b1271 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -611,6 +611,14 @@ void __init mem_init(void) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" +#ifdef CONFIG_HAVE_TCM +#ifdef DTCM_OFFSET + " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" +#endif +#ifdef ITCM_OFFSET + " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" +#endif +#endif " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_MMU " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" @@ -627,6 +635,14 @@ void __init mem_init(void) MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), +#ifdef CONFIG_HAVE_TCM +#ifdef DTCM_OFFSET + MLK(UL(DTCM_OFFSET), UL(DTCM_END + 1)), +#endif +#ifdef ITCM_OFFSET + MLK(UL(ITCM_OFFSET), UL(ITCM_END + 1)), +#endif +#endif MLK(FIXADDR_START, FIXADDR_TOP), #ifdef CONFIG_MMU MLM(CONSISTENT_BASE, CONSISTENT_END), -- cgit From 1dbd30e9890fd69e50b17edd70ca583546b0fe4e Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 12 Jul 2010 21:53:28 +0100 Subject: ARM: 6225/1: make TCM allocation static and common for all archs This changes the TCM handling so that a fixed area is reserved at 0xfffe0000-0xfffeffff for TCM. This areas is used by XScale but XScale does not have TCM so the mechanisms are mutually exclusive. This change is needed to make TCM detection more dynamic while still being able to compile code into it, and is a must for the unified ARM goals: the current TCM allocation at different places in memory for each machine would be a nightmare if you want to compile a single image for more than one machine with TCM so it has to be nailed down in one place. Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 9 ++++ arch/arm/kernel/tcm.c | 91 +++++++++++++++++--------------- arch/arm/mach-u300/include/mach/memory.h | 8 --- arch/arm/mm/init.c | 17 +++--- 4 files changed, 64 insertions(+), 61 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 4312ee5e3d0b..ab08d977ad49 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -123,6 +123,15 @@ #endif /* !CONFIG_MMU */ +/* + * We fix the TCM memories max 32 KiB ITCM resp DTCM at these + * locations + */ +#ifdef CONFIG_HAVE_TCM +#define ITCM_OFFSET UL(0xfffe0000) +#define DTCM_OFFSET UL(0xfffe8000) +#endif + /* * Physical vs virtual RAM address space conversion. These are * private definitions which should NOT be used outside memory.h diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index f2ead32c1aa0..26685c2f7a49 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c @@ -18,32 +18,30 @@ #include #include "tcm.h" -/* Scream and warn about misuse */ -#if !defined(ITCM_OFFSET) || !defined(ITCM_END) || \ - !defined(DTCM_OFFSET) || !defined(DTCM_END) -#error "TCM support selected but offsets not defined!" 
-#endif - static struct gen_pool *tcm_pool; /* TCM section definitions from the linker */ extern char __itcm_start, __sitcm_text, __eitcm_text; extern char __dtcm_start, __sdtcm_data, __edtcm_data; +/* These will be increased as we run */ +u32 dtcm_end = DTCM_OFFSET; +u32 itcm_end = ITCM_OFFSET; + /* * TCM memory resources */ static struct resource dtcm_res = { .name = "DTCM RAM", .start = DTCM_OFFSET, - .end = DTCM_END, + .end = DTCM_OFFSET, .flags = IORESOURCE_MEM }; static struct resource itcm_res = { .name = "ITCM RAM", .start = ITCM_OFFSET, - .end = ITCM_END, + .end = ITCM_OFFSET, .flags = IORESOURCE_MEM }; @@ -51,7 +49,7 @@ static struct map_desc dtcm_iomap[] __initdata = { { .virtual = DTCM_OFFSET, .pfn = __phys_to_pfn(DTCM_OFFSET), - .length = (DTCM_END - DTCM_OFFSET + 1), + .length = 0, .type = MT_MEMORY_DTCM } }; @@ -60,7 +58,7 @@ static struct map_desc itcm_iomap[] __initdata = { { .virtual = ITCM_OFFSET, .pfn = __phys_to_pfn(ITCM_OFFSET), - .length = (ITCM_END - ITCM_OFFSET + 1), + .length = 0, .type = MT_MEMORY_ITCM } }; @@ -92,8 +90,8 @@ void tcm_free(void *addr, size_t len) } EXPORT_SYMBOL(tcm_free); -static void __init setup_tcm_bank(u8 type, u8 bank, u8 banks, - u32 offset, u32 expected_size) +static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, + u32 *offset) { const int tcm_sizes[16] = { 0, -1, -1, 4, 8, 16, 32, 64, 128, 256, 512, 1024, -1, -1, -1, -1 }; @@ -120,8 +118,13 @@ static void __init setup_tcm_bank(u8 type, u8 bank, u8 banks, tcm_size = tcm_sizes[(tcm_region >> 2) & 0x0f]; if (tcm_size < 0) { - pr_err("CPU: %sTCM%d of unknown size!\n", + pr_err("CPU: %sTCM%d of unknown size\n", type ? "I" : "D", bank); + return -EINVAL; + } else if (tcm_size > 32) { + pr_err("CPU: %sTCM%d larger than 32k found\n", + type ? "I" : "D", bank); + return -EINVAL; } else { pr_info("CPU: found %sTCM%d %dk @ %08x, %senabled\n", type ? "I" : "D", @@ -131,17 +134,8 @@ static void __init setup_tcm_bank(u8 type, u8 bank, u8 banks, (tcm_region & 1) ? "" : "not "); } - if (tcm_size != (expected_size >> 10)) { - pr_crit("CPU: %sTCM%d was detected %dk but expected %dk!\n", - type ? "I" : "D", - bank, - tcm_size, - (expected_size >> 10)); - /* Adjust to the expected size? what can we do... */ - } - /* Force move the TCM bank to where we want it, enable */ - tcm_region = offset | (tcm_region & 0x00000ffeU) | 1; + tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; if (!type) asm("mcr p15, 0, %0, c9, c1, 0" @@ -152,17 +146,17 @@ static void __init setup_tcm_bank(u8 type, u8 bank, u8 banks, : /* No output operands */ : "r" (tcm_region)); + /* Increase offset */ + *offset += (tcm_size << 10); + pr_info("CPU: moved %sTCM%d %dk to %08x, enabled\n", type ? 
"I" : "D", bank, tcm_size, (tcm_region & 0xfffff000U)); + return 0; } -/* We expect to find what is configured for the platform */ -#define DTCM_EXPECTED (DTCM_END - DTCM_OFFSET + 1) -#define ITCM_EXPECTED (ITCM_END - ITCM_OFFSET + 1) - /* * This initializes the TCM memory */ @@ -170,40 +164,51 @@ void __init tcm_init(void) { u32 tcm_status = read_cpuid_tcmstatus(); u8 dtcm_banks = (tcm_status >> 16) & 0x03; - u32 dtcm_banksize = DTCM_EXPECTED / dtcm_banks; u8 itcm_banks = (tcm_status & 0x03); - u32 itcm_banksize = ITCM_EXPECTED / itcm_banks; char *start; char *end; char *ram; + int ret; int i; /* Setup DTCM if present */ - for (i = 0; i < dtcm_banks; i++) { - setup_tcm_bank(0, i, dtcm_banks, - DTCM_OFFSET + (i * dtcm_banksize), - dtcm_banksize); + if (dtcm_banks > 0) { + for (i = 0; i < dtcm_banks; i++) { + ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end); + if (ret) + return; + } + dtcm_res.end = dtcm_end - 1; request_resource(&iomem_resource, &dtcm_res); + dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; iotable_init(dtcm_iomap, 1); /* Copy data from RAM to DTCM */ start = &__sdtcm_data; end = &__edtcm_data; ram = &__dtcm_start; + /* This means you compiled more code than fits into DTCM */ + BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); memcpy(start, ram, (end-start)); pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); } /* Setup ITCM if present */ - for (i = 0; i < itcm_banks; i++) { - setup_tcm_bank(1, i, itcm_banks, - ITCM_OFFSET + (i * itcm_banksize), - itcm_banksize); + if (itcm_banks > 0) { + for (i = 0; i < itcm_banks; i++) { + ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end); + if (ret) + return; + } + itcm_res.end = itcm_end - 1; request_resource(&iomem_resource, &itcm_res); + itcm_iomap[0].length = itcm_end - ITCM_OFFSET; iotable_init(itcm_iomap, 1); /* Copy code from RAM to ITCM */ start = &__sitcm_text; end = &__eitcm_text; ram = &__itcm_start; + /* This means you compiled more code than fits into ITCM */ + BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); memcpy(start, ram, (end-start)); pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); } @@ -232,9 +237,9 @@ static int __init setup_tcm_pool(void) /* Add the rest of DTCM to the TCM pool */ if (tcm_status & (0x03 << 16)) { - if (dtcm_pool_start < DTCM_END) { + if (dtcm_pool_start < dtcm_end) { ret = gen_pool_add(tcm_pool, dtcm_pool_start, - DTCM_END - dtcm_pool_start + 1, -1); + dtcm_end - dtcm_pool_start, -1); if (ret) { pr_err("CPU DTCM: could not add DTCM " \ "remainder to pool!\n"); @@ -242,16 +247,16 @@ static int __init setup_tcm_pool(void) } pr_debug("CPU DTCM: Added %08x bytes @ %08x to " \ "the TCM memory pool\n", - DTCM_END - dtcm_pool_start + 1, + dtcm_end - dtcm_pool_start, dtcm_pool_start); } } /* Add the rest of ITCM to the TCM pool */ if (tcm_status & 0x03) { - if (itcm_pool_start < ITCM_END) { + if (itcm_pool_start < itcm_end) { ret = gen_pool_add(tcm_pool, itcm_pool_start, - ITCM_END - itcm_pool_start + 1, -1); + itcm_end - itcm_pool_start, -1); if (ret) { pr_err("CPU ITCM: could not add ITCM " \ "remainder to pool!\n"); @@ -259,7 +264,7 @@ static int __init setup_tcm_pool(void) } pr_debug("CPU ITCM: Added %08x bytes @ %08x to " \ "the TCM memory pool\n", - ITCM_END - itcm_pool_start + 1, + itcm_end - itcm_pool_start, itcm_pool_start); } } diff --git a/arch/arm/mach-u300/include/mach/memory.h b/arch/arm/mach-u300/include/mach/memory.h index ab000df7fc03..bf134bcc129d 100644 --- a/arch/arm/mach-u300/include/mach/memory.h +++ b/arch/arm/mach-u300/include/mach/memory.h @@ -34,14 +34,6 
@@ (CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1))*1024*1024 + 0x100) #endif -/* - * TCM memory whereabouts - */ -#define ITCM_OFFSET 0xffff2000 -#define ITCM_END 0xffff3fff -#define DTCM_OFFSET 0xffff4000 -#define DTCM_END 0xffff5fff - /* * We enable a real big DMA buffer if need be. */ diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 526af48b1271..e00404e6f45b 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -529,6 +529,11 @@ void __init mem_init(void) { unsigned long reserved_pages, free_pages; int i, node; +#ifdef CONFIG_HAVE_TCM + /* These pointers are filled in on TCM detection */ + extern u32 dtcm_end; + extern u32 itcm_end; +#endif #ifndef CONFIG_DISCONTIGMEM max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map; @@ -612,12 +617,8 @@ void __init mem_init(void) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HAVE_TCM -#ifdef DTCM_OFFSET " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" -#endif -#ifdef ITCM_OFFSET " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" -#endif #endif " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_MMU @@ -636,12 +637,8 @@ void __init mem_init(void) MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), #ifdef CONFIG_HAVE_TCM -#ifdef DTCM_OFFSET - MLK(UL(DTCM_OFFSET), UL(DTCM_END + 1)), -#endif -#ifdef ITCM_OFFSET - MLK(UL(ITCM_OFFSET), UL(ITCM_END + 1)), -#endif + MLK(DTCM_OFFSET, (unsigned long) dtcm_end), + MLK(ITCM_OFFSET, (unsigned long) itcm_end), #endif MLK(FIXADDR_START, FIXADDR_TOP), #ifdef CONFIG_MMU -- cgit From d746196361c9c635128249bb6cf13e709ae6abe1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 26 Jul 2010 10:29:13 +0100 Subject: ARM: use generic ioremap_page_range() We don't need our own implementation of this, use the generic library implementation instead. 
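(After the conversion the call shape is simply the generic walker over the target range; a minimal sketch with a hypothetical function name, using the same prot handling as the diff:)

#include <linux/mm.h>		/* PAGE_SIZE, pgprot_t */
#include <linux/io.h>		/* ioremap_page_range() */

/* Illustrative only: map a single page, as ioremap_page() now does below */
static int example_map_one_page(unsigned long virt, unsigned long phys,
				pgprot_t prot)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys, prot);
}

The generic lib/ioremap.c code performs the same descend-and-populate page table walk that the removed remap_area_*() helpers open-coded.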
Signed-off-by: Russell King --- arch/arm/mm/ioremap.c | 74 +++------------------------------------------------ 1 file changed, 4 insertions(+), 70 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 03f11935ed08..ab506272b2d3 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -42,78 +42,11 @@ */ #define VM_ARM_SECTION_MAPPING 0x80000000 -static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end, - unsigned long phys_addr, const struct mem_type *type) -{ - pgprot_t prot = __pgprot(type->prot_pte); - pte_t *pte; - - pte = pte_alloc_kernel(pmd, addr); - if (!pte) - return -ENOMEM; - - do { - if (!pte_none(*pte)) - goto bad; - - set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0); - phys_addr += PAGE_SIZE; - } while (pte++, addr += PAGE_SIZE, addr != end); - return 0; - - bad: - printk(KERN_CRIT "remap_area_pte: page already exists\n"); - BUG(); -} - -static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr, - unsigned long end, unsigned long phys_addr, - const struct mem_type *type) -{ - unsigned long next; - pmd_t *pmd; - int ret = 0; - - pmd = pmd_alloc(&init_mm, pgd, addr); - if (!pmd) - return -ENOMEM; - - do { - next = pmd_addr_end(addr, end); - ret = remap_area_pte(pmd, addr, next, phys_addr, type); - if (ret) - return ret; - phys_addr += next - addr; - } while (pmd++, addr = next, addr != end); - return ret; -} - -static int remap_area_pages(unsigned long start, unsigned long pfn, - size_t size, const struct mem_type *type) -{ - unsigned long addr = start; - unsigned long next, end = start + size; - unsigned long phys_addr = __pfn_to_phys(pfn); - pgd_t *pgd; - int err = 0; - - BUG_ON(addr >= end); - pgd = pgd_offset_k(addr); - do { - next = pgd_addr_end(addr, end); - err = remap_area_pmd(pgd, addr, next, phys_addr, type); - if (err) - break; - phys_addr += next - addr; - } while (pgd++, addr = next, addr != end); - - return err; -} - int ioremap_page(unsigned long virt, unsigned long phys, const struct mem_type *mtype) { - return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype); + return ioremap_page_range(virt, virt + PAGE_SIZE, phys, + __pgprot(mtype->prot_pte)); } EXPORT_SYMBOL(ioremap_page); @@ -300,7 +233,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, err = remap_area_sections(addr, pfn, size, type); } else #endif - err = remap_area_pages(addr, pfn, size, type); + err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), + __pgprot(type->prot_pte)); if (err) { vunmap((void *)addr); -- cgit From 5bc23d32d86a132b5636a48dca0fa2528ef69ff9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 25 Jul 2010 08:57:02 +0100 Subject: ARM: DMA coherent allocator: align remapped addresses The DMA coherent remap area is used to provide an uncached mapping of memory for coherency with DMA engines. Currently, we look for any free hole which our allocation will fit in with page alignment. However, this can lead to fragmentation of the area, and allows small allocations to cross L1 entry boundaries. This is undesirable as we want to move towards allocating sections of memory. Align allocations according to the size, limiting the alignment between the page and section sizes. 
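(A worked sketch of that sizing rule; the helper name and the example addresses are hypothetical, and the alignment expression simply mirrors the dma-mapping.c hunk below:)

#include <linux/types.h>
#include <linux/bitops.h>	/* fls() */
#include <asm/pgtable.h>	/* SECTION_SHIFT, assumed visible here */

/* Illustrative only: alignment grows with the request, capped at a section */
static unsigned long example_remap_align(size_t size)
{
	int bit = fls(size - 1) + 1;

	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	return 1UL << bit;
}

/*
 * e.g. an 8 KiB request: the hole search now rounds each candidate with
 * ALIGN(c->vm_end, align), so a hole ending at 0xffcff000 yields 0xffd00000
 * rather than a buffer straddling that section boundary.
 */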
Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 15 ++++++++++++++- arch/arm/mm/vmregion.c | 5 +++-- arch/arm/mm/vmregion.h | 2 +- 3 files changed, 18 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 9e7742f0a102..c704eed63c5d 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -183,6 +183,8 @@ static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) { struct arm_vmregion *c; + size_t align; + int bit; if (!consistent_pte[0]) { printk(KERN_ERR "%s: not initialised\n", __func__); @@ -190,10 +192,21 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) return NULL; } + /* + * Align the virtual region allocation - maximum alignment is + * a section size, minimum is a page size. This helps reduce + * fragmentation of the DMA space, and also prevents allocations + * smaller than a section from crossing a section boundary. + */ + bit = fls(size - 1) + 1; + if (bit > SECTION_SHIFT) + bit = SECTION_SHIFT; + align = 1 << bit; + /* * Allocate a virtual address in the consistent mapping region. */ - c = arm_vmregion_alloc(&consistent_head, size, + c = arm_vmregion_alloc(&consistent_head, align, size, gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); if (c) { pte_t *pte; diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c index 19e09bdb1b8a..935993e1b1ef 100644 --- a/arch/arm/mm/vmregion.c +++ b/arch/arm/mm/vmregion.c @@ -35,7 +35,8 @@ */ struct arm_vmregion * -arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp) +arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, + size_t size, gfp_t gfp) { unsigned long addr = head->vm_start, end = head->vm_end - size; unsigned long flags; @@ -58,7 +59,7 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp) goto nospc; if ((addr + size) <= c->vm_start) goto found; - addr = c->vm_end; + addr = ALIGN(c->vm_end, align); if (addr > end) goto nospc; } diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h index 6b2cdbdf3a85..15e9f044db9f 100644 --- a/arch/arm/mm/vmregion.h +++ b/arch/arm/mm/vmregion.h @@ -21,7 +21,7 @@ struct arm_vmregion { int vm_active; }; -struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t); +struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t); struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); -- cgit From c1ba6ba3dd4c127dd1b14125ac7feed43d974436 Mon Sep 17 00:00:00 2001 From: eric miao Date: Thu, 22 Jul 2010 09:55:53 +0100 Subject: ARM: 6251/1: Make SPARSE_IRQ a hidden option SPARSE_IRQ doesn't need to be a visible option, only those platforms supporting that will select it. Signed-off-by: Eric Miao Signed-off-by: Russell King --- arch/arm/Kconfig | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 8ef1e23a24a2..88b4f389ab35 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1287,8 +1287,7 @@ config HW_PERF_EVENTS disabled, perf events will use software events only. config SPARSE_IRQ - bool "Support sparse irq numbering" - depends on EXPERIMENTAL + def_bool n help This enables support for sparse irqs. 
This is useful in general as most CPUs have a fairly sparse array of IRQ vectors, which @@ -1296,8 +1295,6 @@ config SPARSE_IRQ number of off-chip IRQs will want to treat this as experimental until they have been independently verified. - If you don't know what to do here, say N. - source "mm/Kconfig" config FORCE_MAX_ZONEORDER -- cgit From 3dc91aff9c3ef54b15cdaf32f61f973489fe69eb Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 22 Jul 2010 13:16:49 +0100 Subject: ARM: 6252/1: Use SIGBUS for unaligned access instead of SIGILL POSIX specify to use signal SIGBUS with code BUS_ADRALN for invalid address alignment. Signed-off-by: Kirill A. Shutemov Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 4 ++-- arch/arm/mm/fault.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 6f98c358989a..53a609680c10 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -924,8 +924,8 @@ static int __init alignment_init(void) ai_usermode = UM_FIXUP; } - hook_fault_code(1, do_alignment, SIGILL, "alignment exception"); - hook_fault_code(3, do_alignment, SIGILL, "alignment exception"); + hook_fault_code(1, do_alignment, SIGBUS, "alignment exception"); + hook_fault_code(3, do_alignment, SIGBUS, "alignment exception"); return 0; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index cbfb2edcf7d1..ce6f3a422c81 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -463,9 +463,9 @@ static struct fsr_info { * defines these to be "precise" aborts. */ { do_bad, SIGSEGV, 0, "vector exception" }, - { do_bad, SIGILL, BUS_ADRALN, "alignment exception" }, + { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, { do_bad, SIGKILL, 0, "terminal exception" }, - { do_bad, SIGILL, BUS_ADRALN, "alignment exception" }, + { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, /* Do we need runtime check ? */ #if __LINUX_ARM_ARCH__ < 6 { do_bad, SIGBUS, 0, "external abort on linefetch" }, -- cgit From 6338a6aa7c082f11d55712251e14178c68bf5869 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 22 Jul 2010 13:18:19 +0100 Subject: ARM: 6269/1: Add 'code' parameter for hook_fault_code() Add one more parameter to hook_fault_code() to be able to set 'code' field of struct fsr_info. Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Russell King --- arch/arm/include/asm/system.h | 2 +- arch/arm/mach-integrator/pci_v3.c | 8 ++++---- arch/arm/mach-iop13xx/pci.c | 2 +- arch/arm/mach-ixp2000/pci.c | 2 +- arch/arm/mach-ixp23xx/pci.c | 2 +- arch/arm/mach-ixp4xx/common-pci.c | 3 ++- arch/arm/mach-ks8695/pci.c | 4 ++-- arch/arm/mm/alignment.c | 6 ++++-- arch/arm/mm/fault.c | 14 ++++++++------ arch/arm/plat-iop/pci.c | 2 +- 10 files changed, 25 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 5f4f48002734..8ba1ccf82a02 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -83,7 +83,7 @@ void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), - int sig, const char *name); + int sig, int code, const char *name); #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index 9cef0590d5aa..6467d99fa2ee 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -505,10 +505,10 @@ void __init pci_v3_preinit(void) /* * Hook in our fault handler for PCI errors */ - hook_fault_code(4, v3_pci_fault, SIGBUS, "external abort on linefetch"); - hook_fault_code(6, v3_pci_fault, SIGBUS, "external abort on linefetch"); - hook_fault_code(8, v3_pci_fault, SIGBUS, "external abort on non-linefetch"); - hook_fault_code(10, v3_pci_fault, SIGBUS, "external abort on non-linefetch"); + hook_fault_code(4, v3_pci_fault, SIGBUS, 0, "external abort on linefetch"); + hook_fault_code(6, v3_pci_fault, SIGBUS, 0, "external abort on linefetch"); + hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); + hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); spin_lock_irqsave(&v3_lock, flags); diff --git a/arch/arm/mach-iop13xx/pci.c b/arch/arm/mach-iop13xx/pci.c index 6d5a90813d31..773ea0c95b9f 100644 --- a/arch/arm/mach-iop13xx/pci.c +++ b/arch/arm/mach-iop13xx/pci.c @@ -987,7 +987,7 @@ void __init iop13xx_pci_init(void) iop13xx_atux_setup(); } - hook_fault_code(16+6, iop13xx_pci_abort, SIGBUS, + hook_fault_code(16+6, iop13xx_pci_abort, SIGBUS, 0, "imprecise external abort"); } diff --git a/arch/arm/mach-ixp2000/pci.c b/arch/arm/mach-ixp2000/pci.c index 90771cad06f8..f797c5f538b0 100644 --- a/arch/arm/mach-ixp2000/pci.c +++ b/arch/arm/mach-ixp2000/pci.c @@ -209,7 +209,7 @@ ixp2000_pci_preinit(void) "the needed workaround has not been configured in"); #endif - hook_fault_code(16+6, ixp2000_pci_abort_handler, SIGBUS, + hook_fault_code(16+6, ixp2000_pci_abort_handler, SIGBUS, 0, "PCI config cycle to non-existent device"); } diff --git a/arch/arm/mach-ixp23xx/pci.c b/arch/arm/mach-ixp23xx/pci.c index 4b0e598a91c9..563819a83292 100644 --- a/arch/arm/mach-ixp23xx/pci.c +++ b/arch/arm/mach-ixp23xx/pci.c @@ -229,7 +229,7 @@ void __init ixp23xx_pci_preinit(void) { ixp23xx_pci_common_init(); - hook_fault_code(16+6, ixp23xx_pci_abort_handler, SIGBUS, + hook_fault_code(16+6, ixp23xx_pci_abort_handler, SIGBUS, 0, "PCI config cycle to non-existent device"); *IXP23XX_PCI_ADDR_EXT = 0x0000e000; diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index e3181534c7f9..f4fbb5ec645b 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -382,7 +382,8 @@ void __init 
ixp4xx_pci_preinit(void) /* hook in our fault handler for PCI errors */ - hook_fault_code(16+6, abort_handler, SIGBUS, "imprecise external abort"); + hook_fault_code(16+6, abort_handler, SIGBUS, 0, + "imprecise external abort"); pr_debug("setup PCI-AHB(inbound) and AHB-PCI(outbound) address mappings\n"); diff --git a/arch/arm/mach-ks8695/pci.c b/arch/arm/mach-ks8695/pci.c index 78499667eb7b..5fcd082a17f9 100644 --- a/arch/arm/mach-ks8695/pci.c +++ b/arch/arm/mach-ks8695/pci.c @@ -268,8 +268,8 @@ static void __init ks8695_pci_preinit(void) __raw_writel(0, KS8695_PCI_VA + KS8695_PIOBAC); /* hook in fault handlers */ - hook_fault_code(8, ks8695_pci_fault, SIGBUS, "external abort on non-linefetch"); - hook_fault_code(10, ks8695_pci_fault, SIGBUS, "external abort on non-linefetch"); + hook_fault_code(8, ks8695_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); + hook_fault_code(10, ks8695_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); } static void ks8695_show_pciregs(void) diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 53a609680c10..77cfdbed9501 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -924,8 +924,10 @@ static int __init alignment_init(void) ai_usermode = UM_FIXUP; } - hook_fault_code(1, do_alignment, SIGBUS, "alignment exception"); - hook_fault_code(3, do_alignment, SIGBUS, "alignment exception"); + hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, + "alignment exception"); + hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN, + "alignment exception"); return 0; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index ce6f3a422c81..84131c832430 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -508,13 +508,15 @@ static struct fsr_info { void __init hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), - int sig, const char *name) + int sig, int code, const char *name) { - if (nr >= 0 && nr < ARRAY_SIZE(fsr_info)) { - fsr_info[nr].fn = fn; - fsr_info[nr].sig = sig; - fsr_info[nr].name = name; - } + if (nr < 0 || nr >= ARRAY_SIZE(fsr_info)) + BUG(); + + fsr_info[nr].fn = fn; + fsr_info[nr].sig = sig; + fsr_info[nr].code = code; + fsr_info[nr].name = name; } /* diff --git a/arch/arm/plat-iop/pci.c b/arch/arm/plat-iop/pci.c index ce31f316ac75..43f2b158237c 100644 --- a/arch/arm/plat-iop/pci.c +++ b/arch/arm/plat-iop/pci.c @@ -359,7 +359,7 @@ static void __init iop3xx_atu_debug(void) DBG("ATU: IOP3XX_ATUCMD=0x%04x\n", *IOP3XX_ATUCMD); DBG("ATU: IOP3XX_ATUCR=0x%08x\n", *IOP3XX_ATUCR); - hook_fault_code(16+6, iop3xx_pci_abort, SIGBUS, "imprecise external abort"); + hook_fault_code(16+6, iop3xx_pci_abort, SIGBUS, 0, "imprecise external abort"); } /* for platforms that might be host-bus-adapters */ -- cgit From 33a9c41bf5d8adae9d882513e617c4c645195e71 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 22 Jul 2010 13:20:22 +0100 Subject: ARM: 6255/1: Workaround infinity loop in handling of translation faults On ARM one Linux PGD entry contains two hardware entries (see page tables layout in pgtable.h). We normally guarantee that we always fill both L1 entries. But create_mapping() doesn't follow the rule. It can create inidividual L1 entries, so here we have to call pmd_none() check in do_translation_fault() for the entry really corresponded to address, not for the first of pair. Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Russell King --- arch/arm/mm/fault.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 84131c832430..564b1c4829e4 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -413,7 +413,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr, pmd_k = pmd_offset(pgd_k, addr); pmd = pmd_offset(pgd, addr); - if (pmd_none(*pmd_k)) + /* + * On ARM one Linux PGD entry contains two hardware entries (see page + * tables layout in pgtable.h). We normally guarantee that we always + * fill both L1 entries. But create_mapping() doesn't follow the rule. + * It can create inidividual L1 entries, so here we have to call + * pmd_none() check for the entry really corresponded to address, not + * for the first of pair. + */ + index = (addr >> SECTION_SHIFT) & 1; + if (pmd_none(pmd_k[index])) goto bad_area; copy_pmd(pmd, pmd_k); -- cgit From 993bf4ec8c2a2b7979389ab196bf2fe217117158 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 22 Jul 2010 13:23:25 +0100 Subject: ARM: 6256/1: Check arch version and modify fsr_info[] depends on it at runtime Signed-off-by: Kirill A. Shutemov Signed-off-by: Russell King --- arch/arm/mm/fault.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 564b1c4829e4..5835e63454ff 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -475,12 +475,7 @@ static struct fsr_info { { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, { do_bad, SIGKILL, 0, "terminal exception" }, { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, -/* Do we need runtime check ? */ -#if __LINUX_ARM_ARCH__ < 6 { do_bad, SIGBUS, 0, "external abort on linefetch" }, -#else - { do_translation_fault, SIGSEGV, SEGV_MAPERR, "I-cache maintenance fault" }, -#endif { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, { do_bad, SIGBUS, 0, "external abort on linefetch" }, { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, @@ -605,3 +600,14 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) arm_notify_die("", regs, &info, ifsr, 0); } +static int __init exceptions_init(void) +{ + if (cpu_architecture() >= CPU_ARCH_ARMv6) { + hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR, + "I-cache maintenance fault"); + } + + return 0; +} + +arch_initcall(exceptions_init); -- cgit From b8ab5397bcbd92e3fd4a9770e0bf59315fa38dab Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 26 Jul 2010 11:20:41 +0100 Subject: ARM: 6268/1: ARMv6K and ARMv7 use fault statuses 3 and 6 as Access Flag fault Statuses 3 (0b00011) and 6 (0x00110) of DFSR are Access Flags faults on ARMv6K and ARMv7. Let's patch fsr_info[] at runtime if we are on ARMv7 or later. Unfortunately, we don't have runtime check for 'K' extension, so we can't check for it. Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Russell King --- arch/arm/mm/alignment.c | 14 ++++++++++++-- arch/arm/mm/fault.c | 11 +++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 77cfdbed9501..d073b64ae87e 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -926,8 +926,18 @@ static int __init alignment_init(void) hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, "alignment exception"); - hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN, - "alignment exception"); + + /* + * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section + * fault, not as alignment error. + * + * TODO: handle ARMv6K properly. Runtime check for 'K' extension is + * needed. + */ + if (cpu_architecture() <= CPU_ARCH_ARMv6) { + hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN, + "alignment exception"); + } return 0; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 5835e63454ff..23b0b03af5ea 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -607,6 +607,17 @@ static int __init exceptions_init(void) "I-cache maintenance fault"); } + if (cpu_architecture() >= CPU_ARCH_ARMv7) { + /* + * TODO: Access flag faults introduced in ARMv6K. + * Runtime check for 'K' extension is needed + */ + hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR, + "section access flag fault"); + hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR, + "section access flag fault"); + } + return 0; } -- cgit From 9ca03a21e320a6bf44559323527aba704bcc8772 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 26 Jul 2010 12:22:12 +0100 Subject: ARM: Factor out common code from cpu_proc_fin() All implementations of cpu_proc_fin() start by disabling interrupts and then flush caches. Rather than have every processors proc_fin() implementation do this, move it out into generic code - and move the cache flush past setup_mm_for_reboot() (so it can benefit from having caches still enabled.) This allows cpu_proc_fin() to become independent of the L1/L2 cache types, and eventually move the L2 cache flushing into the L2 support code. 
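(The common ordering pulled out of the per-CPU proc_fin() implementations, condensed from the process.c and machine_kexec.c hunks that follow; the wrapper function is only for illustration:)

#include <linux/irqflags.h>	/* local_irq_disable(), local_fiq_disable() */
#include <asm/cacheflush.h>	/* flush_cache_all() */
#include <asm/proc-fns.h>	/* cpu_proc_fin() */

/* Illustrative only: what each caller now does around cpu_proc_fin() */
static void example_prepare_for_reset(char mode)
{
	local_irq_disable();		/* interrupts and FIQs off first */
	local_fiq_disable();
	setup_mm_for_reboot(mode);	/* remap while the caches still help */
	flush_cache_all();		/* clean and invalidate everything */
	cpu_proc_fin();			/* now only has to turn the caches off */
	flush_cache_all();		/* push out anything dirtied meanwhile */
}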
Signed-off-by: Russell King --- arch/arm/kernel/machine_kexec.c | 6 +++++- arch/arm/kernel/process.c | 17 +++++++++++++---- arch/arm/mm/proc-arm1020.S | 6 +----- arch/arm/mm/proc-arm1020e.S | 6 +----- arch/arm/mm/proc-arm1022.S | 6 +----- arch/arm/mm/proc-arm1026.S | 6 +----- arch/arm/mm/proc-arm6_7.S | 2 -- arch/arm/mm/proc-arm720.S | 6 +----- arch/arm/mm/proc-arm740.S | 6 +----- arch/arm/mm/proc-arm7tdmi.S | 2 -- arch/arm/mm/proc-arm920.S | 10 +--------- arch/arm/mm/proc-arm922.S | 10 +--------- arch/arm/mm/proc-arm925.S | 6 +----- arch/arm/mm/proc-arm926.S | 6 +----- arch/arm/mm/proc-arm940.S | 6 +----- arch/arm/mm/proc-arm946.S | 6 +----- arch/arm/mm/proc-arm9tdmi.S | 2 -- arch/arm/mm/proc-fa526.S | 6 +----- arch/arm/mm/proc-feroceon.S | 7 +------ arch/arm/mm/proc-mohawk.S | 6 +----- arch/arm/mm/proc-sa110.S | 8 ++------ arch/arm/mm/proc-sa1100.S | 6 +----- arch/arm/mm/proc-v6.S | 5 +---- arch/arm/mm/proc-v7.S | 5 +---- arch/arm/mm/proc-xsc3.S | 6 +----- arch/arm/mm/proc-xscale.S | 6 +----- 26 files changed, 40 insertions(+), 124 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 598ca61e7bca..3b4872c2da8e 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -74,7 +74,11 @@ void machine_kexec(struct kimage *image) (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); - cpu_proc_fin(); + local_irq_disable(); + local_fiq_disable(); setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/ + flush_cache_all(); + cpu_proc_fin(); + flush_cache_all(); cpu_reset(reboot_code_buffer_phys); } diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index a4a9cc88bec7..aaf51159203a 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -84,10 +85,9 @@ __setup("hlt", hlt_setup); void arm_machine_restart(char mode, const char *cmd) { - /* - * Clean and disable cache, and turn off interrupts - */ - cpu_proc_fin(); + /* Disable interrupts first */ + local_irq_disable(); + local_fiq_disable(); /* * Tell the mm system that we are going to reboot - @@ -96,6 +96,15 @@ void arm_machine_restart(char mode, const char *cmd) */ setup_mm_for_reboot(mode); + /* Clean and invalidate caches */ + flush_cache_all(); + + /* Turn off caching */ + cpu_proc_fin(); + + /* Push out any further dirty data, and ensure cache is empty */ + flush_cache_all(); + /* * Now call the architecture specific reboot code. */ diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 72507c630ceb..203a4e944d9e 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -79,15 +79,11 @@ ENTRY(cpu_arm1020_proc_init) * cpu_arm1020_proc_fin() */ ENTRY(cpu_arm1020_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm1020_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. 
mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm1020_reset(loc) diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index d27829805609..1a511e765909 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -79,15 +79,11 @@ ENTRY(cpu_arm1020e_proc_init) * cpu_arm1020e_proc_fin() */ ENTRY(cpu_arm1020e_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm1020e_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm1020e_reset(loc) diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index ce13e4a827de..1ffa4eb9c34f 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -68,15 +68,11 @@ ENTRY(cpu_arm1022_proc_init) * cpu_arm1022_proc_fin() */ ENTRY(cpu_arm1022_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm1022_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm1022_reset(loc) diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 636672a29c6d..5697c34b95b0 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -68,15 +68,11 @@ ENTRY(cpu_arm1026_proc_init) * cpu_arm1026_proc_fin() */ ENTRY(cpu_arm1026_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm1026_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm1026_reset(loc) diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 795dc615f43b..64e0b327c7c5 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -184,8 +184,6 @@ ENTRY(cpu_arm7_proc_init) ENTRY(cpu_arm6_proc_fin) ENTRY(cpu_arm7_proc_fin) - mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, r0 mov r0, #0x31 @ ....S..DP...M mcr p15, 0, r0, c1, c0, 0 @ disable caches mov pc, lr diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 0b62de244666..9d96824134fc 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -54,15 +54,11 @@ ENTRY(cpu_arm720_proc_init) mov pc, lr ENTRY(cpu_arm720_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. 
mcr p15, 0, r0, c1, c0, 0 @ disable caches - mcr p15, 0, r1, c7, c7, 0 @ invalidate cache - ldmfd sp!, {pc} + mov pc, lr /* * Function: arm720_proc_do_idle(void) diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 01860cdeb2ec..6c1a9ab059ae 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -36,15 +36,11 @@ ENTRY(cpu_arm740_switch_mm) * cpu_arm740_proc_fin() */ ENTRY(cpu_arm740_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip mrc p15, 0, r0, c1, c0, 0 bic r0, r0, #0x3f000000 @ bank/f/lock/s bic r0, r0, #0x0000000c @ w-buffer/cache mcr p15, 0, r0, c1, c0, 0 @ disable caches - mcr p15, 0, r0, c7, c0, 0 @ invalidate cache - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm740_reset(loc) diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 1201b9863829..6a850dbba22e 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -36,8 +36,6 @@ ENTRY(cpu_arm7tdmi_switch_mm) * cpu_arm7tdmi_proc_fin() */ ENTRY(cpu_arm7tdmi_proc_fin) - mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, r0 mov pc, lr /* diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 8be81992645d..86f80aa56216 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -69,19 +69,11 @@ ENTRY(cpu_arm920_proc_init) * cpu_arm920_proc_fin() */ ENTRY(cpu_arm920_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip -#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH - bl arm920_flush_kern_cache_all -#else - bl v4wt_flush_kern_cache_all -#endif mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm920_reset(loc) diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index c0ff8e4b1074..f76ce9b62883 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -71,19 +71,11 @@ ENTRY(cpu_arm922_proc_init) * cpu_arm922_proc_fin() */ ENTRY(cpu_arm922_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip -#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH - bl arm922_flush_kern_cache_all -#else - bl v4wt_flush_kern_cache_all -#endif mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm922_reset(loc) diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 3c6cffe400f6..657bd3f7c153 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -92,15 +92,11 @@ ENTRY(cpu_arm925_proc_init) * cpu_arm925_proc_fin() */ ENTRY(cpu_arm925_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm925_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. 
mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm925_reset(loc) diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 75b707c9cce1..73f1f3c68910 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -61,15 +61,11 @@ ENTRY(cpu_arm926_proc_init) * cpu_arm926_proc_fin() */ ENTRY(cpu_arm926_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm926_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm926_reset(loc) diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 1af1657819eb..fffb061a45a5 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -37,15 +37,11 @@ ENTRY(cpu_arm940_switch_mm) * cpu_arm940_proc_fin() */ ENTRY(cpu_arm940_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm940_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x00001000 @ i-cache bic r0, r0, #0x00000004 @ d-cache mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm940_reset(loc) diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 1664b6aaff79..249a6053760a 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -44,15 +44,11 @@ ENTRY(cpu_arm946_switch_mm) * cpu_arm946_proc_fin() */ ENTRY(cpu_arm946_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl arm946_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x00001000 @ i-cache bic r0, r0, #0x00000004 @ d-cache mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_arm946_reset(loc) diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 28545c29dbcd..db475667fac2 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -36,8 +36,6 @@ ENTRY(cpu_arm9tdmi_switch_mm) * cpu_arm9tdmi_proc_fin() */ ENTRY(cpu_arm9tdmi_proc_fin) - mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, r0 mov pc, lr /* diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 08f5ac237ad4..7803fdf70029 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -39,17 +39,13 @@ ENTRY(cpu_fa526_proc_init) * cpu_fa526_proc_fin() */ ENTRY(cpu_fa526_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl fa_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches nop nop - ldmfd sp!, {pc} + mov pc, lr /* * cpu_fa526_reset(loc) diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 53e632343849..b304d0104a4e 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -75,11 +75,6 @@ ENTRY(cpu_feroceon_proc_init) * cpu_feroceon_proc_fin() */ ENTRY(cpu_feroceon_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl feroceon_flush_kern_cache_all - #if defined(CONFIG_CACHE_FEROCEON_L2) && \ !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH) mov r0, #0 @@ -91,7 +86,7 @@ ENTRY(cpu_feroceon_proc_fin) bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. 
mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_feroceon_reset(loc) diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index caa31154e7db..5f6892fcc167 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -51,15 +51,11 @@ ENTRY(cpu_mohawk_proc_init) * cpu_mohawk_proc_fin() */ ENTRY(cpu_mohawk_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl mohawk_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...iz........... bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_mohawk_reset(loc) diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 7b706b389906..a201eb04b5e1 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -44,17 +44,13 @@ ENTRY(cpu_sa110_proc_init) * cpu_sa110_proc_fin() */ ENTRY(cpu_sa110_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl v4wb_flush_kern_cache_all @ clean caches -1: mov r0, #0 + mov r0, #0 mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_sa110_reset(loc) diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 5c47760c2064..7ddc4805bf97 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -55,16 +55,12 @@ ENTRY(cpu_sa1100_proc_init) * - Clean and turn off caches. */ ENTRY(cpu_sa1100_proc_fin) - stmfd sp!, {lr} - mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE - msr cpsr_c, ip - bl v4wb_flush_kern_cache_all mcr p15, 0, ip, c15, c2, 2 @ Disable clock switching mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x000e @ ............wca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_sa1100_reset(loc) diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 2f5a3c23a0fe..22aac8515196 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -42,14 +42,11 @@ ENTRY(cpu_v6_proc_init) mov pc, lr ENTRY(cpu_v6_proc_fin) - stmfd sp!, {lr} - cpsid if @ disable interrupts - bl v6_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr /* * cpu_v6_reset(loc) diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 8071bcd4c995..6a8506d99ee9 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -45,14 +45,11 @@ ENTRY(cpu_v7_proc_init) ENDPROC(cpu_v7_proc_init) ENTRY(cpu_v7_proc_fin) - stmfd sp!, {lr} - cpsid if @ disable interrupts - bl v7_flush_kern_cache_all mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1000 @ ...i............ bic r0, r0, #0x0006 @ .............ca. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldmfd sp!, {pc} + mov pc, lr ENDPROC(cpu_v7_proc_fin) /* diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index e5797f1c1db7..361a51e49030 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -90,15 +90,11 @@ ENTRY(cpu_xsc3_proc_init) * cpu_xsc3_proc_fin() */ ENTRY(cpu_xsc3_proc_fin) - str lr, [sp, #-4]! 
- mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE - msr cpsr_c, r0 - bl xsc3_flush_kern_cache_all @ clean caches mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...IZ........... bic r0, r0, #0x0006 @ .............CA. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldr pc, [sp], #4 + mov pc, lr /* * cpu_xsc3_reset(loc) diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 63037e2162f2..14075979bcba 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -124,15 +124,11 @@ ENTRY(cpu_xscale_proc_init) * cpu_xscale_proc_fin() */ ENTRY(cpu_xscale_proc_fin) - str lr, [sp, #-4]! - mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE - msr cpsr_c, r0 - bl xscale_flush_kern_cache_all @ clean caches mrc p15, 0, r0, c1, c0, 0 @ ctrl register bic r0, r0, #0x1800 @ ...IZ........... bic r0, r0, #0x0006 @ .............CA. mcr p15, 0, r0, c1, c0, 0 @ disable caches - ldr pc, [sp], #4 + mov pc, lr /* * cpu_xscale_reset(loc) -- cgit From 5388a6b266e9c3357353332ba0cd5549082887f1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 26 Jul 2010 13:19:43 +0100 Subject: ARM: SMP: Always enable clock event broadcast support The TWD local timers are unable to wake up the CPU when it is placed into a low power mode, eg. C3. Therefore, we need to adapt things such that the TWD code can cope with this. We do this by always providing a broadcast tick function, and marking the fact that the TWD local timer will stop in low power modes. This means that when the CPU is placed into a low power mode, the core timer code marks this fact, and allows an IPI to be given to the core. Tested-by: Santosh Shilimkar Signed-off-by: Russell King Cc: Catalin Marinas Cc: Thomas Gleixner --- arch/arm/Kconfig | 2 +- arch/arm/kernel/smp.c | 6 +++++- arch/arm/kernel/smp_twd.c | 3 ++- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 88b4f389ab35..157b08aa0366 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -56,7 +56,7 @@ config GENERIC_CLOCKEVENTS config GENERIC_CLOCKEVENTS_BROADCAST bool depends on GENERIC_CLOCKEVENTS - default y if SMP && !LOCAL_TIMERS + default y if SMP config HAVE_TCM bool diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index b8c3d0f689d9..0170e248a1dd 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -429,7 +429,11 @@ static void smp_timer_broadcast(const struct cpumask *mask) { send_ipi_message(mask, IPI_TIMER); } +#else +#define smp_timer_broadcast NULL +#endif +#ifndef CONFIG_LOCAL_TIMERS static void broadcast_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { @@ -444,7 +448,6 @@ static void local_timer_setup(struct clock_event_device *evt) evt->rating = 400; evt->mult = 1; evt->set_mode = broadcast_timer_set_mode; - evt->broadcast = smp_timer_broadcast; clockevents_register_device(evt); } @@ -456,6 +459,7 @@ void __cpuinit percpu_timer_setup(void) struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); evt->cpumask = cpumask_of(cpu); + evt->broadcast = smp_timer_broadcast; local_timer_setup(evt); } diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 7c5f0c024db7..35882fbf37f9 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -132,7 +132,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk) twd_calibrate_rate(); clk->name = "local_timer"; - clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; + clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | + 
CLOCK_EVT_FEAT_C3STOP; clk->rating = 350; clk->set_mode = twd_set_mode; clk->set_next_event = twd_set_next_event; -- cgit From 3d3f78d752bfada5b6041f2f7bd0833d8bdf7a4a Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 26 Jul 2010 13:31:27 +0100 Subject: ARM: call machine_shutdown() from machine_halt(), etc x86 calls machine_shutdown() from the various machine_*() calls which take the machine down ready for halting, restarting, etc, and uses this to bring the system safely to a point where those actions can be performed. Such actions are stopping the secondary CPUs. So, change the ARM implementation of these to reflect what x86 does. This solves kexec problems on ARM SMP platforms, where the secondary CPUs were left running across the kexec call. Signed-off-by: Russell King --- arch/arm/kernel/machine_kexec.c | 4 ---- arch/arm/kernel/process.c | 12 +++++++++++- arch/arm/kernel/smp.c | 11 +++++++---- 3 files changed, 18 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 3b4872c2da8e..df5958f6864f 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -37,10 +37,6 @@ void machine_kexec_cleanup(struct kimage *image) { } -void machine_shutdown(void) -{ -} - void machine_crash_shutdown(struct pt_regs *regs) { } diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index aaf51159203a..2e2ec97cc50c 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -198,19 +198,29 @@ int __init reboot_setup(char *str) __setup("reboot=", reboot_setup); -void machine_halt(void) +void machine_shutdown(void) { +#ifdef CONFIG_SMP + smp_send_stop(); +#endif } +void machine_halt(void) +{ + machine_shutdown(); + while (1); +} void machine_power_off(void) { + machine_shutdown(); if (pm_power_off) pm_power_off(); } void machine_restart(char *cmd) { + machine_shutdown(); arm_pm_restart(reboot_mode, cmd); } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 0170e248a1dd..40dc74f2b27f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -471,10 +471,13 @@ static DEFINE_SPINLOCK(stop_lock); */ static void ipi_cpu_stop(unsigned int cpu) { - spin_lock(&stop_lock); - printk(KERN_CRIT "CPU%u: stopping\n", cpu); - dump_stack(); - spin_unlock(&stop_lock); + if (system_state == SYSTEM_BOOTING || + system_state == SYSTEM_RUNNING) { + spin_lock(&stop_lock); + printk(KERN_CRIT "CPU%u: stopping\n", cpu); + dump_stack(); + spin_unlock(&stop_lock); + } set_cpu_online(cpu, false); -- cgit
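(Condensing the process.c hunk above: every reboot path now funnels through one helper before doing its own thing — a sketch only, with reboot_mode and arm_pm_restart() as process.c already uses them:)

void machine_shutdown(void)
{
#ifdef CONFIG_SMP
	smp_send_stop();		/* park the secondary CPUs first */
#endif
}

void machine_restart(char *cmd)
{
	machine_shutdown();		/* machine_halt()/machine_power_off() do the same */
	arm_pm_restart(reboot_mode, cmd);
}

Since the generic kexec path (kernel_kexec()) calls machine_shutdown() before machine_kexec(), this is also what stops the secondary CPUs ahead of a kexec on SMP platforms.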