From 8479d94e9f6a44b5050cbacf653272a561fbe0d0 Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Mon, 27 Oct 2008 09:30:57 +0100 Subject: x86, signals: remove duplicated register setup code in ia32 signal delivery Impact: cleanup, no functionality changed ia32_setup_rt_frame() has a duplicated code block labelled "Make -mregparm=3 work" for setting up the register parameters to the user-mode signal handler. This is harmless but ugly. Remove the redundant assignments. Signed-off-by: Mikael Pettersson Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32_signal.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 4bc02b23674b..e82ebd652263 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -572,11 +572,6 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; - /* Make -mregparm=3 work */ - regs->ax = sig; - regs->dx = (unsigned long) &frame->info; - regs->cx = (unsigned long) &frame->uc; - loadsegment(ds, __USER32_DS); loadsegment(es, __USER32_DS); -- cgit From c63dfefd48d92b1db3400fe8de4886a519ac3949 Mon Sep 17 00:00:00 2001 From: Dan McGee Date: Thu, 23 Oct 2008 15:44:02 -0500 Subject: x86: remove dead IRQBALANCE code Impact: cleanup CONFIG_IRQBALANCE was removed in commit 8b8e8c1bf; this ifdef was still around. Signed-off-by: Dan McGee Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index bae0eda95486..28e409fc73f3 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -31,10 +31,6 @@ static inline int irq_canonicalize(int irq) # endif #endif -#ifdef CONFIG_IRQBALANCE -extern int irqbalance_disable(char *str); -#endif - #ifdef CONFIG_HOTPLUG_CPU #include extern void fixup_irqs(cpumask_t map); -- cgit From ad38dab01323a01e825555fc46863b73cd0efdc7 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Mon, 27 Oct 2008 13:30:56 -0700 Subject: x86: use the new byteorder headers Impact: cleanup, no functionality changed Signed-off-by: Harvey Harrison Signed-off-by: Ingo Molnar --- arch/x86/include/asm/byteorder.h | 74 ++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/byteorder.h b/arch/x86/include/asm/byteorder.h index e02ae2d89acf..f110ad417df3 100644 --- a/arch/x86/include/asm/byteorder.h +++ b/arch/x86/include/asm/byteorder.h @@ -4,26 +4,33 @@ #include #include -#ifdef __GNUC__ +#define __LITTLE_ENDIAN -#ifdef __i386__ - -static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) +static inline __attribute_const__ __u32 __arch_swab32(__u32 val) { -#ifdef CONFIG_X86_BSWAP - asm("bswap %0" : "=r" (x) : "0" (x)); -#else +#ifdef __i386__ +# ifdef CONFIG_X86_BSWAP + asm("bswap %0" : "=r" (val) : "0" (val)); +# else asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ "rorl $16,%0\n\t" /* swap words */ "xchgb %b0,%h0" /* swap higher bytes */ - : "=q" (x) - : "0" (x)); + : "=q" (val) + : "0" (val)); +# endif + +#else /* __i386__ */ + asm("bswapl %0" + : "=r" (val) + : "0" (val)); #endif - return x; + return val; } +#define __arch_swab32 __arch_swab32 -static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) +static inline __attribute_const__ __u64 __arch_swab64(__u64 val) { +#ifdef __i386__ union { struct { __u32 a; @@ -32,50 
+39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) __u64 u; } v; v.u = val; -#ifdef CONFIG_X86_BSWAP +# ifdef CONFIG_X86_BSWAP asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); -#else - v.s.a = ___arch__swab32(v.s.a); - v.s.b = ___arch__swab32(v.s.b); +# else + v.s.a = __arch_swab32(v.s.a); + v.s.b = __arch_swab32(v.s.b); asm("xchgl %0,%1" : "=r" (v.s.a), "=r" (v.s.b) : "0" (v.s.a), "1" (v.s.b)); -#endif +# endif return v.u; -} - #else /* __i386__ */ - -static inline __attribute_const__ __u64 ___arch__swab64(__u64 x) -{ asm("bswapq %0" - : "=r" (x) - : "0" (x)); - return x; -} - -static inline __attribute_const__ __u32 ___arch__swab32(__u32 x) -{ - asm("bswapl %0" - : "=r" (x) - : "0" (x)); - return x; -} - + : "=r" (val) + : "0" (val)); + return val; #endif +} +#define __arch_swab64 __arch_swab64 -/* Do not define swab16. Gcc is smart enough to recognize "C" version and - convert it into rotation or exhange. */ - -#define __arch__swab64(x) ___arch__swab64(x) -#define __arch__swab32(x) ___arch__swab32(x) - -#define __BYTEORDER_HAS_U64__ - -#endif /* __GNUC__ */ - -#include +#include #endif /* _ASM_X86_BYTEORDER_H */ -- cgit From 7a5276889cfa96619bf863c87581005f46139986 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 30 Oct 2008 10:38:24 +0000 Subject: x86: simplify X86_MPPARSE config option Impact: cleanup Signed-off-by: Jan Beulich Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 350bee1d54dc..f843de13e242 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -235,21 +235,13 @@ config X86_FIND_SMP_CONFIG def_bool y depends on X86_MPPARSE || X86_VOYAGER -if ACPI config X86_MPPARSE - def_bool y - bool "Enable MPS table" + bool "Enable MPS table" if ACPI + default y depends on X86_LOCAL_APIC help For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it -endif - -if !ACPI -config X86_MPPARSE - def_bool y - depends on X86_LOCAL_APIC -endif choice prompt "Subarchitecture Type" -- cgit From a376f30a95a796cde81d6dffde0f5243c8bd8f92 Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Fri, 31 Oct 2008 17:43:04 +0800 Subject: x86: avoid duplicate running of pud_offset and pmd_offset in one_md_table_init() Impact: simplify implementation, cleanup If !(pgd_val(*pgd) & _PAGE_PRESENT) in PAE mode, we need not get value of pmd_table again. Signed-off-by: Zhao Lei Signed-off-by: Ingo Molnar --- arch/x86/mm/init_32.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8396868e82c5..7f8a2daa3fde 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -102,6 +102,8 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd) set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); BUG_ON(pmd_table != pmd_offset(pud, 0)); + + return pmd_table; } #endif pud = pud_offset(pgd, 0); -- cgit From 838e8bb71dc0c892bf8f84abd3c709d8fe3a8d3c Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Fri, 24 Oct 2008 16:53:33 +0200 Subject: x86: Implement change_bit with immediate operand as "lock xorb" Impact: Minor optimization. Implement change_bit with immediate bit count as "lock xorb". This is similar to "lock orb" and "lock andb" for set_bit and clear_bit functions. Signed-off-by: Uros Bizjak Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/bitops.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 360010322711..9fa9dcdf344b 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -168,7 +168,15 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) */ static inline void change_bit(int nr, volatile unsigned long *addr) { - asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr)); + if (IS_IMMEDIATE(nr)) { + asm volatile(LOCK_PREFIX "xorb %1,%0" + : CONST_MASK_ADDR(nr, addr) + : "iq" ((u8)CONST_MASK(nr))); + } else { + asm volatile(LOCK_PREFIX "btc %1,%0" + : BITOP_ADDR(addr) + : "Ir" (nr)); + } } /** -- cgit From cb9e35dce94a1b9c59d46224e8a94377d673e204 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 8 Nov 2008 20:27:00 +0100 Subject: x86: clean up rdtsc_barrier() use Impact: cleanup Move rdtsc_barrier() use to vsyscall_64.c where it's relied on, and point out its role in the context of its use. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/tsc.h | 6 +----- arch/x86/kernel/vsyscall_64.c | 9 +++++++++ 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 9cd83a8e40d5..700aeb8d2098 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -44,11 +44,7 @@ static __always_inline cycles_t vget_cycles(void) if (!cpu_has_tsc) return 0; #endif - rdtsc_barrier(); - cycles = (cycles_t)__native_read_tsc(); - rdtsc_barrier(); - - return cycles; + return (cycles_t)__native_read_tsc(); } extern void tsc_init(void); diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 0b8b6690a86d..ebf2f12900f5 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -128,7 +128,16 @@ static __always_inline void do_vgettimeofday(struct timeval * tv) gettimeofday(tv,NULL); return; } + + /* + * Surround the RDTSC by barriers, to make sure it's not + * speculated to outside the seqlock critical section and + * does not cause time warps: + */ + rdtsc_barrier(); now = vread(); + rdtsc_barrier(); + base = __vsyscall_gtod_data.clock.cycle_last; mask = __vsyscall_gtod_data.clock.mask; mult = __vsyscall_gtod_data.clock.mult; -- cgit From 72bc2b1ad62f4d2f0a51b35829093d41f55accce Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Sat, 8 Nov 2008 21:15:53 +0100 Subject: [ARM] 5329/1: Feroceon: fix feroceon_l2_inv_range Same fix as commit c7cf72dcadb: when 'start' and 'end' are less than a cacheline apart and 'start' is unaligned we are done after cleaning and invalidating the first cacheline. Cc: Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/cache-feroceon-l2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index 13cdae8b0d44..80cd207cbaea 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -150,7 +150,7 @@ static void feroceon_l2_inv_range(unsigned long start, unsigned long end) /* * Clean and invalidate partial last cache line. 
*/ - if (end & (CACHE_LINE_SIZE - 1)) { + if (start < end && end & (CACHE_LINE_SIZE - 1)) { l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1)); end &= ~(CACHE_LINE_SIZE - 1); } @@ -158,7 +158,7 @@ static void feroceon_l2_inv_range(unsigned long start, unsigned long end) /* * Invalidate all full cache lines between 'start' and 'end'. */ - while (start != end) { + while (start < end) { unsigned long range_end = calc_range_end(start, end); l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE); start = range_end; -- cgit From 0fded351a7d03fc69484d5a9e655fbd15a8e7dab Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 8 Nov 2008 19:14:56 +0000 Subject: [ARM] realview: correct MMC clock rate The MMC clock source is actually 24MHz, not 33MHz. Signed-off-by: Russell King --- arch/arm/mach-realview/clock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-realview/clock.c b/arch/arm/mach-realview/clock.c index 3e706c57833a..3347c4236a60 100644 --- a/arch/arm/mach-realview/clock.c +++ b/arch/arm/mach-realview/clock.c @@ -104,7 +104,7 @@ static struct clk uart_clk = { static struct clk mmci_clk = { .name = "MCLK", - .rate = 33000000, + .rate = 24000000, }; int clk_register(struct clk *clk) -- cgit From 7bfc0b2e266dd4cd3d3f27a3ad31bf79974190b1 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 8 Nov 2008 20:12:36 +0000 Subject: [ARM] versatile: correct MMC clock rate The MMC clock source is actually 24MHz, not 33MHz. Signed-off-by: Russell King --- arch/arm/mach-versatile/clock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c index 9336508ec0b2..58937f1fb38c 100644 --- a/arch/arm/mach-versatile/clock.c +++ b/arch/arm/mach-versatile/clock.c @@ -105,7 +105,7 @@ static struct clk uart_clk = { static struct clk mmci_clk = { .name = "MCLK", - .rate = 33000000, + .rate = 24000000, }; int clk_register(struct clk *clk) -- cgit From ebb4c65869db7213280ad9c510637683939b5ff8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 9 Nov 2008 11:18:36 +0000 Subject: [ARM] iop: iop3xx needs registers mapped uncached+unbuffered Mikael Pettersson reported: The 2.6.28-rc kernels fail to detect PCI device 0000:00:01.0 (the first ethernet port) on my Thecus n2100 XScale box. There is however still a strange "ghost" device that gets partially detected in 2.6.28-rc2 vanilla. The IOP321 manual says: The user designates the memory region containing the OCCDR as non-cacheable and non-bufferable from the IntelR XScaleTM core. This guarantees that all load/stores to the OCCDR are only of DWORD quantities. Ensure that the OCCDR is so mapped. 
Signed-off-by: Russell King --- arch/arm/include/asm/mach/map.h | 13 +++++++------ arch/arm/mm/mmu.c | 6 ++++++ arch/arm/plat-iop/setup.c | 5 +++-- 3 files changed, 16 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index cb1139ac1943..39d949b63e80 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -19,12 +19,13 @@ struct map_desc { }; /* types 0-3 are defined in asm/io.h */ -#define MT_CACHECLEAN 4 -#define MT_MINICLEAN 5 -#define MT_LOW_VECTORS 6 -#define MT_HIGH_VECTORS 7 -#define MT_MEMORY 8 -#define MT_ROM 9 +#define MT_UNCACHED 4 +#define MT_CACHECLEAN 5 +#define MT_MINICLEAN 6 +#define MT_LOW_VECTORS 7 +#define MT_HIGH_VECTORS 8 +#define MT_MEMORY 9 +#define MT_ROM 10 #ifdef CONFIG_MMU extern void iotable_init(struct map_desc *, int); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e63db11f16a8..7f36c825718d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -208,6 +208,12 @@ static struct mem_type mem_types[] = { .prot_sect = PROT_SECT_DEVICE, .domain = DOMAIN_IO, }, + [MT_UNCACHED] = { + .prot_pte = PROT_PTE_DEVICE, + .prot_l1 = PMD_TYPE_TABLE, + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, + .domain = DOMAIN_IO, + }, [MT_CACHECLEAN] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c index 4689db638e95..9e573e78176a 100644 --- a/arch/arm/plat-iop/setup.c +++ b/arch/arm/plat-iop/setup.c @@ -16,14 +16,15 @@ #include /* - * Standard IO mapping for all IOP3xx based systems + * Standard IO mapping for all IOP3xx based systems. Note that + * the IOP3xx OCCDR must be mapped uncached and unbuffered. */ static struct map_desc iop3xx_std_desc[] __initdata = { { /* mem mapped registers */ .virtual = IOP3XX_PERIPHERAL_VIRT_BASE, .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE), .length = IOP3XX_PERIPHERAL_SIZE, - .type = MT_DEVICE, + .type = MT_UNCACHED, }, { /* PCI IO space */ .virtual = IOP3XX_PCI_LOWER_IO_VA, .pfn = __phys_to_pfn(IOP3XX_PCI_LOWER_IO_PA), -- cgit From 3044646148cdfa83a311bf1c146a70e550280159 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sun, 9 Nov 2008 10:07:58 -0800 Subject: x86: move iomap.h to the new include location a new file was accidentally added to include/asm-x86; move it to the new arch/x86/include/asm location Signed-off-by: Arjan van de Ven --- arch/x86/include/asm/iomap.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 arch/x86/include/asm/iomap.h (limited to 'arch') diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h new file mode 100644 index 000000000000..c1f06289b14b --- /dev/null +++ b/arch/x86/include/asm/iomap.h @@ -0,0 +1,30 @@ +/* + * Copyright © 2008 Ingo Molnar + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + */ + +#include +#include +#include +#include +#include +#include + +void * +iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); + +void +iounmap_atomic(void *kvaddr, enum km_type type); -- cgit From 4fcc50abdffb517cee36cec9cb22138d84fb62d0 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sun, 9 Nov 2008 08:10:03 +0100 Subject: x86: clean up vget_cycles() Impact: remove unused variable I forgot to remove the now unused "cycles_t cycles" parameter from vget_cycles() - which triggers build warnings as tsc.h is included in a number of files. Remove it. Signed-off-by: Ingo Molnar --- arch/x86/include/asm/tsc.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 700aeb8d2098..38ae163cc91b 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -34,8 +34,6 @@ static inline cycles_t get_cycles(void) static __always_inline cycles_t vget_cycles(void) { - cycles_t cycles; - /* * We only do VDSOs on TSC capable CPUs, so this shouldnt * access boot_cpu_data (which is not VDSO-safe): -- cgit From 19f47c634ea8c5a10ff7bb1a08c52fd0f49bc54c Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Sun, 9 Nov 2008 21:28:15 -0800 Subject: x86: x86_32 has its own irq_regs definition Impact: cleanup Arches that have their own irq_regs definition are expected to define ARCH_HAS_OWN_IRQ_REGS or else a generic (unused) set will also be defined in lib/irq_regs.c Sparse noticed the unused generic one had no prototype: lib/irq_regs.c:15:1: warning: symbol 'per_cpu____irq_regs' was not declared. Should it be static? Signed-off-by: Harvey Harrison Signed-off-by: Ingo Molnar --- arch/x86/include/asm/irq_regs_32.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h index af2f02d27fc7..86afd7473457 100644 --- a/arch/x86/include/asm/irq_regs_32.h +++ b/arch/x86/include/asm/irq_regs_32.h @@ -9,6 +9,8 @@ #include +#define ARCH_HAS_OWN_IRQ_REGS + DECLARE_PER_CPU(struct pt_regs *, irq_regs); static inline struct pt_regs *get_irq_regs(void) -- cgit From acca4f4d9bd657e8bc7e1665ba5077465138f133 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 10 Nov 2008 20:00:45 +0900 Subject: sh: Handle fixmap TLB eviction more coherently. There was a race in the kmap_coherent() implementation. While we guarded against preemption, there was nothing preventing eviction of the pre-faulted fixmap entry from the UTLB. Under certain workloads this would result in the fixmap entries used for cache colouring being evicted from the UTLB in the midst of a copy_page(). In addition to pre-faulting, we also make sure to preserve the PTEs in the kernel page table and introduce a cached PTE for kmap_coherent() usage. This follows a similar change on MIPS ("[MIPS] Fix aliasing bug in copy_to_user_page / copy_from_user_page"). 
Reported-by: Hideo Saito Reported-by: CHIKAMA Masaki Tested-by: Yoshihiro Shimoda Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgtable.h | 6 ++++++ arch/sh/mm/init.c | 12 +++++++++--- arch/sh/mm/pg-sh4.c | 17 +++++++++++++++++ 3 files changed, 32 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 52220d70a096..b517ae08b9c0 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -148,6 +148,12 @@ extern void paging_init(void); extern void page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd); +#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_CPU_SH4) && defined(CONFIG_MMU) +extern void kmap_coherent_init(void); +#else +#define kmap_coherent_init() do { } while (0) +#endif + #include #endif /* __ASM_SH_PGTABLE_H */ diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 4abf00031dae..6cbef8caeb56 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -137,6 +137,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end, void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; + unsigned long vaddr; int nid; /* We don't need to map the kernel through the TLB, as @@ -148,10 +149,15 @@ void __init paging_init(void) * check for a null value. */ set_TTB(swapper_pg_dir); - /* Populate the relevant portions of swapper_pg_dir so that + /* + * Populate the relevant portions of swapper_pg_dir so that * we can use the fixmap entries without calling kmalloc. - * pte's will be filled in by __set_fixmap(). */ - page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir); + * pte's will be filled in by __set_fixmap(). + */ + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; + page_table_range_init(vaddr, 0, swapper_pg_dir); + + kmap_coherent_init(); memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index 38870e0fc182..2fe14da1f839 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c @@ -7,6 +7,7 @@ * Released under the terms of the GNU GPL v2.0. */ #include +#include #include #include #include @@ -16,6 +17,20 @@ #define CACHE_ALIAS (current_cpu_data.dcache.alias_mask) +#define kmap_get_fixmap_pte(vaddr) \ + pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr)) + +static pte_t *kmap_coherent_pte; + +void __init kmap_coherent_init(void) +{ + unsigned long vaddr; + + /* cache the first coherent kmap pte */ + vaddr = __fix_to_virt(FIX_CMAP_BEGIN); + kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); +} + static inline void *kmap_coherent(struct page *page, unsigned long addr) { enum fixed_addresses idx; @@ -34,6 +49,8 @@ static inline void *kmap_coherent(struct page *page, unsigned long addr) update_mmu_cache(NULL, vaddr, pte); + set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte); + return (void *)vaddr; } -- cgit From 6cd10f8db385ba547811baa5b26f672fdff232e6 Mon Sep 17 00:00:00 2001 From: James Bottomley Date: Sun, 9 Nov 2008 11:53:14 -0600 Subject: x86, voyager: fix smp generic helper voyager breakage Impact: build/boot fix for x86/Voyager This change: | commit 3d4422332711ef48ef0f132f1fcbfcbd56c7f3d1 | Author: Jens Axboe | Date: Thu Jun 26 11:21:34 2008 +0200 | | Add generic helpers for arch IPI function calls didn't wire up the voyager smp call function correctly, so do that here. Also make CONFIG_USE_GENERIC_SMP_HELPERS a def_bool y again, since we now use the generic helpers for every x86 architecture. 
Signed-off-by: James Bottomley Cc: Jens Axboe Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 5 ++++- arch/x86/mach-voyager/voyager_smp.c | 16 ++++++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4cf0ab13d187..ac22bb7719f7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ config X86_SMP bool depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) - select USE_GENERIC_SMP_HELPERS default y +config USE_GENERIC_SMP_HELPERS + def_bool y + depends on SMP + config X86_32_SMP def_bool y depends on X86_32 && SMP diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 0e331652681e..52145007bd7e 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c @@ -7,6 +7,7 @@ * This file provides all the same external entries as smp.c but uses * the voyager hal to provide the functionality */ +#include #include #include #include @@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void) x86_write_percpu(cpu_number, hard_smp_processor_id()); } +static void voyager_send_call_func(cpumask_t callmask) +{ + __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id()); + send_CPI(mask, VIC_CALL_FUNCTION_CPI); +} + +static void voyager_send_call_func_single(int cpu) +{ + send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI); +} + struct smp_ops smp_ops = { .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu, .smp_prepare_cpus = voyager_smp_prepare_cpus, @@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = { .smp_send_stop = voyager_smp_send_stop, .smp_send_reschedule = voyager_smp_send_reschedule, - .send_call_func_ipi = native_send_call_func_ipi, - .send_call_func_single_ipi = native_send_call_func_single_ipi, + .send_call_func_ipi = voyager_send_call_func, + .send_call_func_single_ipi = voyager_send_call_func_single, }; -- cgit From c41ef344de212bd918f7765af21b5008628c03e0 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 28 Oct 2008 18:16:58 -0200 Subject: KVM: MMU: increase per-vcpu rmap cache alloc size The page fault path can use two rmap_desc structures, if: - walk_addr's dirty pte update allocates one rmap_desc. - mmu_lock is dropped, sptes are zapped resulting in rmap_desc being freed. - fetch->mmu_set_spte allocates another rmap_desc. Increase to 4 for safety. Signed-off-by: Marcelo Tosatti Signed-off-by: Avi Kivity --- arch/x86/kvm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 2a5e64881d9b..f1983d9477cd 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -314,7 +314,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) if (r) goto out; r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, - rmap_desc_cache, 1); + rmap_desc_cache, 4); if (r) goto out; r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); -- cgit From a2e4e28946c2c282a040ba4945c8f7288de69118 Mon Sep 17 00:00:00 2001 From: Xiantao Zhang Date: Thu, 23 Oct 2008 15:02:52 +0800 Subject: KVM: ia64: Use guest signal mask when blocking Before a vcpu blocks, it should switch to the guest signal mask to allow signals to unblock it. 
Signed-off-by: Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/kvm/kvm-ia64.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index 3caac477de9e..af1464f7a6ad 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -673,16 +673,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu_load(vcpu); + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); - vcpu_put(vcpu); - return -EAGAIN; + r = -EAGAIN; + goto out; } - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); - if (vcpu->mmio_needed) { memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); kvm_set_mmio_data(vcpu); @@ -690,7 +690,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) vcpu->mmio_needed = 0; } r = __vcpu_run(vcpu, kvm_run); - +out: if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); -- cgit From a29a2af378f3f6362b68e126e2541c8bde885ead Mon Sep 17 00:00:00 2001 From: Rakib Mullick Date: Wed, 29 Oct 2008 14:13:39 -0700 Subject: x86: KVM guest: fix section mismatch warning in kvmclock.c WARNING: arch/x86/kernel/built-in.o(.text+0x1722c): Section mismatch in reference from the function kvm_setup_secondary_clock() to the function .devinit.text:setup_secondary_APIC_clock() The function kvm_setup_secondary_clock() references the function __devinit setup_secondary_APIC_clock(). This is often because kvm_setup_secondary_clock lacks a __devinit annotation or the annotation of setup_secondary_APIC_clock is wrong. Signed-off-by: Md.Rakib H. Mullick Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Avi Kivity --- arch/x86/kernel/kvmclock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 774ac4991568..1c9cc431ea4f 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -128,7 +128,7 @@ static int kvm_register_clock(char *txt) } #ifdef CONFIG_X86_LOCAL_APIC -static void kvm_setup_secondary_clock(void) +static void __devinit kvm_setup_secondary_clock(void) { /* * Now that the first cpu already had this clocksource initialized, -- cgit From ca93e992fdfdc6569ac2845d7560eeb5de4a4e0b Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 4 Nov 2008 11:25:17 +0200 Subject: KVM: Require the PCI subsystem PCI device assignment makes calls to pci code, so require it to be built into the kernel. 
Signed-off-by: Avi Kivity --- arch/ia64/kvm/Kconfig | 2 ++ arch/x86/kvm/Kconfig | 2 ++ 2 files changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig index 8e99fed6b3fd..f833a0b4188d 100644 --- a/arch/ia64/kvm/Kconfig +++ b/arch/ia64/kvm/Kconfig @@ -20,6 +20,8 @@ if VIRTUALIZATION config KVM tristate "Kernel-based Virtual Machine (KVM) support" depends on HAVE_KVM && EXPERIMENTAL + # for device assignment: + depends on PCI select PREEMPT_NOTIFIERS select ANON_INODES ---help--- diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index ce3251ce5504..b81125f0bdee 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -20,6 +20,8 @@ if VIRTUALIZATION config KVM tristate "Kernel-based Virtual Machine (KVM) support" depends on HAVE_KVM + # for device assignment: + depends on PCI select PREEMPT_NOTIFIERS select MMU_NOTIFIER select ANON_INODES -- cgit From 928d4bf747e9c290b690ff515d8f81e8ee226d97 Mon Sep 17 00:00:00 2001 From: Sheng Yang Date: Thu, 6 Nov 2008 14:55:45 +0800 Subject: KVM: VMX: Set IGMT bit in EPT entry There is a potential issue that, when guest using pagetable without vmexit when EPT enabled, guest would use PAT/PCD/PWT bits to index PAT msr for it's memory, which would be inconsistent with host side and would cause host MCE due to inconsistent cache attribute. The patch set IGMT bit in EPT entry to ignore guest PAT and use WB as default memory type to protect host (notice that all memory mapped by KVM should be WB). Signed-off-by: Sheng Yang Signed-off-by: Avi Kivity --- arch/x86/kvm/vmx.c | 3 ++- arch/x86/kvm/vmx.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2643b430d83a..d06b4dc0e2ea 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3564,7 +3564,8 @@ static int __init vmx_init(void) bypass_guest_pf = 0; kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | VMX_EPT_WRITABLE_MASK | - VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); + VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT | + VMX_EPT_IGMT_BIT); kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, VMX_EPT_EXECUTABLE_MASK); kvm_enable_tdp(); diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h index 3e010d21fdd7..ec5edc339da6 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/kvm/vmx.h @@ -352,6 +352,7 @@ enum vmcs_field { #define VMX_EPT_READABLE_MASK 0x1ull #define VMX_EPT_WRITABLE_MASK 0x2ull #define VMX_EPT_EXECUTABLE_MASK 0x4ull +#define VMX_EPT_IGMT_BIT (1ull << 6) #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul -- cgit From c60ff51eb26dfcfb0bdc807b09a096aeadd01325 Mon Sep 17 00:00:00 2001 From: Xiantao Zhang Date: Sat, 8 Nov 2008 15:46:59 +0800 Subject: KVM: ia64: fix vmm_spin_{un}lock for !CONFIG_SMP In the case of !CONFIG_SMP, raw_spinlock_t is empty and the spinlock functions don't build. Fix by defining spinlock functions for the uniprocessor case. 
Signed-off-by: Xiantao Zhang Signed-off-by: Avi Kivity --- arch/ia64/kvm/vcpu.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h index 341e3fee280c..e9b2a4e121c0 100644 --- a/arch/ia64/kvm/vcpu.h +++ b/arch/ia64/kvm/vcpu.h @@ -384,6 +384,10 @@ static inline u64 __gpfn_is_io(u64 gpfn) #define MODE_IND(psr) \ (((psr).it << 2) + ((psr).dt << 1) + (psr).rt) +#ifndef CONFIG_SMP +#define _vmm_raw_spin_lock(x) do {}while(0) +#define _vmm_raw_spin_unlock(x) do {}while(0) +#else #define _vmm_raw_spin_lock(x) \ do { \ __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ @@ -403,6 +407,7 @@ static inline u64 __gpfn_is_io(u64 gpfn) do { barrier(); \ ((spinlock_t *)x)->raw_lock.lock = 0; } \ while (0) +#endif void vmm_spin_lock(spinlock_t *lock); void vmm_spin_unlock(spinlock_t *lock); -- cgit From e17d1dc0863767bab8fde4ba9be92c7f79e7fe50 Mon Sep 17 00:00:00 2001 From: Avi Kivity Date: Tue, 11 Nov 2008 13:09:36 +0200 Subject: KVM: Fix pit memory leak if unable to allocate irq source id MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported-By: Daniel Marjamäki Signed-off-by: Avi Kivity --- arch/x86/kvm/i8254.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 8772dc946823..59ebd37ad79e 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c @@ -548,8 +548,10 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm) mutex_lock(&kvm->lock); pit->irq_source_id = kvm_request_irq_source_id(kvm); mutex_unlock(&kvm->lock); - if (pit->irq_source_id < 0) + if (pit->irq_source_id < 0) { + kfree(pit); return NULL; + } mutex_init(&pit->pit_state.lock); mutex_lock(&pit->pit_state.lock); -- cgit From 65e503814dec83c7b2ac955e75919d009109c919 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 11 Nov 2008 13:12:33 -0700 Subject: iop-adma: use iop_paranoia() for debug BUG_ONs Now that the critical read back to flush the next descriptor address is fixed we can downgrade some BUG_ONs that need only be enabled when testing changes to the driver. 
Signed-off-by: Dan Williams --- arch/arm/include/asm/hardware/iop3xx-adma.h | 5 +++-- arch/arm/include/asm/hardware/iop_adma.h | 6 ++++++ arch/arm/mach-iop13xx/include/mach/adma.h | 3 ++- 3 files changed, 11 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 87bff09633aa..83e6ba338e2c 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -730,7 +730,8 @@ static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, { /* hw_desc->next_desc is the same location for all channels */ union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; - BUG_ON(hw_desc.dma->next_desc); + + iop_paranoia(hw_desc.dma->next_desc); hw_desc.dma->next_desc = next_desc_addr; } @@ -760,7 +761,7 @@ static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) struct iop3xx_desc_aau *hw_desc = desc->hw_desc; struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; - BUG_ON(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); + iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); return desc_ctrl.zero_result_err; } diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index cb7e3611bcba..385c6e8cbbd2 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -23,6 +23,12 @@ #define IOP_ADMA_SLOT_SIZE 32 #define IOP_ADMA_THRESHOLD 4 +#ifdef DEBUG +#define IOP_PARANOIA 1 +#else +#define IOP_PARANOIA 0 +#endif +#define iop_paranoia(x) BUG_ON(IOP_PARANOIA && (x)) /** * struct iop_adma_device - internal representation of an ADMA device diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h index 60019c8e6465..5722e86f2174 100644 --- a/arch/arm/mach-iop13xx/include/mach/adma.h +++ b/arch/arm/mach-iop13xx/include/mach/adma.h @@ -404,7 +404,8 @@ static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, u32 next_desc_addr) { struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; - BUG_ON(hw_desc->next_desc); + + iop_paranoia(hw_desc->next_desc); hw_desc->next_desc = next_desc_addr; } -- cgit From 32836259ff25ce97010569706cd33ba94de81d62 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Wed, 5 Nov 2008 16:17:52 -0700 Subject: ACPI: pci_link: remove acpi_irq_balance_set() interface This removes the acpi_irq_balance_set() interface from the PCI interrupt link driver. x86 used acpi_irq_balance_set() to tell the PCI interrupt link driver to configure links to minimize IRQ sharing. But the link driver can easily figure out whether to turn on IRQ balancing based on the IRQ model (PIC/IOAPIC/etc), so we can get rid of that external interface. It's better for the driver to figure this out at init-time. If we set it externally via the x86 code, the interface reduces modularity, and we depend on the fact that acpi_process_madt() happens before we process the kernel command line. 
Signed-off-by: Bjorn Helgaas Signed-off-by: Len Brown --- arch/x86/include/asm/acpi.h | 1 - arch/x86/kernel/acpi/boot.c | 1 - 2 files changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 8d676d8ecde9..9830681446ad 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -113,7 +113,6 @@ static inline void acpi_disable_pci(void) acpi_pci_disabled = 1; acpi_noirq_set(); } -extern int acpi_irq_balance_set(char *str); /* routines for saving/restoring kernel state */ extern int acpi_save_state_mem(void); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 8c1f76abae9e..4c51a2f8fd31 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1343,7 +1343,6 @@ static void __init acpi_process_madt(void) error = acpi_parse_madt_ioapic_entries(); if (!error) { acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC; - acpi_irq_balance_set(NULL); acpi_ioapic = 1; smp_found_config = 1; -- cgit From 51ee3d92bfb983790b9ed576c22f59d42adff329 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Tue, 11 Nov 2008 12:19:11 +0900 Subject: fix sci type for SH7723 This patch changes sci type of SH7723 from PORT_SCI to PORT_SCIFA. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index a7412cede534..6d9e6972cfc9 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -119,17 +119,17 @@ static struct plat_sci_port sci_platform_data[] = { },{ .mapbase = 0xa4e30000, .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCI, + .type = PORT_SCIFA, .irqs = { 56, 56, 56, 56 }, },{ .mapbase = 0xa4e40000, .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCI, + .type = PORT_SCIFA, .irqs = { 88, 88, 88, 88 }, },{ .mapbase = 0xa4e50000, .flags = UPF_BOOT_AUTOCONF, - .type = PORT_SCI, + .type = PORT_SCIFA, .irqs = { 109, 109, 109, 109 }, }, { .flags = 0, -- cgit From 185aed75570fb4f78ef283dfa26cd9da5fa06a91 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 12 Nov 2008 12:53:48 +0900 Subject: sh: Provide a sane valid_phys_addr_range() to prevent TLB reset with PMB. With the PMB enabled, only P1SEG and up are covered by the PMB mappings, meaning that situations where out-of-bounds physical addresses are read from will lead to TLB reset after the PMB miss, allowing for use cases like dd if=/dev/mem to reset the TLB. Fix this up to make sure the reference is between __MEMORY_START (phys) and __pa(high_memory). This is coherent across all variants of sh/sh64 with and without MMU, though the PMB bug itself is only applicable to SH-4A parts. 
Reported-by: Hideo Saito Signed-off-by: Paul Mundt --- arch/sh/include/asm/io.h | 4 ++++ arch/sh/mm/Makefile_32 | 2 +- arch/sh/mm/Makefile_64 | 2 +- arch/sh/mm/mmap.c | 31 +++++++++++++++++++++++++++++++ 4 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 arch/sh/mm/mmap.c (limited to 'arch') diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 436c28539577..65eaae34e753 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -293,6 +293,10 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags) */ #define xlate_dev_kmem_ptr(p) p +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +int valid_phys_addr_range(unsigned long addr, size_t size); +int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); + #endif /* __KERNEL__ */ #endif /* __ASM_SH_IO_H */ diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 index 70e0906023cc..f066e76da204 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 @@ -2,7 +2,7 @@ # Makefile for the Linux SuperH-specific parts of the memory manager. # -obj-y := init.o extable_32.o consistent.o +obj-y := init.o extable_32.o consistent.o mmap.o ifndef CONFIG_CACHE_OFF cache-$(CONFIG_CPU_SH2) := cache-sh2.o diff --git a/arch/sh/mm/Makefile_64 b/arch/sh/mm/Makefile_64 index 0d92a8a3ac9a..9481d0f54efd 100644 --- a/arch/sh/mm/Makefile_64 +++ b/arch/sh/mm/Makefile_64 @@ -2,7 +2,7 @@ # Makefile for the Linux SuperH-specific parts of the memory manager. # -obj-y := init.o consistent.o +obj-y := init.o consistent.o mmap.o mmu-y := tlb-nommu.o pg-nommu.o extable_32.o mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \ diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c new file mode 100644 index 000000000000..1f3cc3d92d3d --- /dev/null +++ b/arch/sh/mm/mmap.c @@ -0,0 +1,31 @@ +/* + * arch/sh/mm/mmap.c + * + * Copyright (C) 2008 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include + +/* + * You really shouldn't be using read() or write() on /dev/mem. This + * might go away in the future. + */ +int valid_phys_addr_range(unsigned long addr, size_t count) +{ + if (addr < (PAGE_OFFSET + (PFN_START << PAGE_SHIFT))) + return 0; + if (addr + count > __pa(high_memory)) + return 0; + + return 1; +} + +int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) +{ + return 1; +} -- cgit From 31ea24bba77a16d3220b0822838785cbafb78175 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:34:59 +0000 Subject: MN10300: Clean up the misalignment handler a little Clean up the MN10300 misalignment handler a little by: (1) Use ilog2() rather than doing implementing log2() locally. (2) Make format_tbl[] const and static. (3) Making the debugging prints more consistent. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 32aa89dc3848..416c43baaa21 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -37,7 +37,7 @@ #include #if 0 -#define kdebug(FMT, ...) printk(KERN_DEBUG FMT, ##__VA_ARGS__) +#define kdebug(FMT, ...) printk(KERN_DEBUG "MISALIGN: "FMT"\n", ##__VA_ARGS__) #else #define kdebug(FMT, ...) 
do {} while (0) #endif @@ -50,14 +50,6 @@ static int misalignment_reg(unsigned long *registers, unsigned params, unsigned opcode, unsigned disp, unsigned long **_register); -static inline unsigned int_log2(unsigned x) -{ - unsigned y; - asm("bsch %1,%0" : "=r"(y) : "r"(x), "0"(0)); - return y; -} -#define log2(x) int_log2(x) - static const unsigned Dreg_index[] = { REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2 }; @@ -88,7 +80,7 @@ enum format_id { FMT_D9, }; -struct { +static const struct { u_int8_t opsz, dispsz; } format_tbl[16] = { [FMT_S0] = { 8, 0 }, @@ -273,7 +265,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) void *address; unsigned tmp, npop; - kdebug("MISALIGN at %lx\n", regs->pc); + kdebug("==>misalignment({pc=%lx})", regs->pc); if (in_interrupt()) die("Misalignment trap in interrupt context", regs, code); @@ -295,7 +287,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) noc = 8; for (pop = mn10300_opcodes; pop->name; pop++) { - npop = log2(pop->opcode | pop->opmask); + npop = ilog2(pop->opcode | pop->opmask); if (npop <= 0 || npop > 31) continue; npop = (npop + 8) & ~7; @@ -391,7 +383,7 @@ transfer_failed: /* we matched the opcode */ found_opcode: - kdebug("MISALIGN: %lx: %x==%x { %x, %x }\n", + kdebug("%lx: %x==%x { %x, %x }", regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]); tmp = format_tbl[pop->format].opsz; @@ -442,13 +434,13 @@ found_opcode: goto bad_reg_mode; if (strcmp(pop->name, "mov") == 0) { - kdebug("FIXUP: mov (%p),DARn\n", address); + kdebug("mov (%p),DARn", address); if (copy_from_user(&data, (void *) address, 4) != 0) goto transfer_failed; if (pop->params[0] & 0x1000000) *postinc += 4; } else if (strcmp(pop->name, "movhu") == 0) { - kdebug("FIXUP: movhu (%p),DARn\n", address); + kdebug("movhu (%p),DARn", address); data = 0; if (copy_from_user(&data, (void *) address, 2) != 0) goto transfer_failed; @@ -472,14 +464,13 @@ found_opcode: data = *store; if (strcmp(pop->name, "mov") == 0) { - kdebug("FIXUP: mov %lx,(%p)\n", data, address); + kdebug("mov %lx,(%p)", data, address); if (copy_to_user((void *) address, &data, 4) != 0) goto transfer_failed; if (pop->params[1] & 0x1000000) *postinc += 4; } else if (strcmp(pop->name, "movhu") == 0) { - kdebug("FIXUP: movhu %hx,(%p)\n", - (uint16_t) data, address); + kdebug("movhu %hx,(%p)", (uint16_t) data, address); if (copy_to_user((void *) address, &data, 2) != 0) goto transfer_failed; if (pop->params[1] & 0x1000000) -- cgit From 9f55588968095306d52bd30564666d4fadce5e39 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:04 +0000 Subject: MN10300: Add built-in testing for misalignment handler Add configurable built-in testing for the MN10300 misalignment handler. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/Kconfig.debug | 9 +++ arch/mn10300/mm/misalignment.c | 161 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 170 insertions(+) (limited to 'arch') diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug index 524e33819f32..ff80e86b9bd2 100644 --- a/arch/mn10300/Kconfig.debug +++ b/arch/mn10300/Kconfig.debug @@ -15,6 +15,15 @@ config DEBUG_DECOMPRESS_KERNEL decompressing Linux seeing "Uncompressing Linux... " and "Ok, booting the kernel.\n" on console. 
+config TEST_MISALIGNMENT_HANDLER + bool "Run tests on the misalignment handler" + depends on DEBUG_KERNEL + default n + help + If you say Y here the kernel will execute a list of misaligned memory + accesses to make sure the misalignment handler deals them with + correctly. If it does not, the kernel will throw a BUG. + config KPROBES bool "Kprobes" depends on DEBUG_KERNEL diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 416c43baaa21..93e09c4be1db 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -650,3 +650,164 @@ static int misalignment_reg(unsigned long *registers, unsigned params, return 1; } + +/* + * misalignment handler tests + */ +#ifdef CONFIG_TEST_MISALIGNMENT_HANDLER +static u8 __initdata testbuf[512] __attribute__((aligned(16))) = { + [257] = 0x11, + [258] = 0x22, + [259] = 0x33, + [260] = 0x44, +}; + +#define ASSERTCMP(X, OP, Y) \ +do { \ + if (unlikely(!((X) OP (Y)))) { \ + printk(KERN_ERR "\n"); \ + printk(KERN_ERR "MISALIGN: Assertion failed at line %u\n", \ + __LINE__); \ + printk(KERN_ERR "0x%lx " #OP " 0x%lx is false\n", \ + (unsigned long)(X), (unsigned long)(Y)); \ + BUG(); \ + } \ +} while(0) + +static int __init test_misalignment(void) +{ + register void *r asm("e0"); + register u32 y asm("e1"); + void *p = testbuf, *q; + u32 tmp, tmp2, x; + + printk(KERN_NOTICE "==>test_misalignment() [testbuf=%p]\n", p); + p++; + + printk(KERN_NOTICE "___ MOV (Am),Dn ___\n"); + q = p + 256; + asm volatile("mov (%0),%1" : "+a"(q), "=d"(x)); + ASSERTCMP(q, ==, p + 256); + ASSERTCMP(x, ==, 0x44332211); + + printk(KERN_NOTICE "___ MOV (256,Am),Dn ___\n"); + q = p; + asm volatile("mov (256,%0),%1" : "+a"(q), "=d"(x)); + ASSERTCMP(q, ==, p); + ASSERTCMP(x, ==, 0x44332211); + + printk(KERN_NOTICE "___ MOV (Di,Am),Dn ___\n"); + tmp = 256; + q = p; + asm volatile("mov (%2,%0),%1" : "+a"(q), "=d"(x), "+d"(tmp)); + ASSERTCMP(q, ==, p); + ASSERTCMP(x, ==, 0x44332211); + ASSERTCMP(tmp, ==, 256); + + printk(KERN_NOTICE "___ MOV (256,Rm),Rn ___\n"); + r = p; + asm volatile("mov (256,%0),%1" : "+r"(r), "=r"(y)); + ASSERTCMP(r, ==, p); + ASSERTCMP(y, ==, 0x44332211); + + printk(KERN_NOTICE "___ MOV (Rm+),Rn ___\n"); + r = p + 256; + asm volatile("mov (%0+),%1" : "+r"(r), "=r"(y)); + ASSERTCMP(r, ==, p + 256 + 4); + ASSERTCMP(y, ==, 0x44332211); + + printk(KERN_NOTICE "___ MOV (Rm+,8),Rn ___\n"); + r = p + 256; + asm volatile("mov (%0+,8),%1" : "+r"(r), "=r"(y)); + ASSERTCMP(r, ==, p + 256 + 8); + ASSERTCMP(y, ==, 0x44332211); + + printk(KERN_NOTICE "___ MOV (7,SP),Rn ___\n"); + asm volatile( + "add -16,sp \n" + "mov +0x11,%0 \n" + "movbu %0,(7,sp) \n" + "mov +0x22,%0 \n" + "movbu %0,(8,sp) \n" + "mov +0x33,%0 \n" + "movbu %0,(9,sp) \n" + "mov +0x44,%0 \n" + "movbu %0,(10,sp) \n" + "mov (7,sp),%1 \n" + "add +16,sp \n" + : "+a"(q), "=d"(x)); + ASSERTCMP(x, ==, 0x44332211); + + printk(KERN_NOTICE "___ MOV (259,SP),Rn ___\n"); + asm volatile( + "add -264,sp \n" + "mov +0x11,%0 \n" + "movbu %0,(259,sp) \n" + "mov +0x22,%0 \n" + "movbu %0,(260,sp) \n" + "mov +0x33,%0 \n" + "movbu %0,(261,sp) \n" + "mov +0x55,%0 \n" + "movbu %0,(262,sp) \n" + "mov (259,sp),%1 \n" + "add +264,sp \n" + : "+d"(tmp), "=d"(x)); + ASSERTCMP(x, ==, 0x55332211); + + printk(KERN_NOTICE "___ MOV (260,SP),Rn ___\n"); + asm volatile( + "add -264,sp \n" + "mov +0x11,%0 \n" + "movbu %0,(260,sp) \n" + "mov +0x22,%0 \n" + "movbu %0,(261,sp) \n" + "mov +0x33,%0 \n" + "movbu %0,(262,sp) \n" + "mov +0x55,%0 \n" + "movbu %0,(263,sp) \n" + "mov (260,sp),%1 \n" + "add +264,sp 
\n" + : "+d"(tmp), "=d"(x)); + ASSERTCMP(x, ==, 0x55332211); + + + printk(KERN_NOTICE "___ MOV_LNE ___\n"); + tmp = 1; + tmp2 = 2; + q = p + 256; + asm volatile( + "setlb \n" + "mov %2,%3 \n" + "mov %1,%2 \n" + "cmp +0,%1 \n" + "mov_lne (%0+,4),%1" + : "+r"(q), "+d"(tmp), "+d"(tmp2), "=d"(x) + : + : "cc"); + ASSERTCMP(q, ==, p + 256 + 12); + ASSERTCMP(x, ==, 0x44332211); + + printk(KERN_NOTICE "___ MOV in SETLB ___\n"); + tmp = 1; + tmp2 = 2; + q = p + 256; + asm volatile( + "setlb \n" + "mov %1,%3 \n" + "mov (%0+),%1 \n" + "cmp +0,%1 \n" + "lne " + : "+a"(q), "+d"(tmp), "+d"(tmp2), "=d"(x) + : + : "cc"); + + ASSERTCMP(q, ==, p + 256 + 8); + ASSERTCMP(x, ==, 0x44332211); + + printk(KERN_NOTICE "<==test_misalignment()\n"); + return 0; +} + +arch_initcall(test_misalignment); + +#endif /* CONFIG_TEST_MISALIGNMENT_HANDLER */ -- cgit From ee6e740cf7e5605b353af539eb9a6e17948747b6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:09 +0000 Subject: MN10300: Add further misalignment fixups Add further misalignment fixup support to the MN10300 arch, notably for ABS32 and SP+disp addressing. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 54 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 53 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 93e09c4be1db..ab03bac497cb 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -120,9 +120,14 @@ enum value_id { SD24, /* 24-bit signed displacement */ SIMM4_2, /* 4-bit signed displacement in opcode bits 4-7 */ SIMM8, /* 8-bit signed immediate */ + IMM8, /* 8-bit unsigned immediate */ + IMM16, /* 16-bit unsigned immediate */ IMM24, /* 24-bit unsigned immediate */ IMM32, /* 32-bit unsigned immediate */ - IMM32_HIGH8, /* 32-bit unsigned immediate, high 8-bits in opcode */ + IMM32_HIGH8, /* 32-bit unsigned immediate, LSB in opcode */ + + IMM32_MEM, /* 32-bit unsigned displacement */ + IMM32_HIGH8_MEM, /* 32-bit unsigned displacement, LSB in opcode */ DN0 = DM0, DN1 = DM1, @@ -177,6 +182,10 @@ struct mn10300_opcode { Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ static const struct mn10300_opcode mn10300_opcodes[] = { +{ "mov", 0x4200, 0xf300, 0, FMT_S1, 0, {DM1, MEM2(IMM8, SP)}}, +{ "mov", 0x4300, 0xf300, 0, FMT_S1, 0, {AM1, MEM2(IMM8, SP)}}, +{ "mov", 0x5800, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), DN0}}, +{ "mov", 0x5c00, 0xfc00, 0, FMT_S1, 0, {MEM2(IMM8, SP), AN0}}, { "mov", 0x60, 0xf0, 0, FMT_S0, 0, {DM1, MEM(AN0)}}, { "mov", 0x70, 0xf0, 0, FMT_S0, 0, {MEM(AM0), DN1}}, { "mov", 0xf000, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), AN1}}, @@ -199,24 +208,46 @@ static const struct mn10300_opcode mn10300_opcodes[] = { { "mov", 0xfa100000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}}, { "mov", 0xfa200000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), AN1}}, { "mov", 0xfa300000, 0xfff00000, 0, FMT_D2, 0, {AM1, MEM2(SD16, AN0)}}, +{ "mov", 0xfa900000, 0xfff30000, 0, FMT_D2, 0, {AM1, MEM2(IMM16, SP)}}, +{ "mov", 0xfa910000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}}, +{ "mov", 0xfab00000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), AN0}}, +{ "mov", 0xfab40000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}}, { "mov", 0xfb0a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}}, { "mov", 0xfb1a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}}, { "mov", 0xfb6a0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}}, { "mov", 0xfb7a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}}, +{ "mov", 0xfb8a0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}}, { "mov", 0xfb8e0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}}, +{ "mov", 0xfb9a0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}}, { "mov", 0xfb9e0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}}, { "mov", 0xfc000000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}}, { "mov", 0xfc100000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}}, { "mov", 0xfc200000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), AN1}}, { "mov", 0xfc300000, 0xfff00000, 0, FMT_D4, 0, {AM1, MEM2(IMM32,AN0)}}, +{ "mov", 0xfc800000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM(IMM32_MEM)}}, +{ "mov", 0xfc810000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}}, +{ "mov", 0xfc900000, 0xfff30000, 0, FMT_D4, 0, {AM1, MEM2(IMM32, SP)}}, +{ "mov", 0xfc910000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}}, +{ "mov", 0xfca00000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), AN0}}, +{ "mov", 0xfca40000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}}, +{ "mov", 0xfcb00000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), AN0}}, +{ "mov", 0xfcb40000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}}, { "mov", 0xfd0a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}}, { "mov", 0xfd1a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}}, { "mov", 0xfd6a0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}}, { "mov", 0xfd7a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}}, +{ "mov", 0xfd8a0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}}, +{ "mov", 0xfd9a0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}}, +{ "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, { "mov", 0xfe0a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, +{ "mov", 0xfe0e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}}, { "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, +{ "mov", 0xfe1a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, +{ "mov", 0xfe1e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}}, { "mov", 0xfe6a0000, 0xffff0000, 0x22, FMT_D9, AM33, 
{MEMINC2 (RM0, IMM32_HIGH8), RN2}}, { "mov", 0xfe7a0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, +{ "mov", 0xfe8a0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}}, +{ "mov", 0xfe9a0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}}, { "movhu", 0xf060, 0xfff0, 0, FMT_D0, 0, {MEM(AM0), DN1}}, { "movhu", 0xf070, 0xfff0, 0, FMT_D0, 0, {DM1, MEM(AN0)}}, @@ -224,26 +255,42 @@ static const struct mn10300_opcode mn10300_opcodes[] = { { "movhu", 0xf4c0, 0xffc0, 0, FMT_D0, 0, {DM2, MEM2(DI, AN0)}}, { "movhu", 0xf86000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8, AM0), DN1}}, { "movhu", 0xf87000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}}, +{ "movhu", 0xf89300, 0xfff300, 0, FMT_D1, 0, {DM1, MEM2(IMM8, SP)}}, +{ "movhu", 0xf8bc00, 0xfffc00, 0, FMT_D1, 0, {MEM2(IMM8, SP), DN0}}, { "movhu", 0xf94a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}}, { "movhu", 0xf95a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}}, { "movhu", 0xf9ea00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}}, { "movhu", 0xf9fa00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEMINC(RN0)}}, { "movhu", 0xfa600000, 0xfff00000, 0, FMT_D2, 0, {MEM2(SD16, AM0), DN1}}, { "movhu", 0xfa700000, 0xfff00000, 0, FMT_D2, 0, {DM1, MEM2(SD16, AN0)}}, +{ "movhu", 0xfa930000, 0xfff30000, 0, FMT_D2, 0, {DM1, MEM2(IMM16, SP)}}, +{ "movhu", 0xfabc0000, 0xfffc0000, 0, FMT_D2, 0, {MEM2(IMM16, SP), DN0}}, { "movhu", 0xfb4a0000, 0xffff0000, 0, FMT_D7, AM33, {MEM2(SD8, RM0), RN2}}, { "movhu", 0xfb5a0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEM2(SD8, RN0)}}, +{ "movhu", 0xfbca0000, 0xffff0f00, 0, FMT_D7, AM33, {MEM2(IMM8, SP), RN2}}, { "movhu", 0xfbce0000, 0xffff000f, 0, FMT_D7, AM33, {MEM2(RI, RM0), RD2}}, +{ "movhu", 0xfbda0000, 0xffff0f00, 0, FMT_D7, AM33, {RM2, MEM2(IMM8, SP)}}, { "movhu", 0xfbde0000, 0xffff000f, 0, FMT_D7, AM33, {RD2, MEM2(RI, RN0)}}, { "movhu", 0xfbea0000, 0xffff0000, 0x22, FMT_D7, AM33, {MEMINC2 (RM0, SIMM8), RN2}}, { "movhu", 0xfbfa0000, 0xffff0000, 0, FMT_D7, AM33, {RM2, MEMINC2 (RN0, SIMM8)}}, { "movhu", 0xfc600000, 0xfff00000, 0, FMT_D4, 0, {MEM2(IMM32,AM0), DN1}}, { "movhu", 0xfc700000, 0xfff00000, 0, FMT_D4, 0, {DM1, MEM2(IMM32,AN0)}}, +{ "movhu", 0xfc830000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM(IMM32_MEM)}}, +{ "movhu", 0xfc930000, 0xfff30000, 0, FMT_D4, 0, {DM1, MEM2(IMM32, SP)}}, +{ "movhu", 0xfcac0000, 0xfffc0000, 0, FMT_D4, 0, {MEM(IMM32_MEM), DN0}}, +{ "movhu", 0xfcbc0000, 0xfffc0000, 0, FMT_D4, 0, {MEM2(IMM32, SP), DN0}}, { "movhu", 0xfd4a0000, 0xffff0000, 0, FMT_D8, AM33, {MEM2(SD24, RM0), RN2}}, { "movhu", 0xfd5a0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEM2(SD24, RN0)}}, +{ "movhu", 0xfdca0000, 0xffff0f00, 0, FMT_D8, AM33, {MEM2(IMM24, SP), RN2}}, +{ "movhu", 0xfdda0000, 0xffff0f00, 0, FMT_D8, AM33, {RM2, MEM2(IMM24, SP)}}, { "movhu", 0xfdea0000, 0xffff0000, 0x22, FMT_D8, AM33, {MEMINC2 (RM0, IMM24), RN2}}, { "movhu", 0xfdfa0000, 0xffff0000, 0, FMT_D8, AM33, {RM2, MEMINC2 (RN0, IMM24)}}, { "movhu", 0xfe4a0000, 0xffff0000, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8,RM0), RN2}}, +{ "movhu", 0xfe4e0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM(IMM32_HIGH8_MEM), RN2}}, { "movhu", 0xfe5a0000, 0xffff0000, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, RN0)}}, +{ "movhu", 0xfe5e0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM(IMM32_HIGH8_MEM)}}, +{ "movhu", 0xfeca0000, 0xffff0f00, 0, FMT_D9, AM33, {MEM2(IMM32_HIGH8, SP), RN2}}, +{ "movhu", 0xfeda0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}}, { "movhu", 0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}}, { "movhu", 
0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, { 0, 0, 0, 0, 0, 0, {0}}, @@ -552,6 +599,9 @@ static int misalignment_addr(unsigned long *registers, unsigned params, postinc = ®isters[Rreg_index[disp >> 4 & 0x0f]]; address += *postinc; break; + case SP: + address += registers[REG_SP >> 2]; + break; case SD8: case SIMM8: @@ -575,7 +625,9 @@ static int misalignment_addr(unsigned long *registers, unsigned params, address += disp & 0x00ffffff; break; case IMM32: + case IMM32_MEM: case IMM32_HIGH8: + case IMM32_HIGH8_MEM: address += disp; break; default: -- cgit From b308bf3be1f75207c307eea9ada90e0b76194911 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:14 +0000 Subject: MN10300: Extract the displacement from an insn correctly in misalignment fixup Extract the displacement from an MN10300 instruction correctly in the misalignment fixup handler. The code should extract the displacement in LSB order, not MSB order. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index ab03bac497cb..614c32b6325b 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -43,11 +43,11 @@ #endif static int misalignment_addr(unsigned long *registers, unsigned params, - unsigned opcode, unsigned disp, + unsigned opcode, unsigned long disp, void **_address, unsigned long **_postinc); static int misalignment_reg(unsigned long *registers, unsigned params, - unsigned opcode, unsigned disp, + unsigned opcode, unsigned long disp, unsigned long **_register); static const unsigned Dreg_index[] = { @@ -304,13 +304,13 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) const struct exception_table_entry *fixup; const struct mn10300_opcode *pop; unsigned long *registers = (unsigned long *) regs; - unsigned long data, *store, *postinc; + unsigned long data, *store, *postinc, disp; mm_segment_t seg; siginfo_t info; - uint32_t opcode, disp, noc, xo, xm; + uint32_t opcode, noc, xo, xm; uint8_t *pc, byte; void *address; - unsigned tmp, npop; + unsigned tmp, npop, dispsz, loop; kdebug("==>misalignment({pc=%lx})", regs->pc); @@ -445,17 +445,17 @@ found_opcode: /* grab the extra displacement (note it's LSB first) */ disp = 0; - tmp = format_tbl[pop->format].dispsz >> 3; - while (tmp > 0) { - tmp--; - disp <<= 8; - + dispsz = format_tbl[pop->format].dispsz; + for (loop = 0; loop < dispsz; loop += 8) { pc++; if (__get_user(byte, pc) != 0) goto fetch_error; - disp |= byte; + disp |= byte << loop; + kdebug("{%p} disp[%02x]=%02x", pc, loop, byte); } + kdebug("disp=%lx", disp); + set_fs(KERNEL_XDS); if (fixup || regs->epsw & EPSW_nSL) set_fs(seg); @@ -538,7 +538,7 @@ found_opcode: * determine the address that was being accessed */ static int misalignment_addr(unsigned long *registers, unsigned params, - unsigned opcode, unsigned disp, + unsigned opcode, unsigned long disp, void **_address, unsigned long **_postinc) { unsigned long *postinc = NULL, address = 0, tmp; @@ -644,7 +644,7 @@ static int misalignment_addr(unsigned long *registers, unsigned params, * determine the register that is acting as source/dest */ static int misalignment_reg(unsigned long *registers, unsigned params, - unsigned opcode, unsigned disp, + unsigned opcode, unsigned long disp, unsigned long **_register) { params &= 0x7fffffff; -- cgit 
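For clarity, a minimal user-space sketch of the LSB-first displacement assembly performed by the fixed loop above; the function name, test bytes and printf harness are illustrative only and are not part of the kernel patch, which does the equivalent work with __get_user() on the instruction stream.

/*
 * Standalone illustration: accumulate displacement bytes least-significant
 * byte first, as the corrected misalignment-fixup loop does.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t extract_disp_lsb_first(const uint8_t *bytes, unsigned dispsz)
{
	uint32_t disp = 0;
	unsigned shift;

	/* each successive instruction byte fills a higher-order position */
	for (shift = 0; shift < dispsz; shift += 8)
		disp |= (uint32_t)*bytes++ << shift;

	return disp;
}

int main(void)
{
	/* a 24-bit displacement of 0x345678 as it appears in the insn stream */
	const uint8_t insn_bytes[] = { 0x78, 0x56, 0x34 };

	printf("disp=%#x\n", (unsigned)extract_disp_lsb_first(insn_bytes, 24));
	return 0;
}

Because the displacement bytes appear least-significant first in the instruction stream, the old "disp <<= 8; disp |= byte" ordering produced byte-swapped values, which is what the commit above corrects.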
From f911c685d65ea1855eb3f95b8eaf871e5d9342fa Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:20 +0000 Subject: MN10300: Fix register-postinc addressing misalignment handling Fix misalignment handling of operands with register postincrement addressing. The flag to indicate that postincrement is required should not be interpreted as an specification of a value to be added to the address. Also add BUGs to catch unimplemented parameter markings in the opcodes table. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 614c32b6325b..066ddc625a14 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -543,7 +543,7 @@ static int misalignment_addr(unsigned long *registers, unsigned params, { unsigned long *postinc = NULL, address = 0, tmp; - params &= 0x7fffffff; + params &= 0x00ffffff; do { switch (params & 0xff) { @@ -631,6 +631,7 @@ static int misalignment_addr(unsigned long *registers, unsigned params, address += disp; break; default: + BUG(); return 0; } } while ((params >>= 8)); @@ -697,6 +698,7 @@ static int misalignment_reg(unsigned long *registers, unsigned params, break; default: + BUG(); return 0; } -- cgit From aefefbbec1ad25bafa97a7a1db25313ce26563e2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:25 +0000 Subject: MN10300: Allow misalignment fixup in interrupt handling Allow misalignment fixup in interrupt handling in the MN10300 arch. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 066ddc625a14..05a3c2f8d2df 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -314,9 +314,6 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) kdebug("==>misalignment({pc=%lx})", regs->pc); - if (in_interrupt()) - die("Misalignment trap in interrupt context", regs, code); - if (regs->epsw & EPSW_IE) asm volatile("or %0,epsw" : : "i"(EPSW_IE)); -- cgit From ddb6d05cbaea76eddbee52585152ab801a8aedc7 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:30 +0000 Subject: MN10300: Perform misalignment fixups of MOV_Lcc Perform misalignment fixups of the MOV_Lcc instructions (move postinc memory to register and conditionally loop). 
Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 95 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 94 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 05a3c2f8d2df..d5b45bb7d108 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -50,6 +50,8 @@ static int misalignment_reg(unsigned long *registers, unsigned params, unsigned opcode, unsigned long disp, unsigned long **_register); +static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode); + static const unsigned Dreg_index[] = { REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2 }; @@ -78,6 +80,7 @@ enum format_id { FMT_D7, FMT_D8, FMT_D9, + FMT_D10, }; static const struct { @@ -95,6 +98,7 @@ static const struct { [FMT_D7] = { 24, 8 }, [FMT_D8] = { 24, 24 }, [FMT_D9] = { 24, 32 }, + [FMT_D10] = { 32, 0 }, }; enum value_id { @@ -293,6 +297,19 @@ static const struct mn10300_opcode mn10300_opcodes[] = { { "movhu", 0xfeda0000, 0xffff0f00, 0, FMT_D9, AM33, {RM2, MEM2(IMM32_HIGH8, SP)}}, { "movhu", 0xfeea0000, 0xffff0000, 0x22, FMT_D9, AM33, {MEMINC2 (RM0, IMM32_HIGH8), RN2}}, { "movhu", 0xfefa0000, 0xffff0000, 0, FMT_D9, AM33, {RN2, MEMINC2 (RM0, IMM32_HIGH8)}}, + +{ "mov_llt", 0xf7e00000, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lgt", 0xf7e00001, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lge", 0xf7e00002, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lle", 0xf7e00003, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lcs", 0xf7e00004, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lhi", 0xf7e00005, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lcc", 0xf7e00006, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lls", 0xf7e00007, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_leq", 0xf7e00008, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lne", 0xf7e00009, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, +{ "mov_lra", 0xf7e0000a, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, + { 0, 0, 0, 0, 0, 0, {0}}, }; @@ -477,7 +494,8 @@ found_opcode: &store)) goto bad_reg_mode; - if (strcmp(pop->name, "mov") == 0) { + if (strcmp(pop->name, "mov") == 0 || + memcmp(pop->name, "mov_l", 5) == 0) { kdebug("mov (%p),DARn", address); if (copy_from_user(&data, (void *) address, 4) != 0) goto transfer_failed; @@ -495,6 +513,7 @@ found_opcode: } *store = data; + kdebug("loaded %lx", data); } else { /* move register to memory */ if (!misalignment_reg(registers, pop->params[0], opcode, disp, @@ -527,6 +546,11 @@ found_opcode: tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz; regs->pc += tmp >> 3; + /* handle MOV_Lcc, which are currently the only FMT_D10 insns that + * access memory */ + if (pop->format == FMT_D10) + misalignment_MOV_Lcc(regs, opcode); + set_fs(seg); return; } @@ -702,6 +726,75 @@ static int misalignment_reg(unsigned long *registers, unsigned params, return 1; } +/* + * handle the conditional loop part of the move-and-loop instructions + */ +static void misalignment_MOV_Lcc(struct pt_regs *regs, uint32_t opcode) +{ + unsigned long epsw = regs->epsw; + unsigned long NxorV; + + kdebug("MOV_Lcc %x [flags=%lx]", opcode, epsw & 0xf); + + /* calculate N^V and shift onto the same bit position as Z 
*/ + NxorV = ((epsw >> 3) ^ epsw >> 1) & 1; + + switch (opcode & 0xf) { + case 0x0: /* MOV_LLT: N^V */ + if (NxorV) + goto take_the_loop; + return; + case 0x1: /* MOV_LGT: ~(Z or (N^V))*/ + if (!((epsw & EPSW_FLAG_Z) | NxorV)) + goto take_the_loop; + return; + case 0x2: /* MOV_LGE: ~(N^V) */ + if (!NxorV) + goto take_the_loop; + return; + case 0x3: /* MOV_LLE: Z or (N^V) */ + if ((epsw & EPSW_FLAG_Z) | NxorV) + goto take_the_loop; + return; + + case 0x4: /* MOV_LCS: C */ + if (epsw & EPSW_FLAG_C) + goto take_the_loop; + return; + case 0x5: /* MOV_LHI: ~(C or Z) */ + if (!(epsw & (EPSW_FLAG_C | EPSW_FLAG_Z))) + goto take_the_loop; + return; + case 0x6: /* MOV_LCC: ~C */ + if (!(epsw & EPSW_FLAG_C)) + goto take_the_loop; + return; + case 0x7: /* MOV_LLS: C or Z */ + if (epsw & (EPSW_FLAG_C | EPSW_FLAG_Z)) + goto take_the_loop; + return; + + case 0x8: /* MOV_LEQ: Z */ + if (epsw & EPSW_FLAG_Z) + goto take_the_loop; + return; + case 0x9: /* MOV_LNE: ~Z */ + if (!(epsw & EPSW_FLAG_Z)) + goto take_the_loop; + return; + case 0xa: /* MOV_LRA: always */ + goto take_the_loop; + + default: + BUG(); + } + +take_the_loop: + /* wind the PC back to just after the SETLB insn */ + kdebug("loop LAR=%lx", regs->lar); + regs->pc = regs->lar - 4; +} + /* * misalignment handler tests */ -- cgit From 6d615c78fb92fbd80e52ba7acb2d4c4d503006c3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:35 +0000 Subject: MN10300: Handle misaligned postinc-with-imm addressing mode correctly Correctly handle misalignment in MOV instructions with postinc-with-immediate addressing mode operands. In these, the immediate value is the increment to be applied the address register, not the displacement to the address. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 107 +++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index d5b45bb7d108..61e65ec47db8 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -44,7 +44,8 @@ static int misalignment_addr(unsigned long *registers, unsigned params, unsigned opcode, unsigned long disp, - void **_address, unsigned long **_postinc); + void **_address, unsigned long **_postinc, + unsigned long *_inc); static int misalignment_reg(unsigned long *registers, unsigned params, unsigned opcode, unsigned long disp, @@ -150,7 +151,7 @@ enum value_id { }; struct mn10300_opcode { - const char *name; + const char name[8]; u_int32_t opcode; u_int32_t opmask; unsigned exclusion; @@ -310,7 +311,7 @@ static const struct mn10300_opcode mn10300_opcodes[] = { { "mov_lne", 0xf7e00009, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, { "mov_lra", 0xf7e0000a, 0xffff000f, 0x22, FMT_D10, AM33, {MEMINC2 (RN4,SIMM4_2), RM6}}, -{ 0, 0, 0, 0, 0, 0, {0}}, +{ "", 0, 0, 0, 0, 0, {0}}, }; /* @@ -321,11 +322,11 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) const struct exception_table_entry *fixup; const struct mn10300_opcode *pop; unsigned long *registers = (unsigned long *) regs; - unsigned long data, *store, *postinc, disp; + unsigned long data, *store, *postinc, disp, inc; mm_segment_t seg; siginfo_t info; uint32_t opcode, noc, xo, xm; - uint8_t *pc, byte; + uint8_t *pc, byte, datasz; void *address; unsigned tmp, npop, dispsz, loop; @@ -347,7 +348,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) opcode = byte; 
noc = 8; - for (pop = mn10300_opcodes; pop->name; pop++) { + for (pop = mn10300_opcodes; pop->name[0]; pop++) { npop = ilog2(pop->opcode | pop->opmask); if (npop <= 0 || npop > 31) continue; @@ -484,32 +485,31 @@ found_opcode: goto failed; } + /* determine the data transfer size of the move */ + if (pop->name[3] == 0 || /* "mov" */ + pop->name[4] == 'l') /* mov_lcc */ + inc = datasz = 4; + else if (pop->name[3] == 'h') /* movhu */ + inc = datasz = 2; + else + goto unsupported_instruction; + if (pop->params[0] & 0x80000000) { /* move memory to register */ if (!misalignment_addr(registers, pop->params[0], opcode, disp, - &address, &postinc)) + &address, &postinc, &inc)) goto bad_addr_mode; if (!misalignment_reg(registers, pop->params[1], opcode, disp, &store)) goto bad_reg_mode; - if (strcmp(pop->name, "mov") == 0 || - memcmp(pop->name, "mov_l", 5) == 0) { - kdebug("mov (%p),DARn", address); - if (copy_from_user(&data, (void *) address, 4) != 0) - goto transfer_failed; - if (pop->params[0] & 0x1000000) - *postinc += 4; - } else if (strcmp(pop->name, "movhu") == 0) { - kdebug("movhu (%p),DARn", address); - data = 0; - if (copy_from_user(&data, (void *) address, 2) != 0) - goto transfer_failed; - if (pop->params[0] & 0x1000000) - *postinc += 2; - } else { - goto unsupported_instruction; + kdebug("mov%u (%p),DARn", datasz, address); + if (copy_from_user(&data, (void *) address, datasz) != 0) + goto transfer_failed; + if (pop->params[0] & 0x1000000) { + kdebug("inc=%lx", inc); + *postinc += inc; } *store = data; @@ -521,26 +521,16 @@ found_opcode: goto bad_reg_mode; if (!misalignment_addr(registers, pop->params[1], opcode, disp, - &address, &postinc)) + &address, &postinc, &inc)) goto bad_addr_mode; data = *store; - if (strcmp(pop->name, "mov") == 0) { - kdebug("mov %lx,(%p)", data, address); - if (copy_to_user((void *) address, &data, 4) != 0) - goto transfer_failed; - if (pop->params[1] & 0x1000000) - *postinc += 4; - } else if (strcmp(pop->name, "movhu") == 0) { - kdebug("movhu %hx,(%p)", (uint16_t) data, address); - if (copy_to_user((void *) address, &data, 2) != 0) - goto transfer_failed; - if (pop->params[1] & 0x1000000) - *postinc += 2; - } else { - goto unsupported_instruction; - } + kdebug("mov%u %lx,(%p)", datasz, data, address); + if (copy_to_user((void *) address, &data, datasz) != 0) + goto transfer_failed; + if (pop->params[1] & 0x1000000) + *postinc += inc; } tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz; @@ -560,10 +550,17 @@ found_opcode: */ static int misalignment_addr(unsigned long *registers, unsigned params, unsigned opcode, unsigned long disp, - void **_address, unsigned long **_postinc) + void **_address, unsigned long **_postinc, + unsigned long *_inc) { unsigned long *postinc = NULL, address = 0, tmp; + if (!(params & 0x1000000)) { + kdebug("noinc"); + *_inc = 0; + _inc = NULL; + } + params &= 0x00ffffff; do { @@ -624,32 +621,40 @@ static int misalignment_addr(unsigned long *registers, unsigned params, address += registers[REG_SP >> 2]; break; + /* displacements are either to be added to the address + * before use, or, in the case of post-inc addressing, + * to be added into the base register after use */ case SD8: case SIMM8: - address += (int32_t) (int8_t) (disp & 0xff); - break; + disp = (long) (int8_t) (disp & 0xff); + goto displace_or_inc; case SD16: - address += (int32_t) (int16_t) (disp & 0xffff); - break; + disp = (long) (int16_t) (disp & 0xffff); + goto displace_or_inc; case SD24: tmp = disp << 8; asm("asr 8,%0" : "=r"(tmp) : "0"(tmp)); - 
address += tmp; - break; + disp = (long) tmp; + goto displace_or_inc; case SIMM4_2: tmp = opcode >> 4 & 0x0f; tmp <<= 28; asm("asr 28,%0" : "=r"(tmp) : "0"(tmp)); - address += tmp; - break; + disp = (long) tmp; + goto displace_or_inc; case IMM24: - address += disp & 0x00ffffff; - break; + disp &= 0x00ffffff; + goto displace_or_inc; case IMM32: case IMM32_MEM: case IMM32_HIGH8: case IMM32_HIGH8_MEM: - address += disp; + displace_or_inc: + kdebug("%s %lx", _inc ? "incr" : "disp", disp); + if (!_inc) + address += disp; + else + *_inc = disp; break; default: BUG(); -- cgit From 852c15b7362cf34e0d7949abefbfeeb0845d93b4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:40 +0000 Subject: MN10300: Fix misaligned index-register addressing handling Fix misalignment handling for an address calculated from the sum of two registers. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 61e65ec47db8..a59836804bc6 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -570,11 +570,11 @@ static int misalignment_addr(unsigned long *registers, unsigned params, address += *postinc; break; case DM1: - postinc = ®isters[Dreg_index[opcode >> 2 & 0x0c]]; + postinc = ®isters[Dreg_index[opcode >> 2 & 0x03]]; address += *postinc; break; case DM2: - postinc = ®isters[Dreg_index[opcode >> 4 & 0x30]]; + postinc = ®isters[Dreg_index[opcode >> 4 & 0x03]]; address += *postinc; break; case AM0: @@ -582,11 +582,11 @@ static int misalignment_addr(unsigned long *registers, unsigned params, address += *postinc; break; case AM1: - postinc = ®isters[Areg_index[opcode >> 2 & 0x0c]]; + postinc = ®isters[Areg_index[opcode >> 2 & 0x03]]; address += *postinc; break; case AM2: - postinc = ®isters[Areg_index[opcode >> 4 & 0x30]]; + postinc = ®isters[Areg_index[opcode >> 4 & 0x03]]; address += *postinc; break; case RM0: -- cgit From d3bd462865421dd8be310fac2d2f6da6069f9679 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:45 +0000 Subject: MN10300: Handle misaligned SP-based operands Support misalignment handling for instructions that have kernel SP-based address operands, including fixing those that include IMM8 or IMM16 displacements. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index a59836804bc6..e247a6e1b8de 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -42,8 +42,9 @@ #define kdebug(FMT, ...) 
do {} while (0) #endif -static int misalignment_addr(unsigned long *registers, unsigned params, - unsigned opcode, unsigned long disp, +static int misalignment_addr(unsigned long *registers, unsigned long sp, + unsigned params, unsigned opcode, + unsigned long disp, void **_address, unsigned long **_postinc, unsigned long *_inc); @@ -322,7 +323,7 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) const struct exception_table_entry *fixup; const struct mn10300_opcode *pop; unsigned long *registers = (unsigned long *) regs; - unsigned long data, *store, *postinc, disp, inc; + unsigned long data, *store, *postinc, disp, inc, sp; mm_segment_t seg; siginfo_t info; uint32_t opcode, noc, xo, xm; @@ -330,7 +331,12 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) void *address; unsigned tmp, npop, dispsz, loop; - kdebug("==>misalignment({pc=%lx})", regs->pc); + if (user_mode(regs)) + sp = regs->sp; + else + sp = (unsigned long) regs + sizeof(*regs); + + kdebug("==>misalignment({pc=%lx,sp=%lx})", regs->pc, sp); if (regs->epsw & EPSW_IE) asm volatile("or %0,epsw" : : "i"(EPSW_IE)); @@ -496,7 +502,8 @@ found_opcode: if (pop->params[0] & 0x80000000) { /* move memory to register */ - if (!misalignment_addr(registers, pop->params[0], opcode, disp, + if (!misalignment_addr(registers, sp, + pop->params[0], opcode, disp, &address, &postinc, &inc)) goto bad_addr_mode; @@ -520,7 +527,8 @@ found_opcode: &store)) goto bad_reg_mode; - if (!misalignment_addr(registers, pop->params[1], opcode, disp, + if (!misalignment_addr(registers, sp, + pop->params[1], opcode, disp, &address, &postinc, &inc)) goto bad_addr_mode; @@ -548,8 +556,9 @@ found_opcode: /* * determine the address that was being accessed */ -static int misalignment_addr(unsigned long *registers, unsigned params, - unsigned opcode, unsigned long disp, +static int misalignment_addr(unsigned long *registers, unsigned long sp, + unsigned params, unsigned opcode, + unsigned long disp, void **_address, unsigned long **_postinc, unsigned long *_inc) { @@ -618,7 +627,7 @@ static int misalignment_addr(unsigned long *registers, unsigned params, address += *postinc; break; case SP: - address += registers[REG_SP >> 2]; + address += sp; break; /* displacements are either to be added to the address @@ -642,6 +651,12 @@ static int misalignment_addr(unsigned long *registers, unsigned params, asm("asr 28,%0" : "=r"(tmp) : "0"(tmp)); disp = (long) tmp; goto displace_or_inc; + case IMM8: + disp &= 0x000000ff; + goto displace_or_inc; + case IMM16: + disp &= 0x0000ffff; + goto displace_or_inc; case IMM24: disp &= 0x00ffffff; goto displace_or_inc; -- cgit From bd9384a9fdd6c15da6b01b2844c3471d07a45d64 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:50 +0000 Subject: MN10300: Don't handle misaligned loading and storing of SP Don't handle the misaligned loading and storing of the SP register as in C code that's most certainly a compiler bug. 
Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index e247a6e1b8de..7b670a3d7659 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -204,8 +204,6 @@ static const struct mn10300_opcode mn10300_opcodes[] = { { "mov", 0xf81000, 0xfff000, 0, FMT_D1, 0, {DM1, MEM2(SD8, AN0)}}, { "mov", 0xf82000, 0xfff000, 0, FMT_D1, 0, {MEM2(SD8,AM0), AN1}}, { "mov", 0xf83000, 0xfff000, 0, FMT_D1, 0, {AM1, MEM2(SD8, AN0)}}, -{ "mov", 0xf8f000, 0xfffc00, 0, FMT_D1, AM33, {MEM2(SD8, AM0), SP}}, -{ "mov", 0xf8f400, 0xfffc00, 0, FMT_D1, AM33, {SP, MEM2(SD8, AN0)}}, { "mov", 0xf90a00, 0xffff00, 0, FMT_D6, AM33, {MEM(RM0), RN2}}, { "mov", 0xf91a00, 0xffff00, 0, FMT_D6, AM33, {RM2, MEM(RN0)}}, { "mov", 0xf96a00, 0xffff00, 0x12, FMT_D6, AM33, {MEMINC(RM0), RN2}}, -- cgit From 58a47481b6ecb6dd05ab4a788e1f2ae3c7c46f57 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 12 Nov 2008 15:35:55 +0000 Subject: MN10300: Don't do misalignment handling for userspace Don't do misalignment handling for userspace misalignment faults: just generate an appropriate SIGBUS instead. Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- arch/mn10300/mm/misalignment.c | 52 +++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c index 7b670a3d7659..94c4a4358065 100644 --- a/arch/mn10300/mm/misalignment.c +++ b/arch/mn10300/mm/misalignment.c @@ -329,10 +329,11 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) void *address; unsigned tmp, npop, dispsz, loop; + /* we don't fix up userspace misalignment faults */ if (user_mode(regs)) - sp = regs->sp; - else - sp = (unsigned long) regs + sizeof(*regs); + goto bus_error; + + sp = (unsigned long) regs + sizeof(*regs); kdebug("==>misalignment({pc=%lx,sp=%lx})", regs->pc, sp); @@ -386,15 +387,15 @@ asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code) } /* didn't manage to find a fixup */ - if (!user_mode(regs)) - printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n", - regs->pc, opcode); + printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n", + regs->pc, opcode); failed: set_fs(seg); if (die_if_no_fixup("misalignment error", regs, code)) return; +bus_error: info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; @@ -404,31 +405,27 @@ failed: /* error reading opcodes */ fetch_error: - if (!user_mode(regs)) - printk(KERN_CRIT - "MISALIGN: %p: fault whilst reading instruction data\n", - pc); + printk(KERN_CRIT + "MISALIGN: %p: fault whilst reading instruction data\n", + pc); goto failed; bad_addr_mode: - if (!user_mode(regs)) - printk(KERN_CRIT - "MISALIGN: %lx: unsupported addressing mode %x\n", - regs->pc, opcode); + printk(KERN_CRIT + "MISALIGN: %lx: unsupported addressing mode %x\n", + regs->pc, opcode); goto failed; bad_reg_mode: - if (!user_mode(regs)) - printk(KERN_CRIT - "MISALIGN: %lx: unsupported register mode %x\n", - regs->pc, opcode); + printk(KERN_CRIT + "MISALIGN: %lx: unsupported register mode %x\n", + regs->pc, opcode); goto failed; unsupported_instruction: - if (!user_mode(regs)) - printk(KERN_CRIT - "MISALIGN: %lx: unsupported instruction %x (%s)\n", - regs->pc, opcode, pop->name); + printk(KERN_CRIT + "MISALIGN: %lx: unsupported instruction %x 
(%s)\n", + regs->pc, opcode, pop->name); goto failed; transfer_failed: @@ -476,16 +473,14 @@ found_opcode: kdebug("disp=%lx", disp); set_fs(KERNEL_XDS); - if (fixup || regs->epsw & EPSW_nSL) + if (fixup) set_fs(seg); tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000; if (!tmp) { - if (!user_mode(regs)) - printk(KERN_CRIT - "MISALIGN: %lx:" - " insn not move to/from memory %x\n", - regs->pc, opcode); + printk(KERN_CRIT + "MISALIGN: %lx: insn not move to/from memory %x\n", + regs->pc, opcode); goto failed; } @@ -548,7 +543,6 @@ found_opcode: misalignment_MOV_Lcc(regs, opcode); set_fs(seg); - return; } /* -- cgit From bfbedf787c6b77270679429caadb044b2d33c94c Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Thu, 13 Nov 2008 15:33:45 +0900 Subject: sh: early printk port type fix Add PORT_SCIF to unbreak the early printk code. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/early_printk.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index 6b7d166694e2..98a29a782301 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -75,6 +75,7 @@ static struct console bios_console = { #endif static struct uart_port scif_port = { + .type = PORT_SCIF, .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT, .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT, }; -- cgit From 10840f034e2329150ce0e683e636ea13b268d333 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 13 Nov 2008 15:38:02 +0900 Subject: sh: Don't factor in PAGE_OFFSET for valid_phys_addr_range() check. Signed-off-by: Paul Mundt --- arch/sh/mm/mmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 1f3cc3d92d3d..8837d511710a 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -17,7 +17,7 @@ */ int valid_phys_addr_range(unsigned long addr, size_t count) { - if (addr < (PAGE_OFFSET + (PFN_START << PAGE_SHIFT))) + if (addr < __MEMORY_START) return 0; if (addr + count > __pa(high_memory)) return 0; -- cgit From 2cd0ebc83d771220eeddec91fd6d4cfefc2cc46e Mon Sep 17 00:00:00 2001 From: Francesco VIRLINZI Date: Wed, 15 Oct 2008 11:58:24 +0200 Subject: sh: Fixed the TMU0 reload value on resume This patch fixes the TMU0 interrupt frequency on suspend/resume. During the resume the kernel reprograms the TMU0.ClockEvent mode but if the mode is periodic than the TMU0.TCOR is updated with a random wrong value without taking care latest valid saved value. There was no problem with No_HZ system where TMU0.TCOR isn't used. Signed-off-by: Francesco M. 
Virlinzi Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/timers/timer-tmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index aaaf90d06b85..3c61ddd4d43e 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c @@ -120,7 +120,7 @@ static void tmu_set_mode(enum clock_event_mode mode, { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR); + ctrl_outl(tmu_latest_interval[TMU0], TMU0_TCOR); break; case CLOCK_EVT_MODE_ONESHOT: ctrl_outl(0, TMU0_TCOR); -- cgit From 5d52013cbb3d39bde9f5a6023193058eeb112e98 Mon Sep 17 00:00:00 2001 From: Stuart MENEFY Date: Fri, 10 Oct 2008 19:49:30 +0100 Subject: sh: __copy_user function can corrupt the stack in case of exception The __copy_user function can corrupt the stack in the case of a non-trivial length of data, and either of the first two move instructions cause an exception. This is because the fixup for these two instructions is mapped to the no_pop case, but these instructions execute after the stack is pushed. This change creates an explicit NO_POP exception mapping macro, and uses it for the two instructions executed in the trivial case where no stack pushes occur. More information at ST Linux bugzilla: https://bugzilla.stlinux.com/show_bug.cgi?id=4824 Signed-off-by: Dylan Reid Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/lib/copy_page.S | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/sh/lib/copy_page.S b/arch/sh/lib/copy_page.S index 5d12e657be34..43de7e8e4e17 100644 --- a/arch/sh/lib/copy_page.S +++ b/arch/sh/lib/copy_page.S @@ -80,6 +80,11 @@ ENTRY(copy_page) .section __ex_table, "a"; \ .long 9999b, 6000f ; \ .previous +#define EX_NO_POP(...) \ + 9999: __VA_ARGS__ ; \ + .section __ex_table, "a"; \ + .long 9999b, 6005f ; \ + .previous ENTRY(__copy_user) ! Check if small number of bytes mov #11,r0 @@ -139,9 +144,9 @@ EX( mov.b r1,@r4 ) bt 1f 2: -EX( mov.b @r5+,r0 ) +EX_NO_POP( mov.b @r5+,r0 ) dt r6 -EX( mov.b r0,@r4 ) +EX_NO_POP( mov.b r0,@r4 ) bf/s 2b add #1,r4 @@ -150,7 +155,7 @@ EX( mov.b r0,@r4 ) # Exception handler: .section .fixup, "ax" -6000: +6005: mov.l 8000f,r1 mov r3,r0 jmp @r1 -- cgit From 272966c070237c8cb540fe67e06df51bc6ea9cc2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 13 Nov 2008 17:46:06 +0900 Subject: serial: sh-sci: Reorder the SCxTDR write after the TDxE clear. Under qemu there is a race between the TDxE read-and-clear and the SCxTDR write. While on hardware it can be gauranteed that the read-and-clear will happen prior to the character being written out, no such assumption can be made under emulation. As this path happens with IRQs off and the hardware itself doesn't care about the ordering, move the SCxTDR write until after the read-and-clear. 
Signed-off-by: Vladimir Prus Signed-off-by: Paul Mundt --- arch/sh/kernel/early_printk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index 98a29a782301..a952dcf9999d 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -85,9 +85,9 @@ static void scif_sercon_putc(int c) while (((sci_in(&scif_port, SCFDR) & EPK_FIFO_BITS) >= EPK_FIFO_SIZE)) ; - sci_out(&scif_port, SCxTDR, c); sci_in(&scif_port, SCxSR); sci_out(&scif_port, SCxSR, 0xf3 & ~(0x20 | 0x40)); + sci_out(&scif_port, SCxTDR, c); while ((sci_in(&scif_port, SCxSR) & 0x40) == 0) ; -- cgit From 9fa767928fe738aba8e99dae511e91f02fe20b28 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 13 Nov 2008 14:33:51 +0000 Subject: [ARM] dma-mapping: fix compiler warning arch/arm/mm/dma-mapping.c: In function `dma_sync_sg_for_cpu': arch/arm/mm/dma-mapping.c:588: warning: statement with no effect Signed-off-by: Russell King --- arch/arm/include/asm/dma-mapping.h | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 1cb8602dd9d5..4ed149cbb32a 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -256,8 +256,17 @@ int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long, int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction); #else -#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir) (1) -#define dmabounce_sync_for_device(dev,dma,off,sz,dir) (1) +static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr, + unsigned long offset, size_t size, enum dma_data_direction dir) +{ + return 1; +} + +static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, + unsigned long offset, size_t size, enum dma_data_direction dir) +{ + return 1; +} /** -- cgit From 1e8b0416ca387d754410a4e5d6b92ad6e2fb00eb Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 13 Nov 2008 14:43:03 +0000 Subject: [ARM] integrator,realview,versatile: remove FLASH_* and EPROM_* constants FLASH_* and EPROM_* constants are unused, and clash with drivers: drivers/atm/ambassador.h:257:1: warning: "FLASH_BASE" redefined drivers/atm/ambassador.h:258:1: warning: "FLASH_SIZE" redefined drivers/atm/iphase.h:332:1: warning: "EPROM_SIZE" redefined so remove them. 
Signed-off-by: Russell King --- arch/arm/mach-integrator/include/mach/platform.h | 19 +------------------ arch/arm/mach-realview/include/mach/platform.h | 19 +------------------ arch/arm/mach-versatile/include/mach/platform.h | 18 +----------------- 3 files changed, 3 insertions(+), 53 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-integrator/include/mach/platform.h b/arch/arm/mach-integrator/include/mach/platform.h index 028b87839c0f..e00a2624f269 100644 --- a/arch/arm/mach-integrator/include/mach/platform.h +++ b/arch/arm/mach-integrator/include/mach/platform.h @@ -407,28 +407,11 @@ */ #define uHAL_MEMORY_SIZE INTEGRATOR_SSRAM_SIZE -/* - * Application Flash - * - */ -#define FLASH_BASE INTEGRATOR_FLASH_BASE -#define FLASH_SIZE INTEGRATOR_FLASH_SIZE -#define FLASH_END (FLASH_BASE + FLASH_SIZE - 1) -#define FLASH_BLOCK_SIZE SZ_128K - -/* - * Boot Flash - * - */ -#define EPROM_BASE INTEGRATOR_BOOT_ROM_HI -#define EPROM_SIZE INTEGRATOR_BOOT_ROM_SIZE -#define EPROM_END (EPROM_BASE + EPROM_SIZE - 1) - /* * Clean base - dummy * */ -#define CLEAN_BASE EPROM_BASE +#define CLEAN_BASE INTEGRATOR_BOOT_ROM_HI /* * Timer definitions diff --git a/arch/arm/mach-realview/include/mach/platform.h b/arch/arm/mach-realview/include/mach/platform.h index 4034b54950c2..793a3a332712 100644 --- a/arch/arm/mach-realview/include/mach/platform.h +++ b/arch/arm/mach-realview/include/mach/platform.h @@ -238,28 +238,11 @@ #define REALVIEW_INTREG_OFFSET 0x8 /* Interrupt control */ #define REALVIEW_DECODE_OFFSET 0xC /* Fitted logic modules */ -/* - * Application Flash - * - */ -#define FLASH_BASE REALVIEW_FLASH_BASE -#define FLASH_SIZE REALVIEW_FLASH_SIZE -#define FLASH_END (FLASH_BASE + FLASH_SIZE - 1) -#define FLASH_BLOCK_SIZE SZ_128K - -/* - * Boot Flash - * - */ -#define EPROM_BASE REALVIEW_BOOT_ROM_HI -#define EPROM_SIZE REALVIEW_BOOT_ROM_SIZE -#define EPROM_END (EPROM_BASE + EPROM_SIZE - 1) - /* * Clean base - dummy * */ -#define CLEAN_BASE EPROM_BASE +#define CLEAN_BASE REALVIEW_BOOT_ROM_HI /* * System controller bit assignment diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h index 27cbe6a3f220..f91ba930ca8a 100644 --- a/arch/arm/mach-versatile/include/mach/platform.h +++ b/arch/arm/mach-versatile/include/mach/platform.h @@ -436,28 +436,12 @@ #define SIC_INTMASK_PCI1 (1 << SIC_INT_PCI1) #define SIC_INTMASK_PCI2 (1 << SIC_INT_PCI2) #define SIC_INTMASK_PCI3 (1 << SIC_INT_PCI3) -/* - * Application Flash - * - */ -#define FLASH_BASE VERSATILE_FLASH_BASE -#define FLASH_SIZE VERSATILE_FLASH_SIZE -#define FLASH_END (FLASH_BASE + FLASH_SIZE - 1) -#define FLASH_BLOCK_SIZE SZ_128K - -/* - * Boot Flash - * - */ -#define EPROM_BASE VERSATILE_BOOT_ROM_HI -#define EPROM_SIZE VERSATILE_BOOT_ROM_SIZE -#define EPROM_END (EPROM_BASE + EPROM_SIZE - 1) /* * Clean base - dummy * */ -#define CLEAN_BASE EPROM_BASE +#define CLEAN_BASE VERSATILE_BOOT_ROM_HI /* * System controller bit assignment -- cgit From d9a682a592ff5905d328c648fd30ee7fa12ce8ab Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 13 Nov 2008 14:53:08 +0000 Subject: [ARM] cdb89712,clps7500,h720x: avoid namespace clash for FLASH_* constants Signed-off-by: Russell King --- arch/arm/mach-clps711x/include/mach/hardware.h | 4 ---- arch/arm/mach-clps7500/core.c | 6 +++--- arch/arm/mach-clps7500/include/mach/hardware.h | 6 +++--- arch/arm/mach-h720x/include/mach/boards.h | 6 +++--- 4 files changed, 9 insertions(+), 13 deletions(-) (limited to 'arch') diff --git 
a/arch/arm/mach-clps711x/include/mach/hardware.h b/arch/arm/mach-clps711x/include/mach/hardware.h index 4c3e101b96c9..7cc675c93e20 100644 --- a/arch/arm/mach-clps711x/include/mach/hardware.h +++ b/arch/arm/mach-clps711x/include/mach/hardware.h @@ -95,10 +95,6 @@ #include /* dynamic ioremap() areas */ -#define FLASH_START 0x00000000 -#define FLASH_SIZE 0x800000 -#define FLASH_WIDTH 4 - #define SRAM_START 0x60000000 #define SRAM_SIZE 0xc000 #define SRAM_WIDTH 4 diff --git a/arch/arm/mach-clps7500/core.c b/arch/arm/mach-clps7500/core.c index c3a33b8a5aac..7e247c04d41c 100644 --- a/arch/arm/mach-clps7500/core.c +++ b/arch/arm/mach-clps7500/core.c @@ -275,9 +275,9 @@ static struct map_desc cl7500_io_desc[] __initdata = { .length = ISA_SIZE, .type = MT_DEVICE }, { /* Flash */ - .virtual = FLASH_BASE, - .pfn = __phys_to_pfn(FLASH_START), - .length = FLASH_SIZE, + .virtual = CLPS7500_FLASH_BASE, + .pfn = __phys_to_pfn(CLPS7500_FLASH_START), + .length = CLPS7500_FLASH_SIZE, .type = MT_DEVICE }, { /* LED */ .virtual = LED_BASE, diff --git a/arch/arm/mach-clps7500/include/mach/hardware.h b/arch/arm/mach-clps7500/include/mach/hardware.h index d66578a3371c..a6ad1d44badf 100644 --- a/arch/arm/mach-clps7500/include/mach/hardware.h +++ b/arch/arm/mach-clps7500/include/mach/hardware.h @@ -39,9 +39,9 @@ #define ISA_SIZE 0x00010000 #define ISA_BASE 0xe1000000 -#define FLASH_START 0x01000000 /* XXX */ -#define FLASH_SIZE 0x01000000 -#define FLASH_BASE 0xe2000000 +#define CLPS7500_FLASH_START 0x01000000 /* XXX */ +#define CLPS7500_FLASH_SIZE 0x01000000 +#define CLPS7500_FLASH_BASE 0xe2000000 #define LED_START 0x0302B000 #define LED_SIZE 0x00001000 diff --git a/arch/arm/mach-h720x/include/mach/boards.h b/arch/arm/mach-h720x/include/mach/boards.h index 079b279e1242..38b8e0d61fbf 100644 --- a/arch/arm/mach-h720x/include/mach/boards.h +++ b/arch/arm/mach-h720x/include/mach/boards.h @@ -19,9 +19,9 @@ #ifdef CONFIG_ARCH_H7202 /* FLASH */ -#define FLASH_VIRT 0xd0000000 -#define FLASH_PHYS 0x00000000 -#define FLASH_SIZE 0x02000000 +#define H720X_FLASH_VIRT 0xd0000000 +#define H720X_FLASH_PHYS 0x00000000 +#define H720X_FLASH_SIZE 0x02000000 /* onboard LAN controller */ # define ETH0_PHYS 0x08000000 -- cgit From 8959dabdf2f8f9ce982a2c4cfe6d1652a2fb6320 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 13 Nov 2008 15:02:41 +0000 Subject: [ARM] cdb89712: avoid namespace clashes with SRAM_ and BOOTROM_ constants Signed-off-by: Russell King --- arch/arm/mach-clps711x/include/mach/hardware.h | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-clps711x/include/mach/hardware.h b/arch/arm/mach-clps711x/include/mach/hardware.h index 7cc675c93e20..b3ebe9e4871f 100644 --- a/arch/arm/mach-clps711x/include/mach/hardware.h +++ b/arch/arm/mach-clps711x/include/mach/hardware.h @@ -94,16 +94,6 @@ #include #include -/* dynamic ioremap() areas */ -#define SRAM_START 0x60000000 -#define SRAM_SIZE 0xc000 -#define SRAM_WIDTH 4 - -#define BOOTROM_START 0x70000000 -#define BOOTROM_SIZE 0x80 -#define BOOTROM_WIDTH 4 - - /* static cdb89712_map_io() areas */ #define REGISTER_START 0x80000000 #define REGISTER_SIZE 0x4000 @@ -194,14 +184,6 @@ #define CEIVA_FLASH_SIZE 0x100000 #define CEIVA_FLASH_WIDTH 2 -#define SRAM_START 0x60000000 -#define SRAM_SIZE 0xc000 -#define SRAM_WIDTH 4 - -#define BOOTROM_START 0x70000000 -#define BOOTROM_SIZE 0x80 -#define BOOTROM_WIDTH 4 - /* * SED1355 LCD controller */ -- cgit From 906bf113beb07a76b665c3863aa864acdc8f9950 Mon Sep 17 00:00:00 2001 From: Helge 
Deller Date: Thu, 13 Nov 2008 18:55:54 +0100 Subject: fix cpumask build breakage on parisc Commit 2d3854a37e8b767a51aba38ed6d22817b0631e33 ("cpumask: introduce new API, without changing anything") introduced a build breakage on parisc. This trivial patch fixes it. Signed-off-by: Helge Deller Cc: Rusty Russell Cc: Andrew Morton Cc: Kyle Mc Martin Signed-off-by: Linus Torvalds --- arch/parisc/include/asm/smp.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h index 398cdbaf4e54..409e698f4361 100644 --- a/arch/parisc/include/asm/smp.h +++ b/arch/parisc/include/asm/smp.h @@ -44,8 +44,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask); #define PROC_CHANGE_PENALTY 15 /* Schedule penalty */ -extern unsigned long cpu_present_mask; - #define raw_smp_processor_id() (current_thread_info()->cpu) #else /* CONFIG_SMP */ -- cgit From fb2e7c5e33b341699f139b2ed972dca0a463a670 Mon Sep 17 00:00:00 2001 From: Gerald Schaefer Date: Fri, 14 Nov 2008 18:18:00 +0100 Subject: [S390] Fix range for add_active_range() in setup_memory() add_active_range() expects start_pfn + size as end_pfn value, i.e. not the pfn of the last page frame but the one behind that. We used the pfn of the last page frame so far, which can lead to a BUG_ON in move_freepages(), when the kernelcore parameter is specified (page_zone(start_page) != page_zone(end_page)). Signed-off-by: Gerald Schaefer Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/setup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 62122bad1e33..400b040df7fa 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -604,13 +604,13 @@ setup_memory(void) if (memory_chunk[i].type != CHUNK_READ_WRITE) continue; start_chunk = PFN_DOWN(memory_chunk[i].addr); - end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1; + end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size); end_chunk = min(end_chunk, end_pfn); if (start_chunk >= end_chunk) continue; add_active_range(0, start_chunk, end_chunk); pfn = max(start_chunk, start_pfn); - for (; pfn <= end_chunk; pfn++) + for (; pfn < end_chunk; pfn++) page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY); } -- cgit From af4c68740e848019d8d14c52704ed8eacceddac6 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 14 Nov 2008 18:18:03 +0100 Subject: [S390] lockdep: fix compile bug arch/s390/kernel/built-in.o: In function `cleanup_io_leave_insn': mem_detect.c:(.text+0x10592): undefined reference to `lockdep_sys_exit' Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/entry.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index ed500ef799b7..5f0c4fba87c3 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -1116,6 +1116,8 @@ cleanup_io_leave_insn: .Ltrace_irq_on: .long trace_hardirqs_on .Ltrace_irq_off: .long trace_hardirqs_off +#endif +#ifdef CONFIG_LOCKDEP .Llockdep_sys_exit: .long lockdep_sys_exit #endif -- cgit From 632448f65001c4935ed0d3bb362017d773da2eca Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 14 Nov 2008 18:18:04 +0100 Subject: [S390] ftrace: disable tracing on idle psw Disable tracing on idle psw. Otherwise it would give us huge preempt off times for idle. Which is rather pointless. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/process.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 3e2c05cb6a87..04f8c67a6101 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -136,9 +136,12 @@ static void default_idle(void) return; } trace_hardirqs_on(); + /* Don't trace preempt off for idle. */ + stop_critical_timings(); /* Wait for external, I/O or machine check interrupt. */ __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT); + start_critical_timings(); } void cpu_idle(void) -- cgit From 50bec4ce5d36ebf96189dcc54e20c7fce4bf61bf Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 14 Nov 2008 18:18:05 +0100 Subject: [S390] ftrace: fix kernel stack backchain walking With CONFIG_IRQSOFF_TRACER the trace_hardirqs_off() function includes a call to __builtin_return_address(1). But we calltrace_hardirqs_off() from early entry code. There we have just a single stack frame. So this results in a kernel stack backchain walk that would walk beyond the kernel stack. Following the NULL terminated backchain this results in a lowcore read access. To fix this we simply call trace_hardirqs_off_caller() and pass the current instruction pointer. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/entry.S | 18 +++++++++++------- arch/s390/kernel/entry64.S | 11 +++++++---- 2 files changed, 18 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 5f0c4fba87c3..08844fc24a2e 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -61,22 +61,25 @@ STACK_SIZE = 1 << STACK_SHIFT #ifdef CONFIG_TRACE_IRQFLAGS .macro TRACE_IRQS_ON - l %r1,BASED(.Ltrace_irq_on) + basr %r2,%r0 + l %r1,BASED(.Ltrace_irq_on_caller) basr %r14,%r1 .endm .macro TRACE_IRQS_OFF - l %r1,BASED(.Ltrace_irq_off) + basr %r2,%r0 + l %r1,BASED(.Ltrace_irq_off_caller) basr %r14,%r1 .endm .macro TRACE_IRQS_CHECK + basr %r2,%r0 tm SP_PSW(%r15),0x03 # irqs enabled? jz 0f - l %r1,BASED(.Ltrace_irq_on) + l %r1,BASED(.Ltrace_irq_on_caller) basr %r14,%r1 j 1f -0: l %r1,BASED(.Ltrace_irq_off) +0: l %r1,BASED(.Ltrace_irq_off_caller) basr %r14,%r1 1: .endm @@ -1113,9 +1116,10 @@ cleanup_io_leave_insn: .Lschedtail: .long schedule_tail .Lsysc_table: .long sys_call_table #ifdef CONFIG_TRACE_IRQFLAGS -.Ltrace_irq_on: .long trace_hardirqs_on -.Ltrace_irq_off: - .long trace_hardirqs_off +.Ltrace_irq_on_caller: + .long trace_hardirqs_on_caller +.Ltrace_irq_off_caller: + .long trace_hardirqs_off_caller #endif #ifdef CONFIG_LOCKDEP .Llockdep_sys_exit: diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index d7ce150453f2..41aca06682aa 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -61,19 +61,22 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ #ifdef CONFIG_TRACE_IRQFLAGS .macro TRACE_IRQS_ON - brasl %r14,trace_hardirqs_on + basr %r2,%r0 + brasl %r14,trace_hardirqs_on_caller .endm .macro TRACE_IRQS_OFF - brasl %r14,trace_hardirqs_off + basr %r2,%r0 + brasl %r14,trace_hardirqs_off_caller .endm .macro TRACE_IRQS_CHECK + basr %r2,%r0 tm SP_PSW(%r15),0x03 # irqs enabled? 
jz 0f - brasl %r14,trace_hardirqs_on + brasl %r14,trace_hardirqs_on_caller j 1f -0: brasl %r14,trace_hardirqs_off +0: brasl %r14,trace_hardirqs_off_caller 1: .endm #else -- cgit From 74af283102b358b0da545460d0d176f473e110f6 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 14 Nov 2008 18:18:07 +0100 Subject: [S390] cpu topology: fix locking cpu_coregroup_map used to grab a mutex on s390 since it was only called from process context. Since c7c22e4d5c1fdebfac4dba76de7d0338c2b0d832 "block: add support for IO CPU affinity" this is not true anymore. It now also gets called from softirq context. To prevent possible deadlocks change this in architecture code and use a spinlock instead of a mutex. Cc: stable@kernel.org Cc: Jens Axboe Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/topology.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 632b13e10053..a947899dcba1 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -65,18 +65,21 @@ static int machine_has_topology_irq; static struct timer_list topology_timer; static void set_topology_timer(void); static DECLARE_WORK(topology_work, topology_work_fn); +/* topology_lock protects the core linked list */ +static DEFINE_SPINLOCK(topology_lock); cpumask_t cpu_core_map[NR_CPUS]; cpumask_t cpu_coregroup_map(unsigned int cpu) { struct core_info *core = &core_info; + unsigned long flags; cpumask_t mask; cpus_clear(mask); if (!machine_has_topology) return cpu_present_map; - mutex_lock(&smp_cpu_state_mutex); + spin_lock_irqsave(&topology_lock, flags); while (core) { if (cpu_isset(cpu, core->mask)) { mask = core->mask; @@ -84,7 +87,7 @@ cpumask_t cpu_coregroup_map(unsigned int cpu) } core = core->next; } - mutex_unlock(&smp_cpu_state_mutex); + spin_unlock_irqrestore(&topology_lock, flags); if (cpus_empty(mask)) mask = cpumask_of_cpu(cpu); return mask; @@ -133,7 +136,7 @@ static void tl_to_cores(struct tl_info *info) union tl_entry *tle, *end; struct core_info *core = &core_info; - mutex_lock(&smp_cpu_state_mutex); + spin_lock_irq(&topology_lock); clear_cores(); tle = info->tle; end = (union tl_entry *)((unsigned long)info + info->length); @@ -157,7 +160,7 @@ static void tl_to_cores(struct tl_info *info) } tle = next_tle(tle); } - mutex_unlock(&smp_cpu_state_mutex); + spin_unlock_irq(&topology_lock); } static void topology_update_polarization_simple(void) -- cgit From d2f019fe40e8fecd822f87bc759f74925a5c31d6 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Nov 2008 18:18:09 +0100 Subject: [S390] fix s390x_newuname The uname system call for 64 bit compares current->personality without masking the upper 16 bits. If e.g. READ_IMPLIES_EXEC is set the result of a uname system call will always be s390x even if the process uses the s390 personality. 
Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/sys_s390.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 5fdb799062b7..4fe952e557ac 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -198,7 +198,7 @@ asmlinkage long s390x_newuname(struct new_utsname __user *name) { int ret = sys_newuname(name); - if (current->personality == PER_LINUX32 && !ret) { + if (personality(current->personality) == PER_LINUX32 && !ret) { ret = copy_to_user(name->machine, "s390\0\0\0\0", 8); if (ret) ret = -EFAULT; } -- cgit From 27123cbc264de89ce6951b1b4c84c223eb0f1702 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 14 Nov 2008 08:10:19 +0100 Subject: m68k: Fix off-by-one in m68k_setup_user_interrupt() commit 69961c375288bdab7604e0bb1c8d22999bb8a347 ("[PATCH] m68k/Atari: Interrupt updates") added a BUG_ON() with an incorrect upper bound comparison, which causes an early crash on VME boards, where IRQ_USER is 8, cnt is 192 and NR_IRQS is 200. Reported-by: Stephen N Chivers Tested-by: Kars de Jong Signed-off-by: Geert Uytterhoeven Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- arch/m68k/kernel/ints.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c index 7e8a0d394e61..761ee0440c99 100644 --- a/arch/m68k/kernel/ints.c +++ b/arch/m68k/kernel/ints.c @@ -133,7 +133,7 @@ void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt, { int i; - BUG_ON(IRQ_USER + cnt >= NR_IRQS); + BUG_ON(IRQ_USER + cnt > NR_IRQS); m68k_first_user_vec = vec; for (i = 0; i < cnt; i++) irq_controller[IRQ_USER + i] = &user_irq_controller; -- cgit From 52168e60f7d86d83124903098ac8c2dba93cd1c4 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Fri, 14 Nov 2008 13:47:31 +0000 Subject: Revert "x86: blacklist DMAR on Intel G31/G33 chipsets" This reverts commit e51af6630848406fc97adbd71443818cdcda297b, which was wrongly hoovered up and submitted about a month after a better fix had already been merged. The better fix is commit cbda1ba898647aeb4ee770b803c922f595e97731 ("PCI/iommu: blacklist DMAR on Intel G31/G33 chipsets"), where we do this blacklisting based on the DMI identification for the offending motherboard, since sometimes this chipset (or at least a chipset with the same PCI ID) apparently _does_ actually have an IOMMU. 
Signed-off-by: David Woodhouse Signed-off-by: Linus Torvalds --- arch/x86/include/asm/iommu.h | 1 - arch/x86/kernel/early-quirks.c | 18 ------------------ 2 files changed, 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index e4a552d44465..0b500c5b6446 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -6,7 +6,6 @@ extern void no_iommu_init(void); extern struct dma_mapping_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; -extern int dmar_disabled; extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len); diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 3ce029ffaa55..1b894b72c0f5 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -188,20 +188,6 @@ static void __init ati_bugs_contd(int num, int slot, int func) } #endif -#ifdef CONFIG_DMAR -static void __init intel_g33_dmar(int num, int slot, int func) -{ - struct acpi_table_header *dmar_tbl; - acpi_status status; - - status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl); - if (ACPI_SUCCESS(status)) { - printk(KERN_INFO "BIOS BUG: DMAR advertised on Intel G31/G33 chipset -- ignoring\n"); - dmar_disabled = 1; - } -} -#endif - #define QFLAG_APPLY_ONCE 0x1 #define QFLAG_APPLIED 0x2 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) @@ -225,10 +211,6 @@ static struct chipset early_qrk[] __initdata = { PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, -#ifdef CONFIG_DMAR - { PCI_VENDOR_ID_INTEL, 0x29c0, - PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, intel_g33_dmar }, -#endif {} }; -- cgit From d1f1e9c01006b4b050e090055c75278f80c2a5c5 Mon Sep 17 00:00:00 2001 From: Markus Metzger Date: Sat, 15 Nov 2008 11:00:17 +0100 Subject: x86, bts: fix unlock problem in ds.c Fix a problem where ds_request() returned an error without releasing the ds lock. Reported-by: Stephane Eranian Signed-off-by: Markus Metzger Cc: Signed-off-by: Ingo Molnar --- arch/x86/kernel/ds.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index 2b69994fd3a8..ac1d5b0586ba 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c @@ -384,8 +384,9 @@ static int ds_request(struct task_struct *task, void *base, size_t size, spin_lock(&ds_lock); + error = -EPERM; if (!check_tracer(task)) - return -EPERM; + goto out_unlock; error = -ENOMEM; context = ds_alloc_context(task); -- cgit From d3c6aa1e69f705ac3ab64584101b1d38435b1353 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sun, 16 Nov 2008 00:49:31 -0800 Subject: x86: fix es7000 compiling Impact: fix es7000 build CC arch/x86/kernel/es7000_32.o arch/x86/kernel/es7000_32.c: In function find_unisys_acpi_oem_table: arch/x86/kernel/es7000_32.c:255: error: implicit declaration of function acpi_get_table_with_size arch/x86/kernel/es7000_32.c:261: error: implicit declaration of function early_acpi_os_unmap_memory arch/x86/kernel/es7000_32.c: In function unmap_unisys_acpi_oem_table: arch/x86/kernel/es7000_32.c:277: error: implicit declaration of function __acpi_unmap_table make[1]: *** [arch/x86/kernel/es7000_32.o] Error 1 we applied one patch out of order... 
| commit a73aaedd95703bd49f4c3f9df06fb7b7373ba905 | Author: Yinghai Lu | Date: Sun Sep 14 02:33:14 2008 -0700 | | x86: check dsdt before find oem table for es7000, v2 | | v2: use __acpi_unmap_table() that patch need: x86: use early_ioremap in __acpi_map_table x86: always explicitly map acpi memory acpi: remove final __acpi_map_table mapping before setting acpi_gbl_permanent_mmap acpi/x86: introduce __apci_map_table, v4 submitted to the ACPI tree but not upstream yet. fix it until those patches applied, need to revert this one Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/kernel/es7000_32.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c index f454c78fcef6..0aa2c443d600 100644 --- a/arch/x86/kernel/es7000_32.c +++ b/arch/x86/kernel/es7000_32.c @@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) { struct acpi_table_header *header = NULL; int i = 0; - acpi_size tbl_size; - while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) { + while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) { if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) { struct oem_table *t = (struct oem_table *)header; oem_addrX = t->OEMTableAddr; oem_size = t->OEMTableSize; - early_acpi_os_unmap_memory(header, tbl_size); *oem_addr = (unsigned long)__acpi_map_table(oem_addrX, oem_size); return 0; } - early_acpi_os_unmap_memory(header, tbl_size); } return -1; } void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) { - if (!oem_addr) - return; - - __acpi_unmap_table((char *)oem_addr, oem_size); } #endif -- cgit From 0bd7b79851d0f74b24a9ce87d088f2e7c718f668 Mon Sep 17 00:00:00 2001 From: Alexander van Heukelum Date: Sun, 16 Nov 2008 15:29:00 +0100 Subject: x86: entry_64.S: remove whitespace at end of lines Impact: cleanup All blame goes to: color white,red "[^[:graph:]]+$" in .nanorc ;). Signed-off-by: Alexander van Heukelum Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_64.S | 190 ++++++++++++++++++++++----------------------- 1 file changed, 95 insertions(+), 95 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b86f332c96a6..54927784bab9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -11,15 +11,15 @@ * * NOTE: This code handles signal-recognition, which happens every time * after an interrupt and after each system call. - * - * Normal syscalls and interrupts don't save a full stack frame, this is + * + * Normal syscalls and interrupts don't save a full stack frame, this is * only done for syscall tracing, signals or fork/exec et.al. - * - * A note on terminology: - * - top of stack: Architecture defined interrupt frame from SS to RIP - * at the top of the kernel process stack. + * + * A note on terminology: + * - top of stack: Architecture defined interrupt frame from SS to RIP + * at the top of the kernel process stack. * - partial stack frame: partially saved registers upto R11. - * - full stack frame: Like partial stack frame, but all register saved. + * - full stack frame: Like partial stack frame, but all register saved. 
* * Some macro usage: * - CFI macros are used to generate dwarf2 unwind information for better @@ -142,7 +142,7 @@ END(mcount) #ifndef CONFIG_PREEMPT #define retint_kernel retint_restore_args -#endif +#endif #ifdef CONFIG_PARAVIRT ENTRY(native_usergs_sysret64) @@ -161,14 +161,14 @@ ENTRY(native_usergs_sysret64) .endm /* - * C code is not supposed to know about undefined top of stack. Every time - * a C function with an pt_regs argument is called from the SYSCALL based + * C code is not supposed to know about undefined top of stack. Every time + * a C function with an pt_regs argument is called from the SYSCALL based * fast path FIXUP_TOP_OF_STACK is needed. * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs * manipulation. - */ - - /* %rsp:at FRAMEEND */ + */ + + /* %rsp:at FRAMEEND */ .macro FIXUP_TOP_OF_STACK tmp movq %gs:pda_oldrsp,\tmp movq \tmp,RSP(%rsp) @@ -244,8 +244,8 @@ ENTRY(native_usergs_sysret64) .endm /* * A newly forked process directly context switches into this. - */ -/* rdi: prev */ + */ +/* rdi: prev */ ENTRY(ret_from_fork) CFI_DEFAULT_STACK push kernel_eflags(%rip) @@ -256,7 +256,7 @@ ENTRY(ret_from_fork) GET_THREAD_INFO(%rcx) testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx) jnz rff_trace -rff_action: +rff_action: RESTORE_REST testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? je int_ret_from_sys_call @@ -267,7 +267,7 @@ rff_action: rff_trace: movq %rsp,%rdi call syscall_trace_leave - GET_THREAD_INFO(%rcx) + GET_THREAD_INFO(%rcx) jmp rff_action CFI_ENDPROC END(ret_from_fork) @@ -278,20 +278,20 @@ END(ret_from_fork) * SYSCALL does not save anything on the stack and does not change the * stack pointer. */ - + /* - * Register setup: + * Register setup: * rax system call number * rdi arg0 - * rcx return address for syscall/sysret, C arg3 + * rcx return address for syscall/sysret, C arg3 * rsi arg1 - * rdx arg2 + * rdx arg2 * r10 arg3 (--> moved to rcx for C) * r8 arg4 * r9 arg5 * r11 eflags for syscall/sysret, temporary for C - * r12-r15,rbp,rbx saved by C code, not touched. - * + * r12-r15,rbp,rbx saved by C code, not touched. + * * Interrupts are off on entry. * Only called from user space. * @@ -301,7 +301,7 @@ END(ret_from_fork) * When user can change the frames always force IRET. That is because * it deals with uncanonical addresses better. SYSRET has trouble * with them due to bugs in both AMD and Intel CPUs. - */ + */ ENTRY(system_call) CFI_STARTPROC simple @@ -317,7 +317,7 @@ ENTRY(system_call) */ ENTRY(system_call_after_swapgs) - movq %rsp,%gs:pda_oldrsp + movq %rsp,%gs:pda_oldrsp movq %gs:pda_kernelstack,%rsp /* * No need to follow this irqs off/on section - it's straight @@ -325,7 +325,7 @@ ENTRY(system_call_after_swapgs) */ ENABLE_INTERRUPTS(CLBR_NONE) SAVE_ARGS 8,1 - movq %rax,ORIG_RAX-ARGOFFSET(%rsp) + movq %rax,ORIG_RAX-ARGOFFSET(%rsp) movq %rcx,RIP-ARGOFFSET(%rsp) CFI_REL_OFFSET rip,RIP-ARGOFFSET GET_THREAD_INFO(%rcx) @@ -339,19 +339,19 @@ system_call_fastpath: movq %rax,RAX-ARGOFFSET(%rsp) /* * Syscall return path ending with SYSRET (fast path) - * Has incomplete stack frame and undefined top of stack. - */ + * Has incomplete stack frame and undefined top of stack. 
+ */ ret_from_sys_call: movl $_TIF_ALLWORK_MASK,%edi /* edi: flagmask */ -sysret_check: +sysret_check: LOCKDEP_SYS_EXIT GET_THREAD_INFO(%rcx) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF movl TI_flags(%rcx),%edx andl %edi,%edx - jnz sysret_careful + jnz sysret_careful CFI_REMEMBER_STATE /* * sysretq will re-enable interrupts: @@ -366,7 +366,7 @@ sysret_check: CFI_RESTORE_STATE /* Handle reschedules */ - /* edx: work, edi: workmask */ + /* edx: work, edi: workmask */ sysret_careful: bt $TIF_NEED_RESCHED,%edx jnc sysret_signal @@ -379,7 +379,7 @@ sysret_careful: CFI_ADJUST_CFA_OFFSET -8 jmp sysret_check - /* Handle a signal */ + /* Handle a signal */ sysret_signal: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) @@ -398,7 +398,7 @@ sysret_signal: DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF jmp int_with_check - + badsys: movq $-ENOSYS,RAX-ARGOFFSET(%rsp) jmp ret_from_sys_call @@ -437,7 +437,7 @@ sysret_audit: #endif /* CONFIG_AUDITSYSCALL */ /* Do syscall tracing */ -tracesys: +tracesys: #ifdef CONFIG_AUDITSYSCALL testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx) jz auditsys @@ -460,8 +460,8 @@ tracesys: call *sys_call_table(,%rax,8) movq %rax,RAX-ARGOFFSET(%rsp) /* Use IRET because user could have changed frame */ - -/* + +/* * Syscall return path ending with IRET. * Has correct top of stack, but partial stack frame. */ @@ -505,18 +505,18 @@ int_very_careful: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) SAVE_REST - /* Check for syscall exit trace */ + /* Check for syscall exit trace */ testl $_TIF_WORK_SYSCALL_EXIT,%edx jz int_signal pushq %rdi CFI_ADJUST_CFA_OFFSET 8 - leaq 8(%rsp),%rdi # &ptregs -> arg1 + leaq 8(%rsp),%rdi # &ptregs -> arg1 call syscall_trace_leave popq %rdi CFI_ADJUST_CFA_OFFSET -8 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi jmp int_restore_rest - + int_signal: testl $_TIF_DO_NOTIFY_MASK,%edx jz 1f @@ -531,11 +531,11 @@ int_restore_rest: jmp int_with_check CFI_ENDPROC END(system_call) - -/* + +/* * Certain special system calls that need to save a complete full stack frame. - */ - + */ + .macro PTREGSCALL label,func,arg .globl \label \label: @@ -572,7 +572,7 @@ ENTRY(ptregscall_common) ret CFI_ENDPROC END(ptregscall_common) - + ENTRY(stub_execve) CFI_STARTPROC popq %r11 @@ -588,11 +588,11 @@ ENTRY(stub_execve) jmp int_ret_from_sys_call CFI_ENDPROC END(stub_execve) - + /* * sigreturn is special because it needs to restore all registers on return. * This cannot be done with SYSRET, so use the IRET return path instead. - */ + */ ENTRY(stub_rt_sigreturn) CFI_STARTPROC addq $8, %rsp @@ -685,12 +685,12 @@ exit_intr: GET_THREAD_INFO(%rcx) testl $3,CS-ARGOFFSET(%rsp) je retint_kernel - + /* Interrupt came from user space */ /* * Has a correct top of stack, but a partial stack frame * %rcx: thread info. Interrupts off. - */ + */ retint_with_reschedule: movl $_TIF_WORK_MASK,%edi retint_check: @@ -763,20 +763,20 @@ retint_careful: pushq %rdi CFI_ADJUST_CFA_OFFSET 8 call schedule - popq %rdi + popq %rdi CFI_ADJUST_CFA_OFFSET -8 GET_THREAD_INFO(%rcx) DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF jmp retint_check - + retint_signal: testl $_TIF_DO_NOTIFY_MASK,%edx jz retint_swapgs TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) SAVE_REST - movq $-1,ORIG_RAX(%rsp) + movq $-1,ORIG_RAX(%rsp) xorl %esi,%esi # oldset movq %rsp,%rdi # &pt_regs call do_notify_resume @@ -798,14 +798,14 @@ ENTRY(retint_kernel) jnc retint_restore_args call preempt_schedule_irq jmp exit_intr -#endif +#endif CFI_ENDPROC END(common_interrupt) - + /* * APIC interrupts. 
- */ + */ .macro apicinterrupt num,func INTR_FRAME pushq $~(\num) @@ -823,14 +823,14 @@ ENTRY(threshold_interrupt) apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt END(threshold_interrupt) -#ifdef CONFIG_SMP +#ifdef CONFIG_SMP ENTRY(reschedule_interrupt) apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt END(reschedule_interrupt) .macro INVALIDATE_ENTRY num ENTRY(invalidate_interrupt\num) - apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt + apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt END(invalidate_interrupt\num) .endm @@ -869,22 +869,22 @@ END(error_interrupt) ENTRY(spurious_interrupt) apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt END(spurious_interrupt) - + /* * Exception entry points. - */ + */ .macro zeroentry sym INTR_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME - pushq $0 /* push error code/oldrax */ + pushq $0 /* push error code/oldrax */ CFI_ADJUST_CFA_OFFSET 8 - pushq %rax /* push real oldrax to the rdi slot */ + pushq %rax /* push real oldrax to the rdi slot */ CFI_ADJUST_CFA_OFFSET 8 CFI_REL_OFFSET rax,0 leaq \sym(%rip),%rax jmp error_entry CFI_ENDPROC - .endm + .endm .macro errorentry sym XCPT_FRAME @@ -998,13 +998,13 @@ paranoid_schedule\trace: /* * Exception entry point. This expects an error code/orig_rax on the stack - * and the exception handler in %rax. - */ + * and the exception handler in %rax. + */ KPROBE_ENTRY(error_entry) _frame RDI CFI_REL_OFFSET rax,0 /* rdi slot contains rax, oldrax contains error code */ - cld + cld subq $14*8,%rsp CFI_ADJUST_CFA_OFFSET (14*8) movq %rsi,13*8(%rsp) @@ -1015,7 +1015,7 @@ KPROBE_ENTRY(error_entry) CFI_REL_OFFSET rdx,RDX movq %rcx,11*8(%rsp) CFI_REL_OFFSET rcx,RCX - movq %rsi,10*8(%rsp) /* store rax */ + movq %rsi,10*8(%rsp) /* store rax */ CFI_REL_OFFSET rax,RAX movq %r8, 9*8(%rsp) CFI_REL_OFFSET r8,R8 @@ -1025,29 +1025,29 @@ KPROBE_ENTRY(error_entry) CFI_REL_OFFSET r10,R10 movq %r11,6*8(%rsp) CFI_REL_OFFSET r11,R11 - movq %rbx,5*8(%rsp) + movq %rbx,5*8(%rsp) CFI_REL_OFFSET rbx,RBX - movq %rbp,4*8(%rsp) + movq %rbp,4*8(%rsp) CFI_REL_OFFSET rbp,RBP - movq %r12,3*8(%rsp) + movq %r12,3*8(%rsp) CFI_REL_OFFSET r12,R12 - movq %r13,2*8(%rsp) + movq %r13,2*8(%rsp) CFI_REL_OFFSET r13,R13 - movq %r14,1*8(%rsp) + movq %r14,1*8(%rsp) CFI_REL_OFFSET r14,R14 - movq %r15,(%rsp) + movq %r15,(%rsp) CFI_REL_OFFSET r15,R15 - xorl %ebx,%ebx + xorl %ebx,%ebx testl $3,CS(%rsp) je error_kernelspace -error_swapgs: +error_swapgs: SWAPGS error_sti: TRACE_IRQS_OFF - movq %rdi,RDI(%rsp) + movq %rdi,RDI(%rsp) CFI_REL_OFFSET rdi,RDI movq %rsp,%rdi - movq ORIG_RAX(%rsp),%rsi /* get error code */ + movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) call *%rax /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ @@ -1056,7 +1056,7 @@ error_exit: RESTORE_REST DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF - GET_THREAD_INFO(%rcx) + GET_THREAD_INFO(%rcx) testl %eax,%eax jne retint_kernel LOCKDEP_SYS_EXIT_IRQ @@ -1072,7 +1072,7 @@ error_kernelspace: /* There are two places in the kernel that can potentially fault with usergs. Handle them here. The exception handlers after iret run with kernel gs again, so don't set the user space flag. - B stepping K8s sometimes report an truncated RIP for IRET + B stepping K8s sometimes report an truncated RIP for IRET exceptions returning to compat mode. Check for these here too. 
*/ leaq irq_return(%rip),%rcx cmpq %rcx,RIP(%rsp) @@ -1084,17 +1084,17 @@ error_kernelspace: je error_swapgs jmp error_sti KPROBE_END(error_entry) - + /* Reload gs selector with exception handling */ - /* edi: new selector */ + /* edi: new selector */ ENTRY(native_load_gs_index) CFI_STARTPROC pushf CFI_ADJUST_CFA_OFFSET 8 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) SWAPGS -gs_change: - movl %edi,%gs +gs_change: + movl %edi,%gs 2: mfence /* workaround */ SWAPGS popf @@ -1102,20 +1102,20 @@ gs_change: ret CFI_ENDPROC ENDPROC(native_load_gs_index) - + .section __ex_table,"a" .align 8 .quad gs_change,bad_gs .previous .section .fixup,"ax" /* running with kernelgs */ -bad_gs: +bad_gs: SWAPGS /* switch back to user gs */ xorl %eax,%eax movl %eax,%gs jmp 2b - .previous - + .previous + /* * Create a kernel thread. * @@ -1138,7 +1138,7 @@ ENTRY(kernel_thread) xorl %r8d,%r8d xorl %r9d,%r9d - + # clone now call do_fork movq %rax,RAX(%rsp) @@ -1149,14 +1149,14 @@ ENTRY(kernel_thread) * so internally to the x86_64 port you can rely on kernel_thread() * not to reschedule the child before returning, this avoids the need * of hacks for example to fork off the per-CPU idle tasks. - * [Hopefully no generic code relies on the reschedule -AK] + * [Hopefully no generic code relies on the reschedule -AK] */ RESTORE_ALL UNFAKE_STACK_FRAME ret CFI_ENDPROC ENDPROC(kernel_thread) - + child_rip: pushq $0 # fake return address CFI_STARTPROC @@ -1191,10 +1191,10 @@ ENDPROC(child_rip) ENTRY(kernel_execve) CFI_STARTPROC FAKE_STACK_FRAME $0 - SAVE_ALL + SAVE_ALL movq %rsp,%rcx call sys_execve - movq %rax, RAX(%rsp) + movq %rax, RAX(%rsp) RESTORE_REST testq %rax,%rax je int_ret_from_sys_call @@ -1213,7 +1213,7 @@ ENTRY(coprocessor_error) END(coprocessor_error) ENTRY(simd_coprocessor_error) - zeroentry do_simd_coprocessor_error + zeroentry do_simd_coprocessor_error END(simd_coprocessor_error) ENTRY(device_not_available) @@ -1225,12 +1225,12 @@ KPROBE_ENTRY(debug) INTR_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME pushq $0 - CFI_ADJUST_CFA_OFFSET 8 + CFI_ADJUST_CFA_OFFSET 8 paranoidentry do_debug, DEBUG_STACK paranoidexit KPROBE_END(debug) - /* runs on exception stack */ + /* runs on exception stack */ KPROBE_ENTRY(nmi) INTR_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME @@ -1264,7 +1264,7 @@ ENTRY(bounds) END(bounds) ENTRY(invalid_op) - zeroentry do_invalid_op + zeroentry do_invalid_op END(invalid_op) ENTRY(coprocessor_segment_overrun) @@ -1319,7 +1319,7 @@ ENTRY(machine_check) INTR_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME pushq $0 - CFI_ADJUST_CFA_OFFSET 8 + CFI_ADJUST_CFA_OFFSET 8 paranoidentry do_machine_check jmp paranoid_exit1 CFI_ENDPROC -- cgit From 93ce99e849433ede4ce8b410b749dc0cad1100b2 Mon Sep 17 00:00:00 2001 From: Venki Pallipadi Date: Mon, 17 Nov 2008 14:43:58 -0800 Subject: x86: add rdtsc barrier to TSC sync check Impact: fix incorrectly marked unstable TSC clock Patch (commit 0d12cdd "sched: improve sched_clock() performance") has a regression on one of the test systems here. With the patch, I see: checking TSC synchronization [CPU#0 -> CPU#1]: Measured 28 cycles TSC warp between CPUs, turning off TSC clock. Marking TSC unstable due to check_tsc_sync_source failed Whereas, without the patch syncs pass fine on all CPUs: checking TSC synchronization [CPU#0 -> CPU#1]: passed. Due to this, TSC is marked unstable, when it is not actually unstable. This is because syncs in check_tsc_wrap() goes away due to this commit. As per the discussion on this thread, correct way to fix this is to add explicit syncs as below? 
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Ingo Molnar --- arch/x86/kernel/tsc_sync.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 9ffb01c31c40..1c0dfbca87c1 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void) cycles_t start, now, prev, end; int i; + rdtsc_barrier(); start = get_cycles(); + rdtsc_barrier(); /* * The measurement runs for 20 msecs: */ @@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void) */ __raw_spin_lock(&sync_lock); prev = last_tsc; + rdtsc_barrier(); now = get_cycles(); + rdtsc_barrier(); last_tsc = now; __raw_spin_unlock(&sync_lock); -- cgit From 10db4ef7b9a65b86e4d047671a1886f4c101a859 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 18 Nov 2008 15:23:08 +0100 Subject: x86, PEBS/DS: fix code flow in ds_request() this compiler warning: arch/x86/kernel/ds.c: In function 'ds_request': arch/x86/kernel/ds.c:368: warning: 'context' may be used uninitialized in this function Shows that the code flow in ds_request() is buggy - it goes into the unlock+release-context path even when the context is not allocated yet. First allocate the context, then do the other checks. Also, take care with GFP allocations under the ds_lock spinlock. Cc: Signed-off-by: Ingo Molnar --- arch/x86/kernel/ds.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c index ac1d5b0586ba..d1a121443bde 100644 --- a/arch/x86/kernel/ds.c +++ b/arch/x86/kernel/ds.c @@ -236,17 +236,33 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task) struct ds_context *context = *p_context; if (!context) { + spin_unlock(&ds_lock); + context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) + if (!context) { + spin_lock(&ds_lock); return NULL; + } context->ds = kzalloc(ds_cfg.sizeof_ds, GFP_KERNEL); if (!context->ds) { kfree(context); + spin_lock(&ds_lock); return NULL; } + spin_lock(&ds_lock); + /* + * Check for race - another CPU could have allocated + * it meanwhile: + */ + if (*p_context) { + kfree(context->ds); + kfree(context); + return *p_context; + } + *p_context = context; context->this = p_context; @@ -384,15 +400,15 @@ static int ds_request(struct task_struct *task, void *base, size_t size, spin_lock(&ds_lock); - error = -EPERM; - if (!check_tracer(task)) - goto out_unlock; - error = -ENOMEM; context = ds_alloc_context(task); if (!context) goto out_unlock; + error = -EPERM; + if (!check_tracer(task)) + goto out_unlock; + error = -EALREADY; if (context->owner[qual] == current) goto out_unlock; -- cgit
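A brief aside on the last commit above, for readers less familiar with the locking constraint it works around: GFP_KERNEL allocations may sleep, so they must never be made while a spinlock such as ds_lock is held. The fix therefore drops the lock around kzalloc(), re-takes it, and then re-checks whether another CPU allocated the context in the meantime. The fragment below is only an illustrative restatement of that unlock/allocate/re-lock/re-check idiom, not code from this series; every identifier in it (my_lock, my_obj, cached, get_or_alloc_obj) is made up for the example.

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative sketch only -- hypothetical names, not part of any patch above. */
static DEFINE_SPINLOCK(my_lock);
static struct my_obj *cached;

static struct my_obj *get_or_alloc_obj(void)
{
	struct my_obj *obj;

	spin_lock(&my_lock);
	if (cached) {				/* fast path: already allocated */
		obj = cached;
		spin_unlock(&my_lock);
		return obj;
	}
	spin_unlock(&my_lock);			/* never sleep with the lock held */

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);	/* may sleep */
	if (!obj)
		return NULL;

	spin_lock(&my_lock);
	if (cached) {				/* lost the race: another CPU got here first */
		kfree(obj);
		obj = cached;
	} else {
		cached = obj;
	}
	spin_unlock(&my_lock);
	return obj;
}

The same shape is visible in ds_alloc_context() after the patch: unlock, allocate outside the lock, re-lock, and only install the new context if *p_context is still NULL, freeing the freshly allocated one otherwise. The sketch omits the reference counting and error paths that the real ds.c code carries.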