From b9b382dabba1e4a106093557933ec4411ebcbe0f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 7 Dec 2006 12:43:06 +0900 Subject: sh: Reworked swap cache entry encoding for SH-X2 MMU. In the 64-bit PTE case there's no point in restricting the encoding to the low bits of the PTE, we can instead bump all of this up to the high 32 bits and extend PTE_FILE_MAX_BITS to 32, adopting the same convention used by x86 PAE. There's a minor discrepency between the number of bits used for the swap type encoding between 32 and 64-bit PTEs, but this is unlikely to cause any problem given the extended offset. Signed-off-by: Paul Mundt --- include/asm-sh/pgtable.h | 47 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 41 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h index c84901dbd8e5..af62bd33a02a 100644 --- a/include/asm-sh/pgtable.h +++ b/include/asm-sh/pgtable.h @@ -508,16 +508,50 @@ struct vm_area_struct; extern void update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte); -/* Encode and de-code a swap entry */ /* + * Encode and de-code a swap entry + * + * Constraints: + * _PAGE_FILE at bit 0 + * _PAGE_PRESENT at bit 8 + * _PAGE_PROTNONE at bit 9 + * + * For the normal case, we encode the swap type into bits 0:7 and the + * swap offset into bits 10:30. For the 64-bit PTE case, we keep the + * preserved bits in the low 32-bits and use the upper 32 as the swap + * offset (along with a 5-bit type), following the same approach as x86 + * PAE. This keeps the logic quite simple, and allows for a full 32 + * PTE_FILE_MAX_BITS, as opposed to the 29-bits we're constrained with + * in the pte_low case. + * + * As is evident by the Alpha code, if we ever get a 64-bit unsigned + * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes + * much cleaner.. 
+ * * NOTE: We should set ZEROs at the position of _PAGE_PRESENT * and _PAGE_PROTNONE bits */ -#define __swp_type(x) ((x).val & 0xff) -#define __swp_offset(x) ((x).val >> 10) -#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 10) }) -#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 }) -#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 }) +#ifdef CONFIG_X2TLB +#define __swp_type(x) ((x).val & 0x1f) +#define __swp_offset(x) ((x).val >> 5) +#define __swp_entry(type, offset) ((swp_entry_t){ (type) | (offset) << 5}) +#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high }) +#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val }) + +/* + * Encode and decode a nonlinear file mapping entry + */ +#define pte_to_pgoff(pte) ((pte).pte_high) +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) }) + +#define PTE_FILE_MAX_BITS 32 +#else +#define __swp_type(x) ((x).val & 0xff) +#define __swp_offset(x) ((x).val >> 10) +#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10)}) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 }) /* * Encode and decode a nonlinear file mapping entry @@ -525,6 +559,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma, #define PTE_FILE_MAX_BITS 29 #define pte_to_pgoff(pte) (pte_val(pte) >> 1) #define pgoff_to_pte(off) ((pte_t) { ((off) << 1) | _PAGE_FILE }) +#endif typedef pte_t *pte_addr_t; -- cgit From b482ad5daef786962279ae03090970b0ee8b8d1c Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 7 Dec 2006 12:43:52 +0900 Subject: sh: Shut up csum_ipv6_magic() warnings. Signed-off-by: Paul Mundt --- include/asm-sh/checksum.h | 69 +++++++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h index d44344c88e73..4bc8357e8892 100644 --- a/include/asm-sh/checksum.h +++ b/include/asm-sh/checksum.h @@ -34,25 +34,26 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum); */ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, int *src_err_ptr, int *dst_err_ptr); + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); /* * Note: when you get a NULL pointer exception here this means someone - * passed in an incorrect kernel address to one of these functions. - * - * If you use these functions directly please don't forget the + * passed in an incorrect kernel address to one of these functions. + * + * If you use these functions directly please don't forget the * access_ok(). 
*/ -static __inline__ +static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum) + int len, __wsum sum) { - return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL); + return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); } -static __inline__ +static inline __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *err_ptr) + int len, __wsum sum, int *err_ptr) { return csum_partial_copy_generic((__force const void *)src, dst, len, sum, err_ptr, NULL); @@ -62,7 +63,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, * Fold a partial checksum */ -static __inline__ __sum16 csum_fold(__wsum sum) +static inline __sum16 csum_fold(__wsum sum) { unsigned int __dummy; __asm__("swap.w %0, %1\n\t" @@ -85,7 +86,7 @@ static __inline__ __sum16 csum_fold(__wsum sum) * i386 version by Jorge Cwik , adapted * for linux by * Arnt Gulbrandsen. */ -static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) { unsigned int sum, __dummy0, __dummy1; @@ -113,10 +114,10 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl) return csum_fold(sum); } -static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum) { #ifdef __LITTLE_ENDIAN__ unsigned long len_proto = (proto + len) << 8; @@ -132,6 +133,7 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, : "=r" (sum), "=r" (len_proto) : "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum) : "t"); + return sum; } @@ -139,30 +141,28 @@ static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, * computes the checksum of the TCP/UDP pseudo-header * returns a 16-bit checksum, already complemented */ -static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + unsigned short len, + unsigned short proto, + __wsum sum) { - return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } /* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ - -static __inline__ __sum16 ip_compute_csum(const void *buff, int len) +static inline __sum16 ip_compute_csum(const void *buff, int len) { - return csum_fold (csum_partial(buff, len, 0)); + return csum_fold(csum_partial(buff, len, 0)); } #define _HAVE_ARCH_IPV6_CSUM -#ifdef CONFIG_IPV6 -static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, - const struct in6_addr *daddr, - __u32 len, unsigned short proto, - __wsum sum) +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, unsigned short proto, + __wsum sum) { unsigned int __dummy; __asm__("clrt\n\t" @@ -187,22 +187,21 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, "movt %1\n\t" "add %1, %0\n" : "=r" (sum), "=&r" (__dummy) - : "r" (saddr), "r" (daddr), + : "r" (saddr), "r" (daddr), "r" (htonl(len)), "r" (htonl(proto)), "0" (sum) : "t"); return csum_fold(sum); } -#endif -/* +/* * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -static __inline__ __wsum csum_and_copy_to_user (const 
void *src, - void __user *dst, - int len, __wsum sum, - int *err_ptr) +static inline __wsum csum_and_copy_to_user(const void *src, + void __user *dst, + int len, __wsum sum, + int *err_ptr) { if (access_ok(VERIFY_WRITE, dst, len)) return csum_partial_copy_generic((__force const void *)src, -- cgit From fce3a24e7087ba1f56eea5ec14fec592e677e672 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 7 Dec 2006 13:20:58 +0900 Subject: sh: push-switch fixups for work_struct API damage. INIT_WORK() dropped the data arg, so now we have to stash an extra pointer and backpedal instead. Signed-off-by: Paul Mundt --- include/asm-sh/push-switch.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include') diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h index dfc6bad567f0..4903f9e52dd8 100644 --- a/include/asm-sh/push-switch.h +++ b/include/asm-sh/push-switch.h @@ -4,6 +4,7 @@ #include #include #include +#include struct push_switch { /* switch state */ @@ -12,6 +13,8 @@ struct push_switch { struct timer_list debounce; /* workqueue */ struct work_struct work; + /* platform device, for workqueue handler */ + struct platform_device *pdev; }; struct push_switch_platform_info { -- cgit From b6250e37292b9679b82a3edc28f330a32eed9ae4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 7 Dec 2006 17:27:18 +0900 Subject: sh: landisk board build fixes. Get the landisk board building again.. Signed-off-by: Paul Mundt --- include/asm-sh/pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h index af62bd33a02a..036ca2843866 100644 --- a/include/asm-sh/pgtable.h +++ b/include/asm-sh/pgtable.h @@ -548,7 +548,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma, #else #define __swp_type(x) ((x).val & 0xff) #define __swp_offset(x) ((x).val >> 10) -#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10)}) +#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) <<10}) #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 1 }) #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 1 }) -- cgit From ec723fbe7e19f5a66cea183bca7ca20675631a7a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 7 Dec 2006 20:33:38 +0900 Subject: sh: Split out atomic ops logically. We have a few different ways to do the atomic operations, so split them out in to different headers rather than bloating atomic.h. Kernelspace gUSA will take this up to a third implementation. Signed-off-by: Paul Mundt --- include/asm-sh/atomic-irq.h | 71 ++++++++++++++++++++ include/asm-sh/atomic-llsc.h | 107 ++++++++++++++++++++++++++++++ include/asm-sh/atomic.h | 153 +------------------------------------------ 3 files changed, 180 insertions(+), 151 deletions(-) create mode 100644 include/asm-sh/atomic-irq.h create mode 100644 include/asm-sh/atomic-llsc.h (limited to 'include') diff --git a/include/asm-sh/atomic-irq.h b/include/asm-sh/atomic-irq.h new file mode 100644 index 000000000000..74f7943cff6f --- /dev/null +++ b/include/asm-sh/atomic-irq.h @@ -0,0 +1,71 @@ +#ifndef __ASM_SH_ATOMIC_IRQ_H +#define __ASM_SH_ATOMIC_IRQ_H + +/* + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. 
+ */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + *(long *)v += i; + local_irq_restore(flags); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + *(long *)v -= i; + local_irq_restore(flags); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long temp, flags; + + local_irq_save(flags); + temp = *(long *)v; + temp += i; + *(long *)v = temp; + local_irq_restore(flags); + + return temp; +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long temp, flags; + + local_irq_save(flags); + temp = *(long *)v; + temp -= i; + *(long *)v = temp; + local_irq_restore(flags); + + return temp; +} + +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + *(long *)v &= ~mask; + local_irq_restore(flags); +} + +static inline void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + *(long *)v |= mask; + local_irq_restore(flags); +} + +#endif /* __ASM_SH_ATOMIC_IRQ_H */ diff --git a/include/asm-sh/atomic-llsc.h b/include/asm-sh/atomic-llsc.h new file mode 100644 index 000000000000..4b00b78e3f4f --- /dev/null +++ b/include/asm-sh/atomic-llsc.h @@ -0,0 +1,107 @@ +#ifndef __ASM_SH_ATOMIC_LLSC_H +#define __ASM_SH_ATOMIC_LLSC_H + +/* + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + */ +static inline void atomic_add(int i, atomic_t *v) +{ + unsigned long tmp; + + __asm__ __volatile__ ( +"1: movli.l @%2, %0 ! atomic_add \n" +" add %1, %0 \n" +" movco.l %0, @%2 \n" +" bf 1b \n" + : "=&z" (tmp) + : "r" (i), "r" (&v->counter) + : "t"); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + unsigned long tmp; + + __asm__ __volatile__ ( +"1: movli.l @%2, %0 ! atomic_sub \n" +" sub %1, %0 \n" +" movco.l %0, @%2 \n" +" bf 1b \n" + : "=&z" (tmp) + : "r" (i), "r" (&v->counter) + : "t"); +} + +/* + * SH-4A note: + * + * We basically get atomic_xxx_return() for free compared with + * atomic_xxx(). movli.l/movco.l require r0 due to the instruction + * encoding, so the retval is automatically set without having to + * do any special work. + */ +static inline int atomic_add_return(int i, atomic_t *v) +{ + unsigned long temp; + + __asm__ __volatile__ ( +"1: movli.l @%2, %0 ! atomic_add_return \n" +" add %1, %0 \n" +" movco.l %0, @%2 \n" +" bf 1b \n" +" synco \n" + : "=&z" (temp) + : "r" (i), "r" (&v->counter) + : "t"); + + return temp; +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + unsigned long temp; + + __asm__ __volatile__ ( +"1: movli.l @%2, %0 ! atomic_sub_return \n" +" sub %1, %0 \n" +" movco.l %0, @%2 \n" +" bf 1b \n" +" synco \n" + : "=&z" (temp) + : "r" (i), "r" (&v->counter) + : "t"); + + return temp; +} + +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + unsigned long tmp; + + __asm__ __volatile__ ( +"1: movli.l @%2, %0 ! atomic_clear_mask \n" +" and %1, %0 \n" +" movco.l %0, @%2 \n" +" bf 1b \n" + : "=&z" (tmp) + : "r" (~mask), "r" (&v->counter) + : "t"); +} + +static inline void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + unsigned long tmp; + + __asm__ __volatile__ ( +"1: movli.l @%2, %0 ! 
atomic_set_mask \n" +" or %1, %0 \n" +" movco.l %0, @%2 \n" +" bf 1b \n" + : "=&z" (tmp) + : "r" (mask), "r" (&v->counter) + : "t"); +} + +#endif /* __ASM_SH_ATOMIC_LLSC_H */ diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h index 28305c3cbddf..e12570b9339d 100644 --- a/include/asm-sh/atomic.h +++ b/include/asm-sh/atomic.h @@ -17,119 +17,14 @@ typedef struct { volatile int counter; } atomic_t; #include #include -/* - * To get proper branch prediction for the main line, we must branch - * forward to code at the end of this object's .text section, then - * branch back to restart the operation. - */ -static inline void atomic_add(int i, atomic_t *v) -{ #ifdef CONFIG_CPU_SH4A - unsigned long tmp; - - __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_add \n" -" add %1, %0 \n" -" movco.l %0, @%2 \n" -" bf 1b \n" - : "=&z" (tmp) - : "r" (i), "r" (&v->counter) - : "t"); +#include #else - unsigned long flags; - - local_irq_save(flags); - *(long *)v += i; - local_irq_restore(flags); -#endif -} - -static inline void atomic_sub(int i, atomic_t *v) -{ -#ifdef CONFIG_CPU_SH4A - unsigned long tmp; - - __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_sub \n" -" sub %1, %0 \n" -" movco.l %0, @%2 \n" -" bf 1b \n" - : "=&z" (tmp) - : "r" (i), "r" (&v->counter) - : "t"); -#else - unsigned long flags; - - local_irq_save(flags); - *(long *)v -= i; - local_irq_restore(flags); +#include #endif -} - -/* - * SH-4A note: - * - * We basically get atomic_xxx_return() for free compared with - * atomic_xxx(). movli.l/movco.l require r0 due to the instruction - * encoding, so the retval is automatically set without having to - * do any special work. - */ -static inline int atomic_add_return(int i, atomic_t *v) -{ - unsigned long temp; - -#ifdef CONFIG_CPU_SH4A - __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_add_return \n" -" add %1, %0 \n" -" movco.l %0, @%2 \n" -" bf 1b \n" -" synco \n" - : "=&z" (temp) - : "r" (i), "r" (&v->counter) - : "t"); -#else - unsigned long flags; - - local_irq_save(flags); - temp = *(long *)v; - temp += i; - *(long *)v = temp; - local_irq_restore(flags); -#endif - - return temp; -} #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) -static inline int atomic_sub_return(int i, atomic_t *v) -{ - unsigned long temp; - -#ifdef CONFIG_CPU_SH4A - __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_sub_return \n" -" sub %1, %0 \n" -" movco.l %0, @%2 \n" -" bf 1b \n" -" synco \n" - : "=&z" (temp) - : "r" (i), "r" (&v->counter) - : "t"); -#else - unsigned long flags; - - local_irq_save(flags); - temp = *(long *)v; - temp -= i; - *(long *)v = temp; - local_irq_restore(flags); -#endif - - return temp; -} - #define atomic_dec_return(v) atomic_sub_return(1,(v)) #define atomic_inc_return(v) atomic_add_return(1,(v)) @@ -180,50 +75,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) } #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -{ -#ifdef CONFIG_CPU_SH4A - unsigned long tmp; - - __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! atomic_clear_mask \n" -" and %1, %0 \n" -" movco.l %0, @%2 \n" -" bf 1b \n" - : "=&z" (tmp) - : "r" (~mask), "r" (&v->counter) - : "t"); -#else - unsigned long flags; - - local_irq_save(flags); - *(long *)v &= ~mask; - local_irq_restore(flags); -#endif -} - -static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -{ -#ifdef CONFIG_CPU_SH4A - unsigned long tmp; - - __asm__ __volatile__ ( -"1: movli.l @%2, %0 ! 
atomic_set_mask \n" -" or %1, %0 \n" -" movco.l %0, @%2 \n" -" bf 1b \n" - : "=&z" (tmp) - : "r" (mask), "r" (&v->counter) - : "t"); -#else - unsigned long flags; - - local_irq_save(flags); - *(long *)v |= mask; - local_irq_restore(flags); -#endif -} - /* Atomic operations are already serializing on SH */ #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() -- cgit From dc34d312c7b25d5d0f54c16d143a9526936e5d38 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 8 Dec 2006 17:41:43 +0900 Subject: sh: BUG() handling through trapa vector. Previously we haven't been doing anything with verbose BUG() reporting, and we've been relying on the oops path for handling BUG()'s, which is rather sub-optimal. This switches BUG handling to use a fixed trapa vector (#0x3e) where we construct a small bug frame post trapa instruction to get the context right. This also makes it trivial to wire up a DIE_BUG for the atomic die chain, which we couldn't really do before. Signed-off-by: Paul Mundt --- include/asm-sh/bug.h | 53 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h index 1b4fc52a59e8..2f89dd06d0cd 100644 --- a/include/asm-sh/bug.h +++ b/include/asm-sh/bug.h @@ -1,19 +1,54 @@ #ifndef __ASM_SH_BUG_H #define __ASM_SH_BUG_H - #ifdef CONFIG_BUG -/* - * Tell the user there is some problem. - */ -#define BUG() do { \ - printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ - *(volatile int *)0 = 0; \ + +struct bug_frame { + unsigned short opcode; + unsigned short line; + const char *file; + const char *func; +}; + +struct pt_regs; + +extern void handle_BUG(struct pt_regs *); + +#define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */ + +#ifdef CONFIG_DEBUG_BUGVERBOSE + +#define BUG() \ +do { \ + __asm__ __volatile__ ( \ + ".align 2\n\t" \ + ".short %O0\n\t" \ + ".short %O1\n\t" \ + ".long %O2\n\t" \ + ".long %O3\n\t" \ + : \ + : "n" (TRAPA_BUG_OPCODE), \ + "i" (__LINE__), "X" (__FILE__), \ + "X" (__FUNCTION__)); \ +} while (0) + +#else + +#define BUG() \ +do { \ + __asm__ __volatile__ ( \ + ".align 2\n\t" \ + ".short %O0\n\t" \ + : \ + : "n" (TRAPA_BUG_OPCODE)); \ } while (0) +#endif /* CONFIG_DEBUG_BUGVERBOSE */ + #define HAVE_ARCH_BUG -#endif + +#endif /* CONFIG_BUG */ #include -#endif +#endif /* __ASM_SH_BUG_H */ -- cgit From 5432143464ee7f5cb8b0b015a0fd1c3279af10ae Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 9 Dec 2006 09:17:01 +0900 Subject: sh: Fixup dma_cache_sync() callers. This now takes a struct device, update all of the callers. 
Signed-off-by: Paul Mundt --- include/asm-sh/dma-mapping.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h index 37ab0c131a4d..8d0867b98e05 100644 --- a/include/asm-sh/dma-mapping.h +++ b/include/asm-sh/dma-mapping.h @@ -67,7 +67,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, if (dev->bus == &pci_bus_type) return virt_to_bus(ptr); #endif - dma_cache_sync(ptr, size, dir); + dma_cache_sync(dev, ptr, size, dir); return virt_to_bus(ptr); } @@ -81,7 +81,7 @@ static inline int dma_map_sg(struct device *dev, struct scatterlist *sg, for (i = 0; i < nents; i++) { #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(page_address(sg[i].page) + sg[i].offset, + dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, sg[i].length, dir); #endif sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; @@ -112,7 +112,7 @@ static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle, if (dev->bus == &pci_bus_type) return; #endif - dma_cache_sync(bus_to_virt(dma_handle), size, dir); + dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir); } static inline void dma_sync_single_range(struct device *dev, @@ -124,7 +124,7 @@ static inline void dma_sync_single_range(struct device *dev, if (dev->bus == &pci_bus_type) return; #endif - dma_cache_sync(bus_to_virt(dma_handle) + offset, size, dir); + dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir); } static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg, @@ -134,7 +134,7 @@ static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg, for (i = 0; i < nelems; i++) { #if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT) - dma_cache_sync(page_address(sg[i].page) + sg[i].offset, + dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset, sg[i].length, dir); #endif sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset; -- cgit From 41504c39726a7099e5a42508dd57fe561c8b4129 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 11 Dec 2006 20:28:03 +0900 Subject: sh: SH-MobileR SH7722 CPU support. This adds CPU support for the SH7722. Signed-off-by: Paul Mundt --- include/asm-sh/bugs.h | 12 ++++++++++-- include/asm-sh/cpu-sh4/cache.h | 2 +- include/asm-sh/cpu-sh4/freq.h | 2 +- include/asm-sh/irq.h | 5 ++++- include/asm-sh/processor.h | 8 ++++++-- 5 files changed, 22 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h index 795047da5e17..a294997a8412 100644 --- a/include/asm-sh/bugs.h +++ b/include/asm-sh/bugs.h @@ -16,9 +16,8 @@ static void __init check_bugs(void) { - extern char *get_cpu_subtype(void); extern unsigned long loops_per_jiffy; - char *p= &init_utsname()->machine[2]; /* "sh" */ + char *p = &init_utsname()->machine[2]; /* "sh" */ cpu_data->loops_per_jiffy = loops_per_jiffy; @@ -40,6 +39,15 @@ static void __init check_bugs(void) *p++ = '4'; *p++ = 'a'; break; + case CPU_SH73180 ... 
CPU_SH7722: + *p++ = '4'; + *p++ = 'a'; + *p++ = 'l'; + *p++ = '-'; + *p++ = 'd'; + *p++ = 's'; + *p++ = 'p'; + break; default: *p++ = '?'; *p++ = '!'; diff --git a/include/asm-sh/cpu-sh4/cache.h b/include/asm-sh/cpu-sh4/cache.h index 6e9c7e6ee8e4..f92b20a0983d 100644 --- a/include/asm-sh/cpu-sh4/cache.h +++ b/include/asm-sh/cpu-sh4/cache.h @@ -22,7 +22,7 @@ #define CCR_CACHE_ICE 0x0100 /* Instruction Cache Enable */ #define CCR_CACHE_ICI 0x0800 /* IC Invalidate */ #define CCR_CACHE_IIX 0x8000 /* IC Index Enable */ -#ifndef CONFIG_CPU_SUBTYPE_SH7780 +#ifndef CONFIG_CPU_SH4A #define CCR_CACHE_EMODE 0x80000000 /* EMODE Enable */ #endif diff --git a/include/asm-sh/cpu-sh4/freq.h b/include/asm-sh/cpu-sh4/freq.h index ef2b9b1ae41f..602d061ca2dc 100644 --- a/include/asm-sh/cpu-sh4/freq.h +++ b/include/asm-sh/cpu-sh4/freq.h @@ -10,7 +10,7 @@ #ifndef __ASM_CPU_SH4_FREQ_H #define __ASM_CPU_SH4_FREQ_H -#if defined(CONFIG_CPU_SUBTYPE_SH73180) +#if defined(CONFIG_CPU_SUBTYPE_SH73180) || defined(CONFIG_CPU_SUBTYPE_SH7722) #define FRQCR 0xa4150000 #elif defined(CONFIG_CPU_SUBTYPE_SH7780) #define FRQCR 0xffc80000 diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h index fd576088e47e..bff965ef4b95 100644 --- a/include/asm-sh/irq.h +++ b/include/asm-sh/irq.h @@ -37,7 +37,8 @@ # define ONCHIP_NR_IRQS 144 #elif defined(CONFIG_CPU_SUBTYPE_SH7300) || \ defined(CONFIG_CPU_SUBTYPE_SH73180) || \ - defined(CONFIG_CPU_SUBTYPE_SH7343) + defined(CONFIG_CPU_SUBTYPE_SH7343) || \ + defined(CONFIG_CPU_SUBTYPE_SH7722) # define ONCHIP_NR_IRQS 109 #elif defined(CONFIG_CPU_SUBTYPE_SH7780) # define ONCHIP_NR_IRQS 111 @@ -79,6 +80,8 @@ # define OFFCHIP_NR_IRQS 16 #elif defined(CONFIG_SH_7343_SOLUTION_ENGINE) # define OFFCHIP_NR_IRQS 12 +#elif defined(CONFIG_SH_7722_SOLUTION_ENGINE) +# define OFFCHIP_NR_IRQS 14 #elif defined(CONFIG_SH_UNKNOWN) # define OFFCHIP_NR_IRQS 16 /* Must also be last */ #else diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h index 6f1dd7ca1b1d..e29f2abb92de 100644 --- a/include/asm-sh/processor.h +++ b/include/asm-sh/processor.h @@ -27,6 +27,8 @@ #define CCN_CVR 0xff000040 #define CCN_PRR 0xff000044 +const char *get_cpu_subtype(void); + /* * CPU type and hardware bug flags. Kept separately for each CPU. * @@ -52,8 +54,10 @@ enum cpu_type { CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501, /* SH-4A types */ - CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781, - CPU_SH7785, + CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, + + /* SH4AL-DSP types */ + CPU_SH73180, CPU_SH7343, CPU_SH7722, /* Unknown subtype */ CPU_SH_NONE -- cgit
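
The bit layout introduced by the first patch above (the SH-X2 swap-entry rework in pgtable.h) is easiest to see with a small userspace model. The sketch below mirrors the encode/decode arithmetic of the two branches: the CONFIG_X2TLB case, where a 5-bit type sits in the low bits of pte_high and the offset occupies the rest of the upper 32 bits, and the classic 32-bit case, where an 8-bit type and an offset starting at bit 10 are packed into a value that __swp_entry_to_pte() later shifts left by one. This is an illustration only, under the assumption that modelling pte_high as a plain 32-bit value is enough to show the layout; the swp_t type, the *_x2/*_32 helpers and main() are invented for the demo and are not kernel API.

```c
/*
 * Standalone model of the SH swap-entry encodings discussed above.
 * Demo only: swp_t and the helper names are invented here; the real
 * kernel uses swp_entry_t/pte_t and the macros shown in the patch.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } swp_t;

/* X2TLB (64-bit PTE): 5-bit type in bits 0:4 of pte_high, offset above. */
static swp_t swp_entry_x2(uint32_t type, uint32_t offset)
{
	return (swp_t){ type | (offset << 5) };
}
static uint32_t swp_type_x2(swp_t e)   { return e.val & 0x1f; }
static uint32_t swp_offset_x2(swp_t e) { return e.val >> 5; }

/*
 * Classic 32-bit PTE: 8-bit type in bits 0:7 of the swap value, offset
 * from bit 10 up; __swp_entry_to_pte() then shifts the whole value left
 * by one so the low PTE bits named in the patch comment stay clear.
 */
static swp_t swp_entry_32(uint32_t type, uint32_t offset)
{
	return (swp_t){ type | (offset << 10) };
}
static uint32_t swp_type_32(swp_t e)   { return e.val & 0xff; }
static uint32_t swp_offset_32(swp_t e) { return e.val >> 10; }

int main(void)
{
	swp_t a = swp_entry_x2(3, 0x123456);	/* 27-bit offset available */
	swp_t b = swp_entry_32(3, 0x12345);	/* 21-bit offset available */

	/* Both layouts must round-trip type and offset unchanged. */
	assert(swp_type_x2(a) == 3 && swp_offset_x2(a) == 0x123456);
	assert(swp_type_32(b) == 3 && swp_offset_32(b) == 0x12345);

	printf("x2tlb: type=%u offset=%#x\n",
	       (unsigned)swp_type_x2(a), (unsigned)swp_offset_x2(a));
	printf("32bit: type=%u offset=%#x\n",
	       (unsigned)swp_type_32(b), (unsigned)swp_offset_32(b));
	return 0;
}
```

The round-trip assertions are the point of the exercise: because the X2TLB variant keeps the whole encoding in the upper 32 bits of the PTE, none of the reserved low-bit positions (_PAGE_FILE, _PAGE_PRESENT, _PAGE_PROTNONE) constrain the offset, which is why the patch can afford a full 32-bit PTE_FILE_MAX_BITS there versus 29 in the pte_low case.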