Diffstat (limited to 'include/asm-sh')
-rw-r--r--	include/asm-sh/irq.h	4
-rw-r--r--	include/asm-sh/spinlock.h	61
-rw-r--r--	include/asm-sh/spinlock_types.h	22
3 files changed, 42 insertions, 45 deletions
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h
index 831e52ee45b5..614a8c13b721 100644
--- a/include/asm-sh/irq.h
+++ b/include/asm-sh/irq.h
@@ -587,10 +587,6 @@ static inline int generic_irq_demux(int irq)
 #define irq_canonicalize(irq)	(irq)
 #define irq_demux(irq)		__irq_demux(sh_mv.mv_irq_demux(irq))
 
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
 #if defined(CONFIG_CPU_SUBTYPE_SH73180)
 #include <asm/irq-sh73180.h>
 #endif
diff --git a/include/asm-sh/spinlock.h b/include/asm-sh/spinlock.h
index e770b55649eb..846322d4c35d 100644
--- a/include/asm-sh/spinlock.h
+++ b/include/asm-sh/spinlock.h
@@ -15,20 +15,11 @@
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
-typedef struct {
-	volatile unsigned long lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define spin_is_locked(x)	((x)->lock != 0)
-#define spin_unlock_wait(x)	do { barrier(); } while (spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)	((x)->lock != 0)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_unlock_wait(x) \
+	do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
@@ -36,7 +27,7 @@ typedef struct {
  *
  * We make no fairness assumptions.  They have a cost.
  */
-static inline void _raw_spin_lock(spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	__asm__ __volatile__ (
 		"1:\n\t"
@@ -49,14 +40,14 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 	);
 }
 
-static inline void _raw_spin_unlock(spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	assert_spin_locked(lock);
 
 	lock->lock = 0;
 }
 
-#define _raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
+#define __raw_spin_trylock(x) (!test_and_set_bit(0, &(x)->lock))
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
@@ -66,51 +57,40 @@ static inline void _raw_spin_unlock(spinlock_t *lock)
  * needs to get a irq-safe write-lock, but readers can get non-irqsafe
  * read-locks.
  */
-typedef struct {
-	spinlock_t lock;
-	atomic_t counter;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_BIAS		0x01000000
-#define RW_LOCK_UNLOCKED	(rwlock_t) { { 0 }, { RW_LOCK_BIAS } }
-#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while (0)
-
-static inline void _raw_read_lock(rwlock_t *rw)
+
+static inline void __raw_read_lock(raw_rwlock_t *rw)
 {
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 
 	atomic_inc(&rw->counter);
 
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 }
 
-static inline void _raw_read_unlock(rwlock_t *rw)
+static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 
 	atomic_dec(&rw->counter);
 
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 }
 
-static inline void _raw_write_lock(rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
-	_raw_spin_lock(&rw->lock);
+	__raw_spin_lock(&rw->lock);
 	atomic_set(&rw->counter, -1);
 }
 
-static inline void _raw_write_unlock(rwlock_t *rw)
+static inline void __raw_write_unlock(raw_rwlock_t *rw)
 {
 	atomic_set(&rw->counter, 0);
-	_raw_spin_unlock(&rw->lock);
+	__raw_spin_unlock(&rw->lock);
 }
 
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
-static inline int _raw_write_trylock(rwlock_t *rw)
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
 {
 	if (atomic_sub_and_test(RW_LOCK_BIAS, &rw->counter))
 		return 1;
@@ -121,4 +101,3 @@ static inline int _raw_write_trylock(rwlock_t *rw)
 }
 
 #endif /* __ASM_SH_SPINLOCK_H */
-
diff --git a/include/asm-sh/spinlock_types.h b/include/asm-sh/spinlock_types.h
new file mode 100644
index 000000000000..8c41b6c3aac8
--- /dev/null
+++ b/include/asm-sh/spinlock_types.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SH_SPINLOCK_TYPES_H
+#define __ASM_SH_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+	volatile unsigned long lock;
+} raw_spinlock_t;
+
+#define __SPIN_LOCK_UNLOCKED		{ 0 }
+
+typedef struct {
+	raw_spinlock_t lock;
+	atomic_t counter;
+} raw_rwlock_t;
+
+#define RW_LOCK_BIAS			0x01000000
+#define __RAW_RW_LOCK_UNLOCKED		{ { 0 }, { RW_LOCK_BIAS } }
+
+#endif
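
The rwlock side of this patch builds a read-write lock out of the raw spinlock plus an atomic counter: readers briefly take the spinlock just to bump or drop the counter, while a writer takes the spinlock for its whole critical section and parks the counter at -1 (the trylock path subtracts RW_LOCK_BIAS instead). The sketch below is a rough userspace model of that counter scheme, for illustration only: the toy_rwlock type and function names are made up, a pthread mutex stands in for raw_spinlock_t, and a plain int stands in for atomic_t. It mirrors the structure of the SH code above rather than being a complete or fair rwlock.

/* Toy userspace model of the spinlock-plus-counter rwlock scheme above.
 * Illustrative only -- not kernel code.  Build with: cc toy_rwlock.c -lpthread */
#include <pthread.h>
#include <stdio.h>

struct toy_rwlock {
	pthread_mutex_t lock;	/* stands in for raw_spinlock_t rw->lock */
	int counter;		/* stands in for atomic_t rw->counter    */
};

static void toy_read_lock(struct toy_rwlock *rw)
{
	pthread_mutex_lock(&rw->lock);		/* __raw_spin_lock()   */
	rw->counter++;				/* atomic_inc()        */
	pthread_mutex_unlock(&rw->lock);	/* __raw_spin_unlock() */
}

static void toy_read_unlock(struct toy_rwlock *rw)
{
	pthread_mutex_lock(&rw->lock);
	rw->counter--;				/* atomic_dec()        */
	pthread_mutex_unlock(&rw->lock);
}

static void toy_write_lock(struct toy_rwlock *rw)
{
	pthread_mutex_lock(&rw->lock);		/* held until write_unlock,
						   so new readers block here */
	rw->counter = -1;			/* atomic_set(..., -1) */
}

static void toy_write_unlock(struct toy_rwlock *rw)
{
	rw->counter = 0;			/* atomic_set(..., 0)  */
	pthread_mutex_unlock(&rw->lock);	/* __raw_spin_unlock() */
}

int main(void)
{
	struct toy_rwlock rw = { PTHREAD_MUTEX_INITIALIZER, 0 };

	toy_read_lock(&rw);
	printf("readers: %d\n", rw.counter);		/* prints 1  */
	toy_read_unlock(&rw);

	toy_write_lock(&rw);
	printf("writer marker: %d\n", rw.counter);	/* prints -1 */
	toy_write_unlock(&rw);

	return 0;
}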
