-rw-r--r--  arch/alpha/Kconfig                |  1
-rw-r--r--  arch/alpha/include/asm/atomic.h   | 88
-rw-r--r--  arch/alpha/include/asm/cmpxchg.h  | 12
3 files changed, 54 insertions, 47 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5998106faa60..7920fc2e2a2a 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -2,6 +2,7 @@
 config ALPHA
 	bool
 	default y
+	select ARCH_ATOMIC
 	select ARCH_32BIT_USTAT_F_TINODE
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e41c113c6688..f2861a43a61e 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@

 #define ATOMIC64_INIT(i)	{ (i) }

-#define atomic_read(v)		READ_ONCE((v)->counter)
-#define atomic64_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

-#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

 /*
  * To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
  */

 #define ATOMIC_OP(op, asm_op)						\
-static __inline__ void atomic_##op(int i, atomic_t * v)			\
+static __inline__ void arch_atomic_##op(int i, atomic_t * v)		\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v)			\
 }									\

 #define ATOMIC_OP_RETURN(op, asm_op)					\
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 {									\
 	long temp, result;						\
 	__asm__ __volatile__(						\
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 }

 #define ATOMIC_FETCH_OP(op, asm_op)					\
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 {									\
 	long temp, result;						\
 	__asm__ __volatile__(						\
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 }

 #define ATOMIC64_OP(op, asm_op)						\
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v)	\
 {									\
 	s64 temp;							\
 	__asm__ __volatile__(						\
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
 }									\

 #define ATOMIC64_OP_RETURN(op, asm_op)					\
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
+static __inline__ s64							\
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)		\
 {									\
 	s64 temp, result;						\
 	__asm__ __volatile__(						\
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
 }

 #define ATOMIC64_FETCH_OP(op, asm_op)					\
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
+static __inline__ s64							\
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)		\
 {									\
 	s64 temp, result;						\
 	__asm__ __volatile__(						\
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)

-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

-#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot			arch_atomic_andnot
+#define arch_atomic64_andnot			arch_atomic64_andnot

 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, asm)						\
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
 ATOMIC_OPS(or, bis)
 ATOMIC_OPS(xor, xor)

-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

-#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

 #undef ATOMIC_OPS
 #undef ATOMIC64_FETCH_OP
@@ -198,14 +200,18 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP

-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))

-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))

 /**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -213,7 +219,7 @@
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, new, old;
 	smp_mb();
@@ -234,10 +240,10 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	smp_mb();
 	return old;
 }
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

 /**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -245,7 +251,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	s64 c, new, old;
 	smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 	smp_mb();
 	return old;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

 /*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic_t
  *
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 old, tmp;
 	smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 	return old - 1;
 }
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

 #endif /* _ALPHA_ATOMIC_H */
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6c7c39452471..6e0a850aa9d3 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
 				       sizeof(*(ptr)));			\
 })

-#define cmpxchg_local(ptr, o, n)					\
+#define arch_cmpxchg_local(ptr, o, n)					\
 ({									\
 	__typeof__(*(ptr)) _o_ = (o);					\
 	__typeof__(*(ptr)) _n_ = (n);					\
@@ -26,7 +26,7 @@
 					  sizeof(*(ptr)));		\
 })

-#define cmpxchg64_local(ptr, o, n)					\
+#define arch_cmpxchg64_local(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
 	cmpxchg_local((ptr), (o), (n));					\
@@ -42,7 +42,7 @@
  * The leading and the trailing memory barriers guarantee that these
  * operations are fully ordered.
  */
-#define xchg(ptr, x)							\
+#define arch_xchg(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) _x_ = (x);					\
@@ -53,7 +53,7 @@
 	__ret;								\
 })

-#define cmpxchg(ptr, o, n)						\
+#define arch_cmpxchg(ptr, o, n)						\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) _o_ = (o);					\
@@ -65,10 +65,10 @@
 	__ret;								\
 })

-#define cmpxchg64(ptr, o, n)						\
+#define arch_cmpxchg64(ptr, o, n)					\
 ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
+	arch_cmpxchg((ptr), (o), (n));					\
 })

 #undef ____cmpxchg
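
Background for readers following the ARCH_ATOMIC conversion: once an architecture selects ARCH_ATOMIC and exports only the arch_-prefixed operations (as this patch does for Alpha), the generic instrumented wrappers generated under include/asm-generic provide the unprefixed atomic_*() API on top of them, inserting KASAN/KCSAN instrumentation before calling into the architecture code. The sketch below shows the general shape of such a wrapper; it is a simplified illustration of the generated header, not part of this patch.

	/*
	 * Simplified sketch of generic instrumented wrappers built on top
	 * of the arch_ operations introduced above (illustrative only).
	 * Callers keep using atomic_read()/atomic_fetch_add_unless(); the
	 * generic layer instruments the access, then defers to the arch
	 * implementation renamed by this patch.
	 */
	static __always_inline int
	atomic_read(const atomic_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));	/* sanitizer hook */
		return arch_atomic_read(v);		/* arch implementation */
	}

	static __always_inline int
	atomic_fetch_add_unless(atomic_t *v, int a, int u)
	{
		instrument_atomic_read_write(v, sizeof(*v));
		return arch_atomic_fetch_add_unless(v, a, u);
	}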
