path: root/arch/frv/include/asm/atomic_defs.h

#include <asm/spr-regs.h>

#ifdef __ATOMIC_LIB__

#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define ATOMIC_QUALS
#define ATOMIC_EXPORT(x)	EXPORT_SYMBOL(x)

#else /* !OUTOFLINE && LIB */

#define ATOMIC_OP_RETURN(op)
#define ATOMIC_FETCH_OP(op)

#endif /* OUTOFLINE */

#else /* !__ATOMIC_LIB__ */

#define ATOMIC_EXPORT(x)

#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS

#define ATOMIC_OP_RETURN(op)						\
extern int __atomic_##op##_return(int i, int *v);			\
extern long long __atomic64_##op##_return(long long i, long long *v);

#define ATOMIC_FETCH_OP(op)						\
extern int __atomic32_fetch_##op(int i, int *v);			\
extern long long __atomic64_fetch_##op(long long i, long long *v);

#else /* !OUTOFLINE && !LIB */

#define ATOMIC_QUALS	static inline

#endif /* OUTOFLINE */
#endif /* __ATOMIC_LIB__ */
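
/*
 * The conditionals above resolve to one of four build modes:
 *
 *   __ATOMIC_LIB__ set, OUTOFLINE set:	emit the function bodies below
 *	with external linkage and EXPORT_SYMBOL() each of them.
 *   __ATOMIC_LIB__ set, OUTOFLINE unset: emit nothing at all (both
 *	generator macros are defined empty).
 *   __ATOMIC_LIB__ unset, OUTOFLINE set: emit extern declarations only;
 *	the bodies come from the library translation unit.
 *   __ATOMIC_LIB__ unset, OUTOFLINE unset: emit the bodies as static
 *	inline functions into every includer.
 *
 * A minimal sketch of the library-side translation unit that provides
 * the out-of-line bodies (assuming the usual atomic-lib.c arrangement
 * under arch/frv/lib/):
 *
 *	#include <linux/export.h>
 *	#include <asm/atomic.h>
 *
 *	#define __ATOMIC_LIB__
 *
 *	#include <asm/atomic_defs.h>
 */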


/*
 * Note on the 64 bit inline asm variants...
 *
 * CSTD is a conditional instruction and needs a constrained memory reference.
 * Normally 'U' provides the correct constraints for conditional instructions,
 * and this is what the 32 bit version uses; however, 'U' does not appear to
 * work for 64 bit values (gcc-4.9).
 *
 * The exact constraint is that conditional instructions cannot deal with an
 * immediate displacement in the memory reference, so we read the address
 * through a volatile cast into a local variable to ensure we _have_ to
 * compute the correct address without displacement. This allows us to use
 * the regular 'm' constraint for the memory address.
 *
 * Furthermore, the %Ln operand, which prints the low word register (r+1),
 * really only works for registers; this means we cannot allow immediate
 * values for the 64 bit versions the way we do for the 32 bit ones.
 *
 */
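
/*
 * Roughly speaking (a sketch, not the kernel's actual READ_ONCE()
 * definition), the pointer laundering used below amounts to:
 *
 *	long long *__v = *(long long * volatile *)&v;
 *
 * The volatile read leaves gcc holding an opaque pointer value, so the
 * "m"(*__v) operand has to be addressed through that register directly,
 * with no immediate displacement folded into the memory reference.
 */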

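/*
 * In outline, every loop below follows the standard FR-V atomic
 * sequence (Documentation/frv/atomic-ops.txt has the authoritative
 * description): ORCC/CKEQ set ICC3.Z and make CC7 true, ORCR copies
 * that into CC3, and the CST/CORCC pair only executes while CC3 is
 * still true (the ",cc3,#1" predicate).  On an uninterrupted pass,
 * CORCC computes gr29|gr29 (non-zero in kernel context) and so clears
 * ICC3.Z, letting the final BEQ fall through.  Exception and interrupt
 * entry clobbers the condition registers, nullifying the predicated
 * store and leaving ICC3.Z set, so BEQ branches back and the whole
 * sequence retries.
 */
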
#ifndef ATOMIC_OP_RETURN
#define ATOMIC_OP_RETURN(op)						\
ATOMIC_QUALS int __atomic_##op##_return(int i, int *v)			\
{									\
	int val;							\
									\
	asm volatile(							\
	    "0:						\n"		\
	    "	orcc		gr0,gr0,gr0,icc3	\n"		\
	    "	ckeq		icc3,cc7		\n"		\
	    "	ld.p		%M0,%1			\n"		\
	    "	orcr		cc7,cc7,cc3		\n"		\
	    "   "#op"%I2	%1,%2,%1		\n"		\
	    "	cst.p		%1,%M0		,cc3,#1	\n"		\
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"		\
	    "	beq		icc3,#0,0b		\n"		\
	    : "+U"(*v), "=&r"(val)					\
	    : "NPr"(i)							\
	    : "memory", "cc7", "cc3", "icc3"				\
	    );								\
									\
	return val;							\
}									\
ATOMIC_EXPORT(__atomic_##op##_return);					\
									\
ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v)	\
{									\
	long long *__v = READ_ONCE(v);					\
	long long val;							\
									\
	asm volatile(							\
	    "0:						\n"		\
	    "	orcc		gr0,gr0,gr0,icc3	\n"		\
	    "	ckeq		icc3,cc7		\n"		\
	    "	ldd.p		%M0,%1			\n"		\
	    "	orcr		cc7,cc7,cc3		\n"		\
	    "   "#op"cc		%L1,%L2,%L1,icc0	\n"		\
	    "   "#op"x		%1,%2,%1,icc0		\n"		\
	    "	cstd.p		%1,%M0		,cc3,#1	\n"		\
	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"		\
	    "	beq		icc3,#0,0b		\n"		\
	    : "+m"(*__v), "=&e"(val)					\
	    : "e"(i)							\
	    : "memory", "cc7", "cc3", "icc0", "icc3"			\
	    );								\
									\
	return val;							\
}									\
ATOMIC_EXPORT(__atomic64_##op##_return);
#endif
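
/*
 * ATOMIC_OP_RETURN(op) thus yields __atomic_##op##_return() and
 * __atomic64_##op##_return(), both returning the *new* value of the
 * counter.  The wrappers in <asm/atomic.h> map them onto the generic
 * atomic API along these lines (a sketch):
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		return __atomic_add_return(i, &v->counter);
 *	}
 */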

#ifndef ATOMIC_FETCH_OP
#define ATOMIC_FETCH_OP(op)						\
ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v)			\
{									\
	int old, tmp;							\
									\
	asm volatile(							\
		"0:						\n"	\
		"	orcc		gr0,gr0,gr0,icc3	\n"	\
		"	ckeq		icc3,cc7		\n"	\
		"	ld.p		%M0,%1			\n"	\
		"	orcr		cc7,cc7,cc3		\n"	\
		"	"#op"%I3	%1,%3,%2		\n"	\
		"	cst.p		%2,%M0		,cc3,#1	\n"	\
		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
		"	beq		icc3,#0,0b		\n"	\
		: "+U"(*v), "=&r"(old), "=r"(tmp)			\
		: "NPr"(i)						\
		: "memory", "cc7", "cc3", "icc3"			\
		);							\
									\
	return old;							\
}									\
ATOMIC_EXPORT(__atomic32_fetch_##op);					\
									\
ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v)	\
{									\
	long long *__v = READ_ONCE(v);					\
	long long old, tmp;						\
									\
	asm volatile(							\
		"0:						\n"	\
		"	orcc		gr0,gr0,gr0,icc3	\n"	\
		"	ckeq		icc3,cc7		\n"	\
		"	ldd.p		%M0,%1			\n"	\
		"	orcr		cc7,cc7,cc3		\n"	\
		"	"#op"		%L1,%L3,%L2		\n"	\
		"	"#op"		%1,%3,%2		\n"	\
		"	cstd.p		%2,%M0		,cc3,#1	\n"	\
		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
		"	beq		icc3,#0,0b		\n"	\
		: "+m"(*__v), "=&e"(old), "=e"(tmp)			\
		: "e"(i)						\
		: "memory", "cc7", "cc3", "icc3"			\
		);							\
									\
	return old;							\
}									\
ATOMIC_EXPORT(__atomic64_fetch_##op);
#endif
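
/*
 * ATOMIC_FETCH_OP(op) likewise yields __atomic32_fetch_##op() and
 * __atomic64_fetch_##op(), but these return the value the counter held
 * *before* the operation: the old value is loaded into %1, the result
 * of the op lands in the scratch operand %2, and only the old value is
 * returned.  A sketch of a caller:
 *
 *	static inline int atomic_fetch_add(int i, atomic_t *v)
 *	{
 *		return __atomic32_fetch_add(i, &v->counter);
 *	}
 */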

ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(xor)
ATOMIC_FETCH_OP(add)
ATOMIC_FETCH_OP(sub)

ATOMIC_OP_RETURN(add)
ATOMIC_OP_RETURN(sub)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_QUALS
#undef ATOMIC_EXPORT