/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_hw.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__

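/*
 * Masks for the values written to the qpx_sqa/qpx_rq[123]a registers
 * (presumably the queue "adder" doorbells). EHEA_BMASK_IBM() uses IBM bit
 * numbering (bit 0 is the most significant bit), so bits 48..63 select the
 * low-order 16 bits that carry the WQE count passed to the ehea_update_*()
 * helpers below.
 */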
#define QPX_SQA_VALUE   EHEA_BMASK_IBM(48, 63)
#define QPX_RQ1A_VALUE  EHEA_BMASK_IBM(48, 63)
#define QPX_RQ2A_VALUE  EHEA_BMASK_IBM(48, 63)
#define QPX_RQ3A_VALUE  EHEA_BMASK_IBM(48, 63)

#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)

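/*
 * Memory-mapped register layout of a queue pair (QP). The reserved arrays
 * pad the structure so that each qpx_* member sits at its hardware offset;
 * QPTEMM_OFFSET() turns a member name into that offset for use with the
 * epa_load()/epa_store() accessors further down.
 */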
struct ehea_qptemm {
	u64 qpx_hcr;
	u64 qpx_c;
	u64 qpx_herr;
	u64 qpx_aer;
	u64 qpx_sqa;
	u64 qpx_sqc;
	u64 qpx_rq1a;
	u64 qpx_rq1c;
	u64 qpx_st;
	u64 qpx_aerr;
	u64 qpx_tenure;
	u64 qpx_reserved1[(0x098 - 0x058) / 8];
	u64 qpx_portp;
	u64 qpx_reserved2[(0x100 - 0x0A0) / 8];
	u64 qpx_t;
	u64 qpx_sqhp;
	u64 qpx_sqptp;
	u64 qpx_reserved3[(0x140 - 0x118) / 8];
	u64 qpx_sqwsize;
	u64 qpx_reserved4[(0x170 - 0x148) / 8];
	u64 qpx_sqsize;
	u64 qpx_reserved5[(0x1B0 - 0x178) / 8];
	u64 qpx_sigt;
	u64 qpx_wqecnt;
	u64 qpx_rq1hp;
	u64 qpx_rq1ptp;
	u64 qpx_rq1size;
	u64 qpx_reserved6[(0x220 - 0x1D8) / 8];
	u64 qpx_rq1wsize;
	u64 qpx_reserved7[(0x240 - 0x228) / 8];
	u64 qpx_pd;
	u64 qpx_scqn;
	u64 qpx_rcqn;
	u64 qpx_aeqn;
	u64 reserved49;
	u64 qpx_ram;
	u64 qpx_reserved8[(0x300 - 0x270) / 8];
	u64 qpx_rq2a;
	u64 qpx_rq2c;
	u64 qpx_rq2hp;
	u64 qpx_rq2ptp;
	u64 qpx_rq2size;
	u64 qpx_rq2wsize;
	u64 qpx_rq2th;
	u64 qpx_rq3a;
	u64 qpx_rq3c;
	u64 qpx_rq3hp;
	u64 qpx_rq3ptp;
	u64 qpx_rq3size;
	u64 qpx_rq3wsize;
	u64 qpx_rq3th;
	u64 qpx_lpn;
	u64 qpx_reserved9[(0x400 - 0x378) / 8];
	u64 reserved_ext[(0x500 - 0x400) / 8];
	u64 reserved2[(0x1000 - 0x500) / 8];
};

#define MRx_HCR_LPARID_VALID EHEA_BMASK_IBM(0, 0)

#define MRMWMM_OFFSET(x) offsetof(struct ehea_mrmwmm, x)

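/*
 * Register layout for memory-region/memory-window management (mrx_*
 * registers); member offsets are taken with MRMWMM_OFFSET().
 */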
struct ehea_mrmwmm {
	u64 mrx_hcr;
	u64 mrx_c;
	u64 mrx_herr;
	u64 mrx_aer;
	u64 mrx_pp;
	u64 reserved1;
	u64 reserved2;
	u64 reserved3;
	u64 reserved4[(0x200 - 0x40) / 8];
	u64 mrx_ctl[64];
};

#define QPEDMM_OFFSET(x) offsetof(struct ehea_qpedmm, x)

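/*
 * Extended QP data (qpedx_* registers); member offsets are taken with
 * QPEDMM_OFFSET().
 */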
struct ehea_qpedmm {
	u64 reserved0[(0x400) / 8];
	u64 qpedx_phh;
	u64 qpedx_ppsgp;
	u64 qpedx_ppsgu;
	u64 qpedx_ppdgp;
	u64 qpedx_ppdgu;
	u64 qpedx_aph;
	u64 qpedx_apsgp;
	u64 qpedx_apsgu;
	u64 qpedx_apdgp;
	u64 qpedx_apdgu;
	u64 qpedx_apav;
	u64 qpedx_apsav;
	u64 qpedx_hcr;
	u64 reserved1[4];
	u64 qpedx_rrl0;
	u64 qpedx_rrrkey0;
	u64 qpedx_rrva0;
	u64 reserved2;
	u64 qpedx_rrl1;
	u64 qpedx_rrrkey1;
	u64 qpedx_rrva1;
	u64 reserved3;
	u64 qpedx_rrl2;
	u64 qpedx_rrrkey2;
	u64 qpedx_rrva2;
	u64 reserved4;
	u64 qpedx_rrl3;
	u64 qpedx_rrrkey3;
	u64 qpedx_rrva3;
};

#define CQX_FECADDER EHEA_BMASK_IBM(32, 63)
#define CQX_FEC_CQE_CNT EHEA_BMASK_IBM(32, 63)
#define CQX_N1_GENERATE_COMP_EVENT EHEA_BMASK_IBM(0, 0)
#define CQX_EP_EVENT_PENDING EHEA_BMASK_IBM(0, 0)

#define CQTEMM_OFFSET(x) offsetof(struct ehea_cqtemm, x)

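/*
 * Memory-mapped register layout of a completion queue (CQ). cqx_feca,
 * cqx_n1 and cqx_ep are written by the helpers below to acknowledge
 * processed CQEs and to control completion event generation.
 */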
struct ehea_cqtemm {
	u64 cqx_hcr;
	u64 cqx_c;
	u64 cqx_herr;
	u64 cqx_aer;
	u64 cqx_ptp;
	u64 cqx_tp;
	u64 cqx_fec;
	u64 cqx_feca;
	u64 cqx_ep;
	u64 cqx_eq;
	u64 reserved1;
	u64 cqx_n0;
	u64 cqx_n1;
	u64 reserved2[(0x1000 - 0x60) / 8];
};

#define EQTEMM_OFFSET(x) offsetof(struct ehea_eqtemm, x)

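/*
 * Memory-mapped register layout of an event queue (EQ); member offsets
 * are taken with EQTEMM_OFFSET().
 */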
struct ehea_eqtemm {
	u64 eqx_hcr;
	u64 eqx_c;
	u64 eqx_herr;
	u64 eqx_aer;
	u64 eqx_ptp;
	u64 eqx_tp;
	u64 eqx_ssba;
	u64 eqx_psba;
	u64 eqx_cec;
	u64 eqx_meql;
	u64 eqx_xisbi;
	u64 eqx_xisc;
	u64 eqx_it;
};

/*
 * These access functions will be changed when the discussion about
 * the new access methods for POWER has settled.
 */

static inline u64 epa_load(struct h_epa epa, u32 offset)
{
	return __raw_readq((void __iomem *)(epa.addr + offset));
}

static inline void epa_store(struct h_epa epa, u32 offset, u64 value)
{
	__raw_writeq(value, (void __iomem *)(epa.addr + offset));
	epa_load(epa, offset);	/* synchronize explicitly to eHEA */
}

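/*
 * Same as epa_store(), but without the read-back; used where the extra
 * MMIO read to flush the write out to the adapter is not required.
 */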
static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
{
	__raw_writeq(value, (void __iomem *)(epa.addr + offset));
}

#define epa_store_cq(epa, offset, value)\
	epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
	epa_load(epa, CQTEMM_OFFSET(offset))

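/*
 * The helpers below write WQE/CQE counts into the corresponding adder
 * registers to notify the adapter of newly posted or processed entries.
 */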
static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_sqa),
		      EHEA_BMASK_SET(QPX_SQA_VALUE, nr_wqes));
}

static inline void ehea_update_rq3a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq3a),
		      EHEA_BMASK_SET(QPX_RQ3A_VALUE, nr_wqes));
}

static inline void ehea_update_rq2a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq2a),
		      EHEA_BMASK_SET(QPX_RQ2A_VALUE, nr_wqes));
}

static inline void ehea_update_rq1a(struct ehea_qp *qp, u16 nr_wqes)
{
	struct h_epa epa = qp->epas.kernel;
	epa_store_acc(epa, QPTEMM_OFFSET(qpx_rq1a),
		      EHEA_BMASK_SET(QPX_RQ1A_VALUE, nr_wqes));
}

static inline void ehea_update_feca(struct ehea_cq *cq, u32 nr_cqes)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_acc(epa, CQTEMM_OFFSET(cqx_feca),
		      EHEA_BMASK_SET(CQX_FECADDER, nr_cqes));
}

static inline void ehea_reset_cq_n1(struct ehea_cq *cq)
{
	struct h_epa epa = cq->epas.kernel;
	epa_store_cq(epa, cqx_n1,
		     EHEA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, 1));
}

static inline void ehea_reset_cq_ep(struct ehea_cq *my_cq)
{
	struct h_epa epa = my_cq->epas.kernel;
	epa_store_acc(epa, CQTEMM_OFFSET(cqx_ep),
		      EHEA_BMASK_SET(CQX_EP_EVENT_PENDING, 0));
}

#endif	/* __EHEA_HW_H__ */