/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include <linux/prefetch.h>
#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT         12
#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE          (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
#define EHEA_HUGEPAGESHIFT     34
#define EHEA_HUGEPAGE_SIZE     (1UL << EHEA_HUGEPAGESHIFT)
#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel section size < eHEA section size
#endif
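
/*
 * With the values above, EHEA_PAGESIZE is 4 KiB and EHEA_SECTSIZE is
 * 16 MiB, so EHEA_PAGES_PER_SECTION = (1UL << 24) >> 12 = 4096 pages per
 * section; EHEA_HUGEPAGE_SIZE is 16 GiB.
 */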

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
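
/*
 * These fields are packed and unpacked with the EHEA_BMASK_SET() and
 * EHEA_BMASK_GET() helpers from ehea.h. Illustrative sketch (index and
 * count are placeholder variables):
 *
 *	u64 wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, count)
 *		  | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *
 *	int type = EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id);
 */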

struct ehea_vsgentry {
	u64 vaddr;
	u32 l_key;
	u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES  	252
#define SWQE2_MAX_IMM            	(0xD0 - 0x30)
#define SWQE3_MAX_IMM            	224

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010
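
/*
 * These bits are ORed into ehea_swqe.tx_control. Illustrative sketch of a
 * hardware-checksummed send that requests a completion (not a verbatim
 * driver excerpt):
 *
 *	swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM | EHEA_SWQE_TCP_CHECKSUM |
 *			    EHEA_SWQE_SIGNALLED_COMPLETION;
 */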

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE		32

struct ehea_swqe {
	u64 wr_id;
	u16 tx_control;
	u16 vlan_tag;
	u8 reserved1;
	u8 ip_start;
	u8 ip_end;
	u8 immediate_data_length;
	u8 tcp_offset;
	u8 reserved2;
	u16 reserved2b;
	u8 wrap_tag;
	u8 descriptors;		/* number of valid descriptors in WQE */
	u16 reserved3;
	u16 reserved4;
	u16 mss;
	u32 reserved5;
	union {
		/*  Send WQE Format 1 */
		struct {
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
		} no_immediate_data;

		/*  Send WQE Format 2 */
		struct {
			struct ehea_vsgentry sg_entry;
			/* 0x30 */
			u8 immediate_data[SWQE2_MAX_IMM];
			/* 0xd0 */
			struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
		} immdata_desc __packed;

		/*  Send WQE Format 3 */
		struct {
			u8 immediate_data[SWQE3_MAX_IMM];
		} immdata_nodesc;
	} u;
};
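
/*
 * Format selection sketch, derived from the layout above rather than
 * quoted from the driver: format 2 carries the packet headers as
 * immediate data and the payload by reference (hdrs, hdr_len, payload_dma
 * and payload_len are placeholders):
 *
 *	swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT |
 *			    EHEA_SWQE_DESCRIPTORS_PRESENT;
 *	memcpy(swqe->u.immdata_desc.immediate_data, hdrs, hdr_len);
 *	swqe->immediate_data_length = hdr_len;
 *	swqe->u.immdata_desc.sg_entry.vaddr = payload_dma;
 *	swqe->u.immdata_desc.sg_entry.len = payload_len;
 *	swqe->descriptors = 1;
 */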

struct ehea_rwqe {
	u64 wr_id;		/* work request ID */
	u8 reserved1[5];
	u8 data_segments;
	u16 reserved2;
	u64 reserved3;
	u64 reserved4;
	struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400

#define EHEA_CQE_TYPE_RQ           0x60
#define EHEA_CQE_STAT_ERR_MASK     0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
#define EHEA_CQE_BLIND_CKSUM       0x8000
#define EHEA_CQE_STAT_ERR_TCP      0x4000
#define EHEA_CQE_STAT_ERR_IP       0x2000
#define EHEA_CQE_STAT_ERR_CRC      0x1000

/* Defines which bad send CQE statuses lead to a port reset */
#define EHEA_CQE_STAT_RESET_MASK   0x0002

struct ehea_cqe {
	u64 wr_id;		/* work request ID from WQE */
	u8 type;
	u8 valid;
	u16 status;
	u16 reserved1;
	u16 num_bytes_transfered;
	u16 vlan_tag;
	u16 inet_checksum_value;
	u8 reserved2;
	u8 header_length;
	u16 reserved3;
	u16 page_offset;
	u16 wqe_count;
	u32 qp_token;
	u32 timestamp;
	u32 reserved4;
	u64 reserved5[3];
};
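
/*
 * A CQE is treated as bad when any bit of EHEA_CQE_STAT_ERR_MASK is set
 * in its status field; EHEA_CQE_STAT_RESET_MASK selects the send errors
 * that additionally force a port reset. Illustrative check, with
 * handle_bad_cqe() as a hypothetical helper:
 *
 *	if (cqe->status & EHEA_CQE_STAT_ERR_MASK)
 *		handle_bad_cqe(cqe);
 */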

#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)
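
/*
 * An event queue entry is a single 64-bit word decoded with the masks
 * above. Illustrative decode:
 *
 *	if (EHEA_BMASK_GET(EHEA_EQE_VALID, eqe->entry)) {
 *		u64 token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
 *		...
 *	}
 */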

#define EHEA_AER_RESTYPE_QP  0x8
#define EHEA_AER_RESTYPE_CQ  0x4
#define EHEA_AER_RESTYPE_EQ  0x3

/* Defines which affiliated errors lead to a port reset */
#define EHEA_AER_RESET_MASK   0xFFFFFFFFFEFFFFFFULL
#define EHEA_AERR_RESET_MASK  0xFFFFFFFFFFFFFFFFULL

struct ehea_eqe {
	u64 entry;
};

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
	struct ehea_page *current_page;

	if (q_offset >= queue->queue_length)
		q_offset -= queue->queue_length;
	current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
	return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}
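
/*
 * hw_qeit_calc() splits a queue offset into a page index (bits above
 * EHEA_PAGESHIFT) and a byte offset within that 4 KiB page: for example,
 * q_offset 0x1040 selects queue_pages[1] at offset 0x40. Offsets up to
 * one queue length past the end wrap around once.
 */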

static inline void *hw_qeit_get(struct hw_queue *queue)
{
	return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset >= queue->queue_length) {
		queue->current_q_offset = 0;
		/* toggle the valid flag */
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
}

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	hw_qeit_inc(queue);
	return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	u8 valid = retvalue->valid;
	void *pref;

	if ((valid >> 7) == (queue->toggle_state & 1)) {
		/* this is a good one */
		hw_qeit_inc(queue);
		pref = hw_qeit_calc(queue, queue->current_q_offset);
		prefetch(pref);
		prefetch(pref + 128);
	} else {
		retvalue = NULL;
	}
	return retvalue;
}
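
/*
 * Validity is tracked with a toggle bit rather than by clearing consumed
 * entries: the hardware flips the top bit of the valid byte it writes on
 * every pass through the ring, and hw_qeit_inc() flips
 * queue->toggle_state on every wrap, so an entry is current exactly when
 * the two bits agree.
 */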

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
	struct ehea_cqe *retvalue = hw_qeit_get(queue);
	void *pref;
	u8 valid;

	pref = hw_qeit_calc(queue, queue->current_q_offset);
	prefetch(pref);
	prefetch(pref + 128);
	prefetch(pref + 256);
	valid = retvalue->valid;
	if ((valid >> 7) != (queue->toggle_state & 1))
		retvalue = NULL;
	return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
	queue->current_q_offset = 0;
	return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;
	void *retvalue;

	retvalue = hw_qeit_get(queue);
	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}
	return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);
	u32 qe = *(u8 *)retvalue;

	if ((qe >> 7) == (queue->toggle_state & 1))
		hw_qeit_eq_get_inc(queue);
	else
		retvalue = NULL;
	return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
						   int rq_nr)
{
	struct hw_queue *queue;

	if (rq_nr == 1)
		queue = &qp->hw_rqueue1;
	else if (rq_nr == 2)
		queue = &qp->hw_rqueue2;
	else
		queue = &qp->hw_rqueue3;

	return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
					      int *wqe_index)
{
	struct hw_queue *queue = &my_qp->hw_squeue;
	struct ehea_swqe *wqe_p;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
	wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

	return wqe_p;
}

static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
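	/* make sure the WQE stores are visible before the doorbell update */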
	iosync();
	ehea_update_sqa(my_qp, 1);
}

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
	struct hw_queue *queue = &qp->hw_rqueue1;

	*wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
	return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
	hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
	hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
	return hw_qeit_get_valid(&my_cq->hw_queue);
}
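
/*
 * Typical completion-polling shape built from the helpers above
 * (illustrative sketch, with process_cqe() as a hypothetical per-entry
 * handler):
 *
 *	struct ehea_cqe *cqe;
 *
 *	while ((cqe = ehea_poll_cq(my_cq))) {
 *		process_cqe(cqe);
 *		ehea_inc_cq(my_cq);
 *	}
 */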

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
	EHEA_EQ = 0,		/* event queue              */
	EHEA_NEQ		/* notification event queue */
};

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       enum ehea_eq_type type,
			       const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
			       u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
			       struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr);

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif	/* __EHEA_QMR_H__ */