/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004-2006 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if !defined(IB_MAD_H)
#define IB_MAD_H

#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <uapi/rdma/ib_user_mad.h>

/* Management base versions */
#define IB_MGMT_BASE_VERSION			1
#define OPA_MGMT_BASE_VERSION			0x80

#define OPA_SM_CLASS_VERSION			0x80

/* Management classes */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED		0x01
#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE	0x81
#define IB_MGMT_CLASS_SUBN_ADM			0x03
#define IB_MGMT_CLASS_PERF_MGMT			0x04
#define IB_MGMT_CLASS_BM			0x05
#define IB_MGMT_CLASS_DEVICE_MGMT		0x06
#define IB_MGMT_CLASS_CM			0x07
#define IB_MGMT_CLASS_SNMP			0x08
#define IB_MGMT_CLASS_DEVICE_ADM		0x10
#define IB_MGMT_CLASS_BOOT_MGMT			0x11
#define IB_MGMT_CLASS_BIS			0x12
#define IB_MGMT_CLASS_CONG_MGMT			0x21
#define IB_MGMT_CLASS_VENDOR_RANGE2_START	0x30
#define IB_MGMT_CLASS_VENDOR_RANGE2_END		0x4F

#define	IB_OPENIB_OUI				(0x001405)

/* Management methods */
#define IB_MGMT_METHOD_GET			0x01
#define IB_MGMT_METHOD_SET			0x02
#define IB_MGMT_METHOD_GET_RESP			0x81
#define IB_MGMT_METHOD_SEND			0x03
#define IB_MGMT_METHOD_TRAP			0x05
#define IB_MGMT_METHOD_REPORT			0x06
#define IB_MGMT_METHOD_REPORT_RESP		0x86
#define IB_MGMT_METHOD_TRAP_REPRESS		0x07

#define IB_MGMT_METHOD_RESP			0x80
#define IB_BM_ATTR_MOD_RESP			cpu_to_be32(1)

#define IB_MGMT_MAX_METHODS			128

/* MAD Status field bit masks */
#define IB_MGMT_MAD_STATUS_SUCCESS			0x0000
#define IB_MGMT_MAD_STATUS_BUSY				0x0001
#define IB_MGMT_MAD_STATUS_REDIRECT_REQD		0x0002
#define IB_MGMT_MAD_STATUS_BAD_VERSION			0x0004
#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD		0x0008
#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB	0x000c
#define IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE		0x001c

/* RMPP information */
#define IB_MGMT_RMPP_VERSION			1

#define IB_MGMT_RMPP_TYPE_DATA			1
#define IB_MGMT_RMPP_TYPE_ACK			2
#define IB_MGMT_RMPP_TYPE_STOP			3
#define IB_MGMT_RMPP_TYPE_ABORT			4

#define IB_MGMT_RMPP_FLAG_ACTIVE		1
#define IB_MGMT_RMPP_FLAG_FIRST			(1<<1)
#define IB_MGMT_RMPP_FLAG_LAST			(1<<2)

#define IB_MGMT_RMPP_NO_RESPTIME		0x1F

#define	IB_MGMT_RMPP_STATUS_SUCCESS		0
#define	IB_MGMT_RMPP_STATUS_RESX		1
#define	IB_MGMT_RMPP_STATUS_ABORT_MIN		118
#define	IB_MGMT_RMPP_STATUS_T2L			118
#define	IB_MGMT_RMPP_STATUS_BAD_LEN		119
#define	IB_MGMT_RMPP_STATUS_BAD_SEG		120
#define	IB_MGMT_RMPP_STATUS_BADT		121
#define	IB_MGMT_RMPP_STATUS_W2S			122
#define	IB_MGMT_RMPP_STATUS_S2B			123
#define	IB_MGMT_RMPP_STATUS_BAD_STATUS		124
#define	IB_MGMT_RMPP_STATUS_UNV			125
#define	IB_MGMT_RMPP_STATUS_TMR			126
#define	IB_MGMT_RMPP_STATUS_UNSPEC		127
#define	IB_MGMT_RMPP_STATUS_ABORT_MAX		127

#define IB_QP0		0
#define IB_QP1		cpu_to_be32(1)
#define IB_QP1_QKEY	0x80010000
#define IB_QP_SET_QKEY	0x80000000

#define IB_DEFAULT_PKEY_PARTIAL 0x7FFF
#define IB_DEFAULT_PKEY_FULL	0xFFFF

/*
 * Generic trap/notice types
 */
#define IB_NOTICE_TYPE_FATAL	0x80
#define IB_NOTICE_TYPE_URGENT	0x81
#define IB_NOTICE_TYPE_SECURITY	0x82
#define IB_NOTICE_TYPE_SM	0x83
#define IB_NOTICE_TYPE_INFO	0x84

/*
 * Generic trap/notice producers
 */
#define IB_NOTICE_PROD_CA		cpu_to_be16(1)
#define IB_NOTICE_PROD_SWITCH		cpu_to_be16(2)
#define IB_NOTICE_PROD_ROUTER		cpu_to_be16(3)
#define IB_NOTICE_PROD_CLASS_MGR	cpu_to_be16(4)

enum {
	IB_MGMT_MAD_HDR = 24,
	IB_MGMT_MAD_DATA = 232,
	IB_MGMT_RMPP_HDR = 36,
	IB_MGMT_RMPP_DATA = 220,
	IB_MGMT_VENDOR_HDR = 40,
	IB_MGMT_VENDOR_DATA = 216,
	IB_MGMT_SA_HDR = 56,
	IB_MGMT_SA_DATA = 200,
	IB_MGMT_DEVICE_HDR = 64,
	IB_MGMT_DEVICE_DATA = 192,
	IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA,
	OPA_MGMT_MAD_DATA = 2024,
	OPA_MGMT_RMPP_DATA = 2012,
	OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA,
};

struct ib_mad_hdr {
	u8	base_version;
	u8	mgmt_class;
	u8	class_version;
	u8	method;
	__be16	status;
	__be16	class_specific;
	__be64	tid;
	__be16	attr_id;
	__be16	resv;
	__be32	attr_mod;
};

struct ib_rmpp_hdr {
	u8	rmpp_version;
	u8	rmpp_type;
	u8	rmpp_rtime_flags;
	u8	rmpp_status;
	__be32	seg_num;
	__be32	paylen_newwin;
};

typedef u64 __bitwise ib_sa_comp_mask;

#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << (n)))
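
/*
 * Building a component mask (illustrative sketch): OR together one
 * IB_SA_COMP_MASK() bit per attribute component supplied in an SA query.
 * The bit numbers below are placeholders; real values come from the
 * per-class component definitions (e.g. the IB_SA_PATH_REC_* masks in
 * <rdma/ib_sa.h>).
 *
 *	ib_sa_comp_mask comp_mask;
 *
 *	comp_mask = IB_SA_COMP_MASK(4) | IB_SA_COMP_MASK(5);
 */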

/*
 * ib_sa_hdr and ib_sa_mad structures must be packed because they have
 * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
 * lay them out wrong otherwise.  (And unfortunately they are sent on
 * the wire so we can't change the layout)
 */
struct ib_sa_hdr {
	__be64			sm_key;
	__be16			attr_offset;
	__be16			reserved;
	ib_sa_comp_mask		comp_mask;
} __attribute__ ((packed));

struct ib_mad {
	struct ib_mad_hdr	mad_hdr;
	u8			data[IB_MGMT_MAD_DATA];
};

struct opa_mad {
	struct ib_mad_hdr	mad_hdr;
	u8			data[OPA_MGMT_MAD_DATA];
};

struct ib_rmpp_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	u8			data[IB_MGMT_RMPP_DATA];
};

struct opa_rmpp_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	u8			data[OPA_MGMT_RMPP_DATA];
};

struct ib_sa_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	struct ib_sa_hdr	sa_hdr;
	u8			data[IB_MGMT_SA_DATA];
} __attribute__ ((packed));

struct ib_vendor_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	u8			reserved;
	u8			oui[3];
	u8			data[IB_MGMT_VENDOR_DATA];
};

#define IB_MGMT_CLASSPORTINFO_ATTR_ID	cpu_to_be16(0x0001)

#define IB_CLASS_PORT_INFO_RESP_TIME_MASK	0x1F
#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5

struct ib_class_port_info {
	u8			base_version;
	u8			class_version;
	__be16			capability_mask;
	  /* 27 bits for cap_mask2, 5 bits for resp_time */
	__be32			cap_mask2_resp_time;
	u8			redirect_gid[16];
	__be32			redirect_tcslfl;
	__be16			redirect_lid;
	__be16			redirect_pkey;
	__be32			redirect_qp;
	__be32			redirect_qkey;
	u8			trap_gid[16];
	__be32			trap_tcslfl;
	__be16			trap_lid;
	__be16			trap_pkey;
	__be32			trap_hlqp;
	__be32			trap_qkey;
};

/* PortInfo CapabilityMask */
enum ib_port_capability_mask_bits {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CAP_MASK2_SUP = 1 << 15,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25,
	IB_PORT_OTHER_LOCAL_CHANGES_SUP = 1 << 26,
	IB_PORT_LINK_SPEED_WIDTH_TABLE_SUP = 1 << 27,
	IB_PORT_VENDOR_SPECIFIC_MADS_TABLE_SUP = 1 << 28,
	IB_PORT_MCAST_PKEY_TRAP_SUPPRESSION_SUP = 1 << 29,
	IB_PORT_MCAST_FDB_TOP_SUP = 1 << 30,
	IB_PORT_HIERARCHY_INFO_SUP = 1ULL << 31,
};

enum ib_port_capability_mask2_bits {
	IB_PORT_SET_NODE_DESC_SUP		= 1 << 0,
	IB_PORT_EX_PORT_INFO_EX_SUP		= 1 << 1,
	IB_PORT_VIRT_SUP			= 1 << 2,
	IB_PORT_SWITCH_PORT_STATE_TABLE_SUP	= 1 << 3,
	IB_PORT_LINK_WIDTH_2X_SUP		= 1 << 4,
	IB_PORT_LINK_SPEED_HDR_SUP		= 1 << 5,
};

#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)

struct opa_class_port_info {
	u8 base_version;
	u8 class_version;
	__be16 cap_mask;
	__be32 cap_mask2_resp_time;

	u8 redirect_gid[16];
	__be32 redirect_tc_fl;
	__be32 redirect_lid;
	__be32 redirect_sl_qp;
	__be32 redirect_qkey;

	u8 trap_gid[16];
	__be32 trap_tc_fl;
	__be32 trap_lid;
	__be32 trap_hl_qp;
	__be32 trap_qkey;

	__be16 trap_pkey;
	__be16 redirect_pkey;

	u8 trap_sl_rsvd;
	u8 reserved[3];
} __packed;

/**
 * ib_get_cpi_resp_time - Returns the resp_time value from
 * cap_mask2_resp_time in ib_class_port_info.
 * @cpi: A struct ib_class_port_info mad.
 */
static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi)
{
	return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) &
		    IB_CLASS_PORT_INFO_RESP_TIME_MASK);
}

/**
 * ib_set_cpi_resp_time - Sets the response time in an
 * ib_class_port_info mad.
 * @cpi: A struct ib_class_port_info.
 * @rtime: The response time to set.
 */
static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
					u8 rtime)
{
	cpi->cap_mask2_resp_time =
		(cpi->cap_mask2_resp_time &
		 cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
		cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK);
}

/**
 * ib_get_cpi_capmask2 - Returns the capmask2 value from
 * cap_mask2_resp_time in ib_class_port_info.
 * @cpi: A struct ib_class_port_info mad.
 */
static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi)
{
	return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
		IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
}

/**
 * ib_set_cpi_capmask2 - Sets the capmask2 in an
 * ib_class_port_info mad.
 * @cpi: A struct ib_class_port_info.
 * @capmask2: The capmask2 to set.
 */
static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
				       u32 capmask2)
{
	cpi->cap_mask2_resp_time =
		(cpi->cap_mask2_resp_time &
		 cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
		cpu_to_be32(capmask2 <<
			    IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
}

/**
 * opa_get_cpi_capmask2 - Returns the capmask2 value from
 * cap_mask2_resp_time in opa_class_port_info.
 * @cpi: A struct opa_class_port_info mad.
 */
static inline u32 opa_get_cpi_capmask2(struct opa_class_port_info *cpi)
{
	return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
		IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
}
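
/*
 * Example (illustrative sketch): the accessors above pack a 27-bit CapMask2
 * and a 5-bit resp_time into the single big-endian cap_mask2_resp_time
 * word; each setter preserves the other field.  The resp_time value 18 is
 * arbitrary.
 *
 *	struct ib_class_port_info cpi = {};
 *
 *	ib_set_cpi_capmask2(&cpi, IB_PORT_VIRT_SUP | IB_PORT_LINK_WIDTH_2X_SUP);
 *	ib_set_cpi_resp_time(&cpi, 18);
 *
 * ib_get_cpi_capmask2() and ib_get_cpi_resp_time() then recover the two
 * fields independently.
 */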

struct ib_mad_notice_attr {
	u8 generic_type;
	u8 prod_type_msb;
	__be16 prod_type_lsb;
	__be16 trap_num;
	__be16 issuer_lid;
	__be16 toggle_count;

	union {
		struct {
			u8	details[54];
		} raw_data;

		struct {
			__be16	reserved;
			__be16	lid;		/* where violation happened */
			u8	port_num;	/* where violation happened */
		} __packed ntc_129_131;

		struct {
			__be16	reserved;
			__be16	lid;		/* LID where change occurred */
			u8	reserved2;
			u8	local_changes;	/* low bit - local changes */
			__be32	new_cap_mask;	/* new capability mask */
			u8	reserved3;
			u8	change_flags;	/* low 3 bits only */
		} __packed ntc_144;

		struct {
			__be16	reserved;
			__be16	lid;		/* lid where sys guid changed */
			__be16	reserved2;
			__be64	new_sys_guid;
		} __packed ntc_145;

		struct {
			__be16	reserved;
			__be16	lid;
			__be16	dr_slid;
			u8	method;
			u8	reserved2;
			__be16	attr_id;
			__be32	attr_mod;
			__be64	mkey;
			u8	reserved3;
			u8	dr_trunc_hop;
			u8	dr_rtn_path[30];
		} __packed ntc_256;

		struct {
			__be16		reserved;
			__be16		lid1;
			__be16		lid2;
			__be32		key;
			__be32		sl_qp1;	/* SL: high 4 bits */
			__be32		qp2;	/* high 8 bits reserved */
			union ib_gid	gid1;
			union ib_gid	gid2;
		} __packed ntc_257_258;

	} details;
};
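
/*
 * Example (illustrative sketch): picking the new CapabilityMask out of a
 * received Notice attribute reporting trap 144 (capability mask change,
 * per the IBA specification).  "notice" is assumed to point at the Notice
 * data of a received trap MAD.
 *
 *	if (notice->trap_num == cpu_to_be16(144)) {
 *		u32 new_cap_mask =
 *			be32_to_cpu(notice->details.ntc_144.new_cap_mask);
 *		...
 *	}
 */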

/**
 * ib_mad_send_buf - MAD data buffer and work request for sends.
 * @next: A pointer used to chain together MADs for posting.
 * @mad: References an allocated MAD data buffer for MADs that do not have
 *   RMPP active.  For MADs using RMPP, references the common and management
 *   class specific headers.
 * @mad_agent: MAD agent that allocated the buffer.
 * @ah: The address handle to use when sending the MAD.
 * @context: User-controlled context fields.
 * @hdr_len: Indicates the size of the data header of the MAD.  This length
 *   includes the common MAD, RMPP, and class specific headers.
 * @data_len: Indicates the total size of user-transferred data.
 * @seg_count: The number of RMPP segments allocated for this send.
 * @seg_size: Size of the data in each RMPP segment.  This does not include
 *   class specific headers.
 * @seg_rmpp_size: Size of each RMPP segment including the class specific
 *   headers.
 * @timeout_ms: Time to wait for a response.
 * @retries: Number of times to retry a request for a response.  For MADs
 *   using RMPP, this applies per window.  On completion, returns the number
 *   of retries needed to complete the transfer.
 *
 * Users are responsible for initializing the MAD buffer itself, with the
 * exception of any RMPP header.  Additional segment buffer space allocated
 * beyond data_len is padding.
 */
struct ib_mad_send_buf {
	struct ib_mad_send_buf	*next;
	void			*mad;
	struct ib_mad_agent	*mad_agent;
	struct ib_ah		*ah;
	void			*context[2];
	int			hdr_len;
	int			data_len;
	int			seg_count;
	int			seg_size;
	int			seg_rmpp_size;
	int			timeout_ms;
	int			retries;
};

/**
 * ib_response_mad - Returns whether the specified MAD was generated in
 *   response to a sent request or trap.
 */
int ib_response_mad(const struct ib_mad_hdr *hdr);

/**
 * ib_get_rmpp_resptime - Returns the RMPP response time.
 * @rmpp_hdr: An RMPP header.
 */
static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
{
	return rmpp_hdr->rmpp_rtime_flags >> 3;
}

/**
 * ib_get_rmpp_flags - Returns the RMPP flags.
 * @rmpp_hdr: An RMPP header.
 */
static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
{
	return rmpp_hdr->rmpp_rtime_flags & 0x7;
}

/**
 * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
 * @rmpp_hdr: An RMPP header.
 * @rtime: The response time to set.
 */
static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
{
	rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
}

/**
 * ib_set_rmpp_flags - Sets the flags in an RMPP header.
 * @rmpp_hdr: An RMPP header.
 * @flags: The flags to set.
 */
static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
{
	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
				     (flags & 0x7);
}
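
/*
 * Example (illustrative sketch): marking an RMPP header as the active,
 * first and last segment of a transfer and using the default response
 * time.  "rmpp_mad" is assumed to point at a struct ib_rmpp_mad.
 *
 *	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE |
 *					       IB_MGMT_RMPP_FLAG_FIRST |
 *					       IB_MGMT_RMPP_FLAG_LAST);
 *	ib_set_rmpp_resptime(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_NO_RESPTIME);
 */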

struct ib_mad_agent;
struct ib_mad_send_wc;
struct ib_mad_recv_wc;

/**
 * ib_mad_send_handler - callback handler for a sent MAD.
 * @mad_agent: MAD agent that sent the MAD.
 * @mad_send_wc: Send work completion information on the sent MAD.
 */
typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
				    struct ib_mad_send_wc *mad_send_wc);
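
/*
 * Example (illustrative sketch): a minimal send completion handler.  Once
 * the send completes, successfully or not, the client owns the buffer
 * again and typically releases it with ib_free_send_mad().  The function
 * name is hypothetical.
 *
 *	static void my_send_handler(struct ib_mad_agent *mad_agent,
 *				    struct ib_mad_send_wc *mad_send_wc)
 *	{
 *		if (mad_send_wc->status != IB_WC_SUCCESS)
 *			pr_debug("MAD send failed: %d\n", mad_send_wc->status);
 *		ib_free_send_mad(mad_send_wc->send_buf);
 *	}
 */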

/**
 * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
 * @mad_agent: MAD agent that snooped the MAD.
 * @send_buf: send MAD data buffer.
 * @mad_send_wc: Work completion information on the sent MAD.  Valid
 *   only for snooping that occurs on a send completion.
 *
 * Clients snooping MADs should not modify data referenced by the @send_buf
 * or @mad_send_wc.
 */
typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
				     struct ib_mad_send_buf *send_buf,
				     struct ib_mad_send_wc *mad_send_wc);

/**
 * ib_mad_recv_handler - callback handler for a received MAD.
 * @mad_agent: MAD agent requesting the received MAD.
 * @send_buf: Send buffer if found, else NULL
 * @mad_recv_wc: Received work completion information on the received MAD.
 *
 * MADs received in response to a send request operation will be handed to
 * the user before the send operation completes.  All data buffers given
 * to registered agents through this routine are owned by the receiving
 * client, except for snooping agents.  Clients snooping MADs should not
 * modify the data referenced by @mad_recv_wc.
 */
typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
				    struct ib_mad_send_buf *send_buf,
				    struct ib_mad_recv_wc *mad_recv_wc);
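
/*
 * Example (illustrative sketch): a minimal receive handler.  The receiving
 * client owns the buffers handed to it here and must return them with
 * ib_free_recv_mad() when done.  The function name is hypothetical.
 *
 *	static void my_recv_handler(struct ib_mad_agent *mad_agent,
 *				    struct ib_mad_send_buf *send_buf,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		const struct ib_mad_hdr *hdr =
 *			&mad_recv_wc->recv_buf.mad->mad_hdr;
 *
 *		pr_debug("MAD class 0x%x method 0x%x\n",
 *			 hdr->mgmt_class, hdr->method);
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */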

/**
 * ib_mad_agent - Used to track MAD registration with the access layer.
 * @device: Reference to the device the registration is on.
 * @qp: Reference to the QP used for sending and receiving MADs.
 * @recv_handler: Callback handler for a received MAD.
 * @send_handler: Callback handler for a sent MAD.
 * @snoop_handler: Callback handler for snooped sent MADs.
 * @context: User-specified context associated with this registration.
 * @hi_tid: Access layer assigned transaction ID for this client.
 *   Unsolicited MADs sent by this client will have the upper 32-bits
 *   of their TID set to this value.
 * @flags: registration flags
 * @port_num: Port number on which QP is registered
 * @rmpp_version: If set, indicates the RMPP version used by this agent.
 */
enum {
	IB_MAD_USER_RMPP = IB_USER_MAD_USER_RMPP,
};
struct ib_mad_agent {
	struct ib_device	*device;
	struct ib_qp		*qp;
	ib_mad_recv_handler	recv_handler;
	ib_mad_send_handler	send_handler;
	ib_mad_snoop_handler	snoop_handler;
	void			*context;
	u32			hi_tid;
	u32			flags;
	u8			port_num;
	u8			rmpp_version;
	void			*security;
	bool			smp_allowed;
	bool			lsm_nb_reg;
	struct notifier_block   lsm_nb;
};

/**
 * ib_mad_send_wc - MAD send completion information.
 * @send_buf: Send MAD data buffer associated with the send MAD request.
 * @status: Completion status.
 * @vendor_err: Optional vendor error information returned with a failed
 *   request.
 */
struct ib_mad_send_wc {
	struct ib_mad_send_buf	*send_buf;
	enum ib_wc_status	status;
	u32			vendor_err;
};

/**
 * ib_mad_recv_buf - received MAD buffer information.
 * @list: Reference to next data buffer for a received RMPP MAD.
 * @grh: References a data buffer containing the global route header.
 *   The data referenced by this buffer is only valid if the GRH is
 *   valid.
 * @mad: References the start of the received MAD.
 */
struct ib_mad_recv_buf {
	struct list_head	list;
	struct ib_grh		*grh;
	union {
		struct ib_mad	*mad;
		struct opa_mad	*opa_mad;
	};
};

/**
 * ib_mad_recv_wc - received MAD information.
 * @wc: Completion information for the received data.
 * @recv_buf: Specifies the location of the received data buffer(s).
 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
 * @mad_len: The length of the received MAD, without duplicated headers.
 * @mad_seg_size: The size of individual MAD segments
 *
 * For a received response, the wr_id contains a pointer to the ib_mad_send_buf
 *   for the corresponding send request.
 */
struct ib_mad_recv_wc {
	struct ib_wc		*wc;
	struct ib_mad_recv_buf	recv_buf;
	struct list_head	rmpp_list;
	int			mad_len;
	size_t			mad_seg_size;
};

/**
 * ib_mad_reg_req - MAD registration request
 * @mgmt_class: Indicates which management class of MADs should be received
 *   by the caller.  This field is only required if the user wishes to
 *   receive unsolicited MADs, otherwise it should be 0.
 * @mgmt_class_version: Indicates which version of MADs for the given
 *   management class to receive.
 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
 *   in the range from 0x30 to 0x4f. Otherwise not used.
 * @method_mask: The caller will receive unsolicited MADs for any method
 *   whose corresponding bit is set in @method_mask.
 *
 */
struct ib_mad_reg_req {
	u8	mgmt_class;
	u8	mgmt_class_version;
	u8	oui[3];
	DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);
};

/**
 * ib_register_mad_agent - Register to send/receive MADs.
 * @device: The device to register with.
 * @port_num: The port on the specified device to use.
 * @qp_type: Specifies which QP to access.  Must be either
 *   IB_QPT_SMI or IB_QPT_GSI.
 * @mad_reg_req: Specifies which unsolicited MADs should be received
 *   by the caller.  This parameter may be NULL if the caller only
 *   wishes to receive solicited responses.
 * @rmpp_version: If set, indicates that the client will send
 *   and receive MADs that contain the RMPP header for the given version.
 *   If set to 0, indicates that RMPP is not used by this client.
 * @send_handler: The completion callback routine invoked after a send
 *   request has completed.
 * @recv_handler: The completion callback routine invoked for a received
 *   MAD.
 * @context: User specified context associated with the registration.
 * @registration_flags: Registration flags to set for this agent
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags);
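
/*
 * Example (illustrative sketch): registering a GSI agent that receives
 * unsolicited Performance Management GET/SET MADs.  The handler and
 * variable names are hypothetical and error handling is abbreviated.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */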

enum ib_mad_snoop_flags {
	/*IB_MAD_SNOOP_POSTED_SENDS	   = 1,*/
	/*IB_MAD_SNOOP_RMPP_SENDS	   = (1<<1),*/
	IB_MAD_SNOOP_SEND_COMPLETIONS	   = (1<<2),
	/*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
	IB_MAD_SNOOP_RECVS		   = (1<<4)
	/*IB_MAD_SNOOP_RMPP_RECVS	   = (1<<5),*/
	/*IB_MAD_SNOOP_REDIRECTED_QPS	   = (1<<6)*/
};

/**
 * ib_register_mad_snoop - Register to snoop sent and received MADs.
 * @device: The device to register with.
 * @port_num: The port on the specified device to use.
 * @qp_type: Specifies which QP traffic to snoop.  Must be either
 *   IB_QPT_SMI or IB_QPT_GSI.
 * @mad_snoop_flags: Specifies the points at which snooping occurs; see
 *   enum ib_mad_snoop_flags.
 * @send_handler: The callback routine invoked for a snooped send.
 * @recv_handler: The callback routine invoked for a snooped receive.
 * @context: User specified context associated with the registration.
 */
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context);

/**
 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
 * @mad_agent: Corresponding MAD registration request to deregister.
 *
 * After invoking this routine, MAD services are no longer usable by the
 * client on the associated QP.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);

/**
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *   with the registered client.
 * @send_buf: Specifies the information needed to send the MAD(s).
 * @bad_send_buf: Specifies the MAD on which an error was encountered.  This
 *   parameter is optional if only a single MAD is posted.
 *
 * Sent MADs are not guaranteed to complete in the order that they were posted.
 *
 * If the MAD requires RMPP, the data buffer should contain a single copy
 * of the common MAD, RMPP, and class specific headers, followed by the class
 * defined data.  If the class defined data would not divide evenly into
 * RMPP segments, then space must be allocated at the end of the referenced
 * buffer for any required padding.  To indicate the amount of class defined
 * data being transferred, the paylen_newwin field in the RMPP header should
 * be set to the size of the class specific header plus the amount of class
 * defined data being transferred.  The paylen_newwin field should be
 * specified in network-byte order.
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf);
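
/*
 * Example (illustrative sketch): posting two buffers, previously allocated
 * with ib_create_send_mad(), as one chain.  "first_buf" and "second_buf"
 * are hypothetical; on failure, bad_send_buf identifies the MAD on which
 * the error was encountered.
 *
 *	struct ib_mad_send_buf *bad_send_buf;
 *	int ret;
 *
 *	first_buf->next = second_buf;
 *	second_buf->next = NULL;
 *	ret = ib_post_send_mad(first_buf, &bad_send_buf);
 *	if (ret)
 *		pr_debug("MAD post failed at %p: %d\n", bad_send_buf, ret);
 */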


/**
 * ib_free_recv_mad - Returns data buffers used to receive a MAD.
 * @mad_recv_wc: Work completion information for a received MAD.
 *
 * Clients receiving MADs through their ib_mad_recv_handler must call this
 * routine to return the work completion buffers to the access layer.
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);

/**
 * ib_cancel_mad - Cancels an outstanding send MAD operation.
 * @mad_agent: Specifies the registration associated with sent MAD.
 * @send_buf: Indicates the MAD to cancel.
 *
 * MADs will be returned to the user through the corresponding
 * ib_mad_send_handler.
 */
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf);

/**
 * ib_modify_mad - Modifies an outstanding send MAD operation.
 * @mad_agent: Specifies the registration associated with sent MAD.
 * @send_buf: Indicates the MAD to modify.
 * @timeout_ms: New timeout value for sent MAD.
 *
 * This call will reset the timeout value for a sent MAD to the specified
 * value.
 */
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms);

/**
 * ib_redirect_mad_qp - Registers a QP for MAD services.
 * @qp: Reference to a QP that requires MAD services.
 * @rmpp_version: If set, indicates that the client will send
 *   and receive MADs that contain the RMPP header for the given version.
 *   If set to 0, indicates that RMPP is not used by this client.
 * @send_handler: The completion callback routine invoked after a send
 *   request has completed.
 * @recv_handler: The completion callback routine invoked for a received
 *   MAD.
 * @context: User specified context associated with the registration.
 *
 * Use of this call allows clients to use MAD services, such as RMPP,
 * on user-owned QPs.  After calling this routine, users may send
 * MADs on the specified QP by calling ib_post_send_mad.
 */
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context);

/**
 * ib_process_mad_wc - Processes a work completion associated with a
 *   MAD sent or received on a redirected QP.
 * @mad_agent: Specifies the registered MAD service using the redirected QP.
 * @wc: References a work completion associated with a sent or received
 *   MAD segment.
 *
 * This routine is used to complete or continue processing on a MAD request.
 * If the work completion is associated with a send operation, calling
 * this routine is required to continue an RMPP transfer or to wait for a
 * corresponding response, if it is a request.  If the work completion is
 * associated with a receive operation, calling this routine is required to
 * process an inbound or outbound RMPP transfer, or to match a response MAD
 * with its corresponding request.
 */
int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc);

/**
 * ib_create_send_mad - Allocate and initialize a data buffer and work request
 *   for sending a MAD.
 * @mad_agent: Specifies the registered MAD service to associate with the MAD.
 * @remote_qpn: Specifies the QPN of the receiving node.
 * @pkey_index: Specifies which PKey the MAD will be sent using.  This field
 *   is valid only if the remote_qpn is QP 1.
 * @rmpp_active: Indicates if the send will enable RMPP.
 * @hdr_len: Indicates the size of the data header of the MAD.  This length
 *   should include the common MAD header, RMPP header, plus any class
 *   specific header.
 * @data_len: Indicates the size of any user-transferred data.  The call will
 *   automatically adjust the allocated buffer size to account for any
 *   additional padding that may be necessary.
 * @gfp_mask: GFP mask used for the memory allocation.
 * @base_version: Base Version of this MAD
 *
 * This routine allocates a MAD for sending.  The returned MAD send buffer
 * will reference a data buffer usable for sending a MAD, along
 * with an initialized work request structure.  Users may modify the returned
 * MAD data buffer before posting the send.
 *
 * The returned MAD header, class specific headers, and any padding will be
 * cleared.  Users are responsible for initializing the common MAD header,
 * any class specific header, and MAD data area.
 * If @rmpp_active is set, the RMPP header will be initialized for sending.
 */
struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version);
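
/*
 * Example (illustrative sketch): allocating, initializing and posting a
 * single non-RMPP MAD.  "agent", "ah", "remote_qpn", "pkey_index" and
 * "attr_id" are assumed to exist; address handle setup, TID construction
 * (upper 32 bits from agent->hi_tid) and the class payload are omitted.
 * On success the buffer is released later from the send handler.
 *
 *	struct ib_mad_send_buf *send_buf;
 *	struct ib_mad *mad;
 *
 *	send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				      GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(send_buf))
 *		return PTR_ERR(send_buf);
 *
 *	mad = send_buf->mad;
 *	mad->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
 *	mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 *	mad->mad_hdr.class_version = 1;
 *	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
 *	mad->mad_hdr.attr_id = cpu_to_be16(attr_id);
 *
 *	send_buf->ah = ah;
 *	send_buf->timeout_ms = 100;
 *	send_buf->retries = 3;
 *
 *	if (ib_post_send_mad(send_buf, NULL))
 *		ib_free_send_mad(send_buf);
 */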

/**
 * ib_is_mad_class_rmpp - returns whether the given management class
 * supports RMPP.
 * @mgmt_class: management class
 *
 * This routine returns whether the management class supports RMPP.
 */
int ib_is_mad_class_rmpp(u8 mgmt_class);

/**
 * ib_get_mad_data_offset - returns the data offset for a given
 * management class.
 * @mgmt_class: management class
 *
 * This routine returns the data offset in the MAD for the management
 * class requested.
 */
int ib_get_mad_data_offset(u8 mgmt_class);

/**
 * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
 * @send_buf: Previously allocated send data buffer.
 * @seg_num: number of segment to return
 *
 * This routine returns a pointer to the data buffer of an RMPP MAD.
 * Users must provide synchronization to @send_buf around this call.
 */
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);

/**
 * ib_free_send_mad - Returns data buffers used to send a MAD.
 * @send_buf: Previously allocated send data buffer.
 */
void ib_free_send_mad(struct ib_mad_send_buf *send_buf);

/**
 * ib_mad_kernel_rmpp_agent - Returns whether the agent is performing RMPP.
 * @agent: the agent in question
 * @return: true if the agent is performing RMPP, false otherwise.
 */
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);

#endif /* IB_MAD_H */